"""Util library for saving, copying and moving checkpoints."""
import concurrent.futures
import os
import tensorflow.compat.v1 as tf
# Default target file name to copy the best checkpoint to.
BEST_CHECKPOINT_FILENAME = "best_checkpoint"
# Default file name for storing the best evaluation results.
BEST_EVAL_INFO_FILENAME = "best_eval_info"
def copy_checkpoint(checkpoint_path,
to_dir,
to_checkpoint_name = BEST_CHECKPOINT_FILENAME):
"""Copies a checkpoint to a new directory.
Args:
checkpoint_path: Specific checkpoint path to copy.
to_dir: The target directory to copy to.
to_checkpoint_name: The target checkpoint name to copy to.
Raises:
NotFoundError: When the given checkpoint is not found.
"""
if not checkpoint_path:
raise tf.errors.NotFoundError(None, None,
"Checkpoint path must be non-empty")
old_filenames = tf.io.gfile.glob(checkpoint_path + "*")
if not old_filenames:
raise tf.errors.NotFoundError(
None, None, "Unable to find checkpoint: %s" % checkpoint_path)
if not tf.io.gfile.exists(to_dir):
tf.io.gfile.makedirs(to_dir)
# Threaded copying helps to mitigate issues where large checkpoints do not
# finish copying before being deleted.
threads = []
executor = concurrent.futures.ThreadPoolExecutor(max_workers=20)
for old_filename in old_filenames:
_, suffix = os.path.splitext(old_filename)
new_filename = os.path.join(to_dir, to_checkpoint_name + suffix)
threads.append(
executor.submit(
tf.io.gfile.copy, old_filename, new_filename, overwrite=True))
concurrent.futures.wait(threads)
tf.logging.info("Copied checkpoint %s to dir %s", checkpoint_path, to_dir)
# Recreates a checkpoint file.
new_checkpoint = os.path.join(to_dir, to_checkpoint_name)
tf.train.update_checkpoint_state(to_dir, new_checkpoint)
tf.logging.info("Writing new checkpoint file for %s", to_dir)
def update_eval_info(
directory,
eval_result,
higher_is_better = True,
eval_info_filename = BEST_EVAL_INFO_FILENAME):
"""Updates the eval info if the new result is better.
Args:
directory: The directory where the best eval info file is stored.
eval_result: The new eval result.
higher_is_better: Whether higher eval numbers are better.
eval_info_filename: The name of the best eval file.
Returns:
Whether the new eval result is better than the previous one.
"""
# Read the previous eval number and compare it to the current one.
full_path = os.path.join(directory, eval_info_filename)
if not tf.io.gfile.exists(full_path):
is_better = True
else:
with tf.io.gfile.GFile(full_path, "r") as eval_file:
previous_eval_result_str = eval_file.read()
try:
previous_eval_result = float(previous_eval_result_str)
if higher_is_better:
is_better = eval_result > previous_eval_result
else:
is_better = eval_result < previous_eval_result
except ValueError:
is_better = True
tf.logging.info("Skip previous eval info because it is ill-formed.")
if is_better:
if not tf.io.gfile.exists(directory):
tf.io.gfile.makedirs(directory)
with tf.io.gfile.GFile(full_path, "w") as eval_file:
eval_file.write("%f\n" % eval_result)
return is_better
def save_checkpoint_if_best(eval_result,
checkpoint_path,
to_dir,
to_checkpoint_name = BEST_CHECKPOINT_FILENAME,
higher_is_better = True):
"""Copies a checkpoint if it is the best so far.
Args:
eval_result: The new eval result.
checkpoint_path: Specific checkpoint path to compare and copy.
to_dir: The target directory to copy to.
to_checkpoint_name: The target checkpoint name to copy to.
higher_is_better: Whether higher eval numbers are better.
Returns:
Whether the new eval result is better than the previous one.
"""
is_better = update_eval_info(to_dir, eval_result, higher_is_better)
if is_better:
copy_checkpoint(checkpoint_path, to_dir, to_checkpoint_name)
return is_better
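# Illustrative usage sketch (not part of the original module): how a training
# loop might call save_checkpoint_if_best after each evaluation.  The metric
# value and paths below are placeholder example values.
def _example_save_best():
  is_best = save_checkpoint_if_best(
      eval_result=0.87,
      checkpoint_path="/tmp/model/model.ckpt-1000",
      to_dir="/tmp/model/best",
      higher_is_better=True)
  if is_best:
    tf.logging.info("New best checkpoint copied to /tmp/model/best.")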
| {
"content_hash": "88c6beb677477f6612f08852b68c44c6",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 76,
"avg_line_length": 34.08943089430894,
"alnum_prop": 0.6775578344860482,
"repo_name": "google-research/language",
"id": "14dea8db54a6c9dfb6520d77602dab03d86b7989",
"size": "4808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/multivec/models/checkpoint_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
from otp.ai.AIBaseGlobal import *
import DistributedCCharBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import random
from toontown.toonbase import ToontownGlobals
import CharStateDatasAI
from toontown.toonbase import TTLocalizer
class DistributedMickeyAI(DistributedCCharBaseAI.DistributedCCharBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMickeyAI')
def __init__(self, air):
DistributedCCharBaseAI.DistributedCCharBaseAI.__init__(self, air, TTLocalizer.Mickey)
self.fsm = ClassicFSM.ClassicFSM('DistributedMickeyAI', [
State.State('Off', self.enterOff, self.exitOff, [
'Lonely',
'TransitionToCostume',
'Walk']),
State.State('Lonely', self.enterLonely, self.exitLonely, [
'Chatty',
'Walk',
'TransitionToCostume']),
State.State('Chatty', self.enterChatty, self.exitChatty, [
'Lonely',
'Walk',
'TransitionToCostume']),
State.State('Walk', self.enterWalk, self.exitWalk, [
'Lonely',
'Chatty',
'TransitionToCostume']),
State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, [
'Off'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
def delete(self):
self.fsm.requestFinalState()
del self.fsm
DistributedCCharBaseAI.DistributedCCharBaseAI.delete(self)
self.lonelyDoneEvent = None
self.lonely = None
self.chattyDoneEvent = None
self.chatty = None
self.walkDoneEvent = None
self.walk = None
self.notify.debug('MickeyAI Deleted')
def generate(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.generate(self)
name = self.getName()
self.lonelyDoneEvent = self.taskName(name + '-lonely-done')
self.lonely = CharStateDatasAI.CharLonelyStateAI(self.lonelyDoneEvent, self)
self.chattyDoneEvent = self.taskName(name + '-chatty-done')
self.chatty = CharStateDatasAI.CharChattyStateAI(self.chattyDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
        if self.diffPath is None:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self)
else:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self, self.diffPath)
def walkSpeed(self):
return ToontownGlobals.MickeySpeed
def start(self):
self.fsm.request('Lonely')
def _DistributedMickeyAI__decideNextState(self, doneStatus):
if self.transitionToCostume == 1:
curWalkNode = self.walk.getDestNode()
if simbase.air.holidayManager:
if ToontownGlobals.HALLOWEEN_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
elif ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
else:
self.notify.warning('transitionToCostume == 1 but no costume holiday')
else:
self.notify.warning('transitionToCostume == 1 but no holiday Manager')
if doneStatus['state'] == 'lonely' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'chatty' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'walk' and doneStatus['status'] == 'done':
if len(self.nearbyAvatars) > 0:
self.fsm.request('Chatty')
else:
self.fsm.request('Lonely')
def enterOff(self):
pass
def exitOff(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.exitOff(self)
def enterLonely(self):
self.lonely.enter()
self.acceptOnce(self.lonelyDoneEvent, self._DistributedMickeyAI__decideNextState)
def exitLonely(self):
self.ignore(self.lonelyDoneEvent)
self.lonely.exit()
def _DistributedMickeyAI__goForAWalk(self, task):
self.notify.debug('going for a walk')
self.fsm.request('Walk')
return Task.done
def enterChatty(self):
self.chatty.enter()
self.acceptOnce(self.chattyDoneEvent, self._DistributedMickeyAI__decideNextState)
def exitChatty(self):
self.ignore(self.chattyDoneEvent)
self.chatty.exit()
def enterWalk(self):
self.notify.debug('going for a walk')
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self._DistributedMickeyAI__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def avatarEnterNextState(self):
if len(self.nearbyAvatars) == 1:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Chatty')
else:
self.notify.debug('avatarEnterNextState: in walk state')
else:
self.notify.debug('avatarEnterNextState: num avatars: ' + str(len(self.nearbyAvatars)))
def avatarExitNextState(self):
if len(self.nearbyAvatars) == 0:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Lonely')
def enterTransitionToCostume(self):
pass
def exitTransitionToCostume(self):
pass
def handleHolidays(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.handleHolidays(self)
if hasattr(simbase.air, 'holidayManager'):
if ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES] != None and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].getRunningState():
self.diffPath = TTLocalizer.Daisy
| {
"content_hash": "d96bb0422a7e8a1912786b3ecb82afd2",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 294,
"avg_line_length": 42.81290322580645,
"alnum_prop": 0.6552139843279083,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "0c02673248beba45a55509d110a8307e8a91ad4b",
"size": "6636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/classicchars/DistributedMickeyAI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
from homogeneous_simple_shear import rotate
import numpy as np
import itertools
class TestRotate:
def test_forward_equal_inverse(self):
def check(theta, alpha, phi):
x, y = np.mgrid[:10, :20]
z = np.ones_like(x)
xyz = np.vstack([x.ravel(), y.ravel(), z.ravel()]).T
rxyz = rotate(xyz, theta, alpha, phi)
ixyz = rotate(rxyz, theta, alpha, phi, inverse=True)
assert np.allclose(ixyz, xyz)
thetas = range(-180, 360, 30)
alphas = range(-90, 100, 30)
phis = range(-90, 100, 30)
for theta, alpha, phi in itertools.product(thetas, alphas, phis):
t, a, p = [np.radians(item) for item in [theta, alpha, phi]]
check(t, a, p)
| {
"content_hash": "3a8608aaec09c70e29c885678cf06e66",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 73,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.5606860158311345,
"repo_name": "joferkington/fault_kinematics",
"id": "7eeb07d17f1901a36fef33bac6b959ecf70043d0",
"size": "758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fault_kinematics/test_homogeneous_shear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11828"
}
],
"symlink_target": ""
} |
from ...core.utils import snake_to_camel_case
def convert_dict_keys_to_camel_case(d):
"""Changes dict fields from d[field_name] to d[fieldName].
Useful when dealing with dict data such as address that need to be parsed
into graphql input.
"""
data = {}
for k, v in d.items():
new_key = snake_to_camel_case(k)
data[new_key] = d[k]
return data
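# Illustrative usage sketch (not part of the original module), assuming
# snake_to_camel_case splits on underscores and capitalizes the remaining
# parts.  The address fields are example values only.
#
#     >>> convert_dict_keys_to_camel_case(
#     ...     {"street_address_1": "Main St 1", "postal_code": "53-601"})
#     {'streetAddress1': 'Main St 1', 'postalCode': '53-601'}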
| {
"content_hash": "cf20e9fd7c45d521ad16fc3b4ef0cc94",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.6323907455012854,
"repo_name": "mociepka/saleor",
"id": "a0bb6102142a949aab63754156f78759dce416ae",
"size": "389",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "saleor/graphql/account/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""
Support for WeMo switches.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.wemo/
"""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import (
STATE_OFF, STATE_ON, STATE_STANDBY, STATE_UNKNOWN)
from homeassistant.loader import get_component
DEPENDENCIES = ['wemo']
_LOGGER = logging.getLogger(__name__)
ATTR_SENSOR_STATE = "sensor_state"
ATTR_SWITCH_MODE = "switch_mode"
ATTR_CURRENT_STATE_DETAIL = 'state_detail'
MAKER_SWITCH_MOMENTARY = "momentary"
MAKER_SWITCH_TOGGLE = "toggle"
WEMO_ON = 1
WEMO_OFF = 0
WEMO_STANDBY = 8
# pylint: disable=unused-argument, too-many-function-args
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup discovered WeMo switches."""
import pywemo.discovery as discovery
if discovery_info is not None:
location = discovery_info[2]
mac = discovery_info[3]
device = discovery.device_from_description(location, mac)
if device:
add_devices_callback([WemoSwitch(device)])
class WemoSwitch(SwitchDevice):
"""Representation of a WeMo switch."""
def __init__(self, device):
"""Initialize the WeMo switch."""
self.wemo = device
self.insight_params = None
self.maker_params = None
self._state = None
wemo = get_component('wemo')
wemo.SUBSCRIPTION_REGISTRY.register(self.wemo)
wemo.SUBSCRIPTION_REGISTRY.on(self.wemo, None, self._update_callback)
def _update_callback(self, _device, _params):
"""Called by the Wemo device callback to update state."""
_LOGGER.info(
'Subscription update for %s',
_device)
if not hasattr(self, 'hass'):
self.update()
return
self.update_ha_state(True)
@property
def should_poll(self):
"""No polling needed with subscriptions."""
return False
@property
def unique_id(self):
"""Return the ID of this WeMo switch."""
return "{}.{}".format(self.__class__, self.wemo.serialnumber)
@property
def name(self):
"""Return the name of the switch if any."""
return self.wemo.name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if self.maker_params:
# Is the maker sensor on or off.
if self.maker_params['hassensor']:
# Note a state of 1 matches the WeMo app 'not triggered'!
if self.maker_params['sensorstate']:
attr[ATTR_SENSOR_STATE] = STATE_OFF
else:
attr[ATTR_SENSOR_STATE] = STATE_ON
# Is the maker switch configured as toggle(0) or momentary (1).
if self.maker_params['switchmode']:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_MOMENTARY
else:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_TOGGLE
if self.insight_params:
attr[ATTR_CURRENT_STATE_DETAIL] = self.detail_state
return attr
@property
def current_power_mwh(self):
"""Current power usage in mWh."""
if self.insight_params:
return self.insight_params['currentpower']
@property
def today_power_mw(self):
"""Today total power usage in mW."""
if self.insight_params:
return self.insight_params['todaymw']
@property
def detail_state(self):
"""Return the state of the device."""
if self.insight_params:
standby_state = int(self.insight_params['state'])
if standby_state == WEMO_ON:
return STATE_ON
elif standby_state == WEMO_OFF:
return STATE_OFF
elif standby_state == WEMO_STANDBY:
return STATE_STANDBY
else:
return STATE_UNKNOWN
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self._state
@property
def available(self):
"""True if switch is available."""
if self.wemo.model_name == 'Insight' and self.insight_params is None:
return False
if self.wemo.model_name == 'Maker' and self.maker_params is None:
return False
return True
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._state = WEMO_ON
self.wemo.on()
self.schedule_update_ha_state()
def turn_off(self):
"""Turn the switch off."""
self._state = WEMO_OFF
self.wemo.off()
self.schedule_update_ha_state()
def update(self):
"""Update WeMo state."""
try:
self._state = self.wemo.get_state(True)
if self.wemo.model_name == 'Insight':
self.insight_params = self.wemo.insight_params
self.insight_params['standby_state'] = (
self.wemo.get_standby_state)
elif self.wemo.model_name == 'Maker':
self.maker_params = self.wemo.maker_params
except AttributeError:
_LOGGER.warning('Could not update status for %s', self.name)
| {
"content_hash": "4348b25a33d775e276998b74ce48fcf6",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 77,
"avg_line_length": 31.01156069364162,
"alnum_prop": 0.5955265610438024,
"repo_name": "jaharkes/home-assistant",
"id": "d4f6b721e9d95a069915b7bf9c0293f69b6355cb",
"size": "5365",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/wemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1446622"
},
{
"name": "Python",
"bytes": "3984986"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
import numpy
#punishment for too many components
#BIG_VALUE = 100
#Threshold from which we subtract the weight, to obtain the fitness
#BIGGER_VALUE = 1000
'''
Union-Find helpers (with path compression) used by the Kruskal MST below:
root() finds a set representative and same() tests whether two vertices
are already connected.
'''
def root(father, a):
if father[a] != a:
father[a] = root(father, father[a])
return father[a]
def same(father, a, b):
return root(father, a) == root(father, b)
'''
Calculates the minimum spanning tree of the subgraph induced by the allowed vertices
(= steiner vertices + starting points), used for the fitness evaluation.
I think it's best to represent the graph by a list of the edges, and they should be
sorted before calling this method, as a matter of efficiency.
allowed_vertices is a bitstring, where 1 means the vertex should be in the MST and 0 that it shouldn't.
Uses Kruskal and a Union-Find data structure with path compression to achieve almost
linear complexity (n * Ack^(-1)(n, m)).
'''
def mst(steinergraph, tree_vertices):
#print('entering mst with tree_vertices: ', tree_vertices)
graph = steinergraph.edges
n = tree_vertices.size
father = numpy.arange(n)
#father = numpy.arange(n)
rank = numpy.zeros(n)
cnt = 0
for i in tree_vertices:
cnt += i
#print('initial cnt: ', cnt)
weight = 0
#print(tree_vertices)
#print(cnt)
for edge in graph:
a = edge[0]
b = edge[1]
rootA = root(father, a)
rootB = root(father, b)
if tree_vertices[a] and tree_vertices[b] and rootA != rootB:
weight += edge[2]
if rank[rootA] > rank[rootB]:
father[rootB] = rootA
else:
if rank[rootA] < rank[rootB]:
father[rootA] = rootB
else:
father[rootA] = rootB
rank[rootB] += 1
cnt -= 1
'''
print('Weight:', weight)
print('Count:', cnt)
print('Treevert', tree_vertices)
print('Fitness:', fitness)
'''
#print('leaving mst with weight: ', weight, ' and cnt: ', cnt)
return weight, cnt
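# Illustrative usage sketch (not part of the original module): a tiny triangle
# graph and a call to mst()/score().  The graph wrapper, the excluded vertex
# and the cost constants are example values; edges are assumed to be
# (u, v, weight) triples sorted by weight, as described above.
def _example_usage():
    import collections
    Graph = collections.namedtuple('Graph', ['edges'])
    g = Graph(edges=[(0, 1, 1), (1, 2, 2), (0, 2, 3)])
    tree_vertices = numpy.array([1, 1, 1, 0])   # vertex 3 is not allowed
    weight, cnt = mst(g, tree_vertices)         # -> (3, 1): weight 3, one component
    return score(g, tree_vertices, 1000, 100)   # -> 1000 - (3 + 0 * 100) = 997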
def score(graph, tree_vertices, max_fitness, component_cost):
weight, cnt = mst(graph, tree_vertices)
fitness = (max_fitness - (weight + ((cnt - 1) * component_cost)))
    return fitness
 | {
"content_hash": "25326abfc15cc9765b3b436199bdd345",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 116,
"avg_line_length": 32.05714285714286,
"alnum_prop": 0.6158645276292335,
"repo_name": "au-re/steiner-tree",
"id": "bae88438682e1b69ae2aa3e3e762de5db45c8cf6",
"size": "2244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fitness_calculator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14706"
}
],
"symlink_target": ""
} |
"""Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This is a helper script to help integrate monitoring systems with CitoEngine.
For more information, refer to http://citoengine.readthedocs.org/
May the force be with you!
"""
import sys
import re
import csv
try:
from argparse import ArgumentParser
except ImportError:
print 'Please run "pip install argparse"'
sys.exit(1)
def parse_arguments():
options = dict()
args = ArgumentParser()
args.add_argument('--config-file', '-c', help='Monitoring service\'s config file', required=True, dest='cfg_file')
args.add_argument('--type', '-t', help='Config type', choices=['nagios', 'sensu'], required=True, dest='config_type')
args.add_argument('--debug', help='Debug mode', required=False, action='store_true', dest='debug')
args.add_argument('--events-file', '-e', help='Events csv file', required=False, dest='events_file')
args.add_argument('--out', '-o', help='Save output to a file', required=False, default='stdout', dest='out_file')
xgroup = args.add_mutually_exclusive_group(required=True)
xgroup.add_argument('--parse', '-p', help='Parse config and display the output', action='store_true', dest='parse')
xgroup.add_argument('--generate', '-g', help='Generate config and display the output', action='store_true', dest='generate')
options = args.parse_args()
return options
class CitoConfigParser(object):
def __init__(self, *args, **kwargs):
self.cfg_file = kwargs['cfg_file']
self.service_defs = dict()
self.config_type = kwargs['config_type']
if 'debug' in kwargs:
self.debug = True
else:
self.debug = False
if self.config_type == 'nagios':
self.pattern = r'service_description\s+([\w\-\s]+)\b'
else:
raise ValueError('Config type %s does not have a valid pattern' % self.config_type)
self.events_def = dict()
if kwargs['out_file'] != 'stdout':
self.output = open(kwargs['out_file'], 'w')
else:
self.output = None
def grep_pattern(self, line):
match = re.compile(self.pattern).search(line)
if match:
svc = match.groups()[0]
return svc
else:
return None
def output_writer(self, line):
if self.output:
self.output.write(line + '\n')
else:
print line
def parse_config_file(self):
for line_num, line in enumerate(open(self.cfg_file)):
svc = self.grep_pattern(line)
if svc:
if svc in self.service_defs:
if self.debug:
print "Duplicate Service: %s at line %s and %s" % (svc, self.service_defs[svc], line_num)
else:
self.service_defs[svc] = line_num
def parse_events_file(self, events_file):
with open(events_file) as csv_file:
csvreader = csv.reader(csv_file)
for row in csvreader:
self.events_def[row[6]] = row[0]
def print_service_deps(self):
self.parse_config_file()
for svc in self.service_defs:
self.output_writer(svc)
def generate_new_config(self, events_file):
self.parse_config_file()
self.parse_events_file(events_file)
for line in open(self.cfg_file):
svc = self.grep_pattern(line)
self.output_writer(line.strip())
if svc:
if self.config_type == 'nagios':
try:
self.output_writer('_CITOEVENTID\t\t\t\t%s' % self.events_def[svc])
except KeyError:
print "ERROR: Cannot find event_id for service:%s in %s" % (svc, events_file)
print "Event defs \n\n%s" % self.events_def
sys.exit(1)
else:
raise ValueError('Cannot generate config for %s config_type, yet.' % self.config_type)
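# Illustrative usage (not part of the original script), based on the argparse
# options defined above; the file names are placeholders:
#
#   python cito_config_parser.py -c nagios.cfg -t nagios --parse
#   python cito_config_parser.py -c nagios.cfg -t nagios --generate \
#       -e events.csv -o nagios_with_cito.cfg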
if __name__ == "__main__":
options = parse_arguments()
c = CitoConfigParser(cfg_file=options.cfg_file, config_type=options.config_type, out_file=options.out_file)
if options.parse:
c.print_service_deps()
elif options.generate:
if not options.events_file:
print 'You need to call this script with --events-file <filename>'
sys.exit(1)
else:
c.generate_new_config(options.events_file)
| {
"content_hash": "7f4e210178cf0ca0a1d31d2703fb2cb9",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 128,
"avg_line_length": 39.37795275590551,
"alnum_prop": 0.6050789842031594,
"repo_name": "CitoEngine/integration_tools",
"id": "39214259628b900ddcdbaea318ac5d16107da0aa",
"size": "5023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cito_config_parser.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7753"
}
],
"symlink_target": ""
} |
import logging
import traceback
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.api import taskqueue
import config
from siteinadropbox import models
from siteinadropbox import controller
class FetchWorker(webapp.RequestHandler):
def post(self):
"""
implementation note:
If a push task request handler returns an HTTP status code within the range 200 - 299,
App Engine considers the task to have completed successfully. If the task returns a
status code outside of this range, App Engine retries the task until it succeeds.
We treat FormatErrors and DropboxErrors specially.
All other errors are assumed to be bad code: An error is logged and status 200 is returned
"""
resource=db.get(self.request.get('key'))
if not resource:
logging.info('FetchWorker failed to find resource for %s'%self.request.get('key'))
return #Do not fail: we want the queue item to die
new_revision=self.request.get_range('new_revision')
action=self.request.get('action', 'fetch')
if not hasattr(resource, action):
logging.info('FetchWorker: Resource object %s does not have method %s'%(resource, action))
return
logging.debug('Fetchworker, will initiate %s on %s (new revision: %d)'%(action, resource, new_revision))
gov = None
try:
gov = controller.get_current_site_controller()
getattr(resource,action)(gov, new_revision=new_revision)
except (models.InvalidSiteError, models.DropboxError), e:
logging.warn('Unable to access dropbox: %s'%e)
except models.FormatError, e:
logging.debug('Format error %s reported -- notifying owner'%e)
if gov:
gov.handle_format_error(resource, e)
except BaseException, e:
logging.error('BUG: Unexpected exception while executing %s on %s'%(action,resource))
logging.debug('Exception message: %s'%e)
logging.debug('Stacktrace: \n%s'%traceback.format_exc())
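# Illustrative sketch (not part of the original module): how a fetch task for
# this handler might be enqueued.  The task URL is an assumption; the handler
# itself only relies on the 'key', 'new_revision' and 'action' parameters
# read above.
def _example_enqueue_fetch(resource, new_revision):
    taskqueue.add(
        url='/worker/fetch',  # assumed route mapped to FetchWorker
        params={
            'key': str(resource.key()),
            'new_revision': new_revision,
            'action': 'fetch',
        })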
| {
"content_hash": "fa5b19acdb2a1f12e456e0a9133afbd9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 112,
"avg_line_length": 41.886792452830186,
"alnum_prop": 0.6554054054054054,
"repo_name": "Japanuspus/Site-in-a-Dropbox",
"id": "c77f63871d1b3d79fb4237654a9f442878a4b16a",
"size": "2220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/siteinadropbox/handlers/resourcehandlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16839"
},
{
"name": "Python",
"bytes": "288464"
}
],
"symlink_target": ""
} |
import numbers
from . import Image, ImageColor
from ._util import isStringType
"""
A simple 2D drawing interface for PIL images.
<p>
Application code should use the <b>Draw</b> factory, instead of
directly.
"""
class ImageDraw(object):
def __init__(self, im, mode=None):
"""
Create a drawing instance.
:param im: The image to draw in.
:param mode: Optional mode to use for color values. For RGB
images, this argument can be RGB or RGBA (to blend the
drawing into the image). For all other modes, this argument
must be the same as the image mode. If omitted, the mode
defaults to the mode of the image.
"""
im.load()
if im.readonly:
im._copy() # make it writeable
blend = 0
if mode is None:
mode = im.mode
if mode != im.mode:
if mode == "RGBA" and im.mode == "RGB":
blend = 1
else:
raise ValueError("mode mismatch")
if mode == "P":
self.palette = im.palette
else:
self.palette = None
self.im = im.im
self.draw = Image.core.draw(self.im, blend)
self.mode = mode
if mode in ("I", "F"):
self.ink = self.draw.draw_ink(1, mode)
else:
self.ink = self.draw.draw_ink(-1, mode)
if mode in ("1", "P", "I", "F"):
# FIXME: fix Fill2 to properly support matte for I+F images
self.fontmode = "1"
else:
self.fontmode = "L" # aliasing is okay for other modes
self.fill = 0
self.font = None
def getfont(self):
"""
Get the current default font.
:returns: An image font."""
if not self.font:
# FIXME: should add a font repository
from . import ImageFont
self.font = ImageFont.load_default()
return self.font
def _getink(self, ink, fill=None):
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not isinstance(ink, numbers.Number):
ink = self.palette.getcolor(ink)
ink = self.draw.draw_ink(ink, self.mode)
if fill is not None:
if isStringType(fill):
fill = ImageColor.getcolor(fill, self.mode)
if self.palette and not isinstance(fill, numbers.Number):
fill = self.palette.getcolor(fill)
fill = self.draw.draw_ink(fill, self.mode)
return ink, fill
def arc(self, xy, start, end, fill=None):
"""Draw an arc."""
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_arc(xy, start, end, ink)
def bitmap(self, xy, bitmap, fill=None):
"""Draw a bitmap."""
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink)
def chord(self, xy, start, end, fill=None, outline=None):
"""Draw a chord."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_chord(xy, start, end, ink, 0)
def ellipse(self, xy, fill=None, outline=None):
"""Draw an ellipse."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_ellipse(xy, fill, 1)
if ink is not None:
self.draw.draw_ellipse(xy, ink, 0)
def line(self, xy, fill=None, width=0):
"""Draw a line, or a connected sequence of line segments."""
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_lines(xy, ink, width)
def shape(self, shape, fill=None, outline=None):
"""(Experimental) Draw a shape."""
shape.close()
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_outline(shape, fill, 1)
if ink is not None:
self.draw.draw_outline(shape, ink, 0)
def pieslice(self, xy, start, end, fill=None, outline=None):
"""Draw a pieslice."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_pieslice(xy, start, end, ink, 0)
def point(self, xy, fill=None):
"""Draw one or more individual pixels."""
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_points(xy, ink)
def polygon(self, xy, fill=None, outline=None):
"""Draw a polygon."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_polygon(xy, fill, 1)
if ink is not None:
self.draw.draw_polygon(xy, ink, 0)
def rectangle(self, xy, fill=None, outline=None):
"""Draw a rectangle."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_rectangle(xy, fill, 1)
if ink is not None:
self.draw.draw_rectangle(xy, ink, 0)
def _multiline_check(self, text):
"""Draw text."""
split_character = "\n" if isinstance(text, str) else b"\n"
return split_character in text
def _multiline_split(self, text):
split_character = "\n" if isinstance(text, str) else b"\n"
return text.split(split_character)
    def text(self, xy, text, fill=None, font=None, anchor=None,
             *args, **kwargs):
        """Draw the given string at the given position."""
if self._multiline_check(text):
return self.multiline_text(xy, text, fill, font, anchor,
*args, **kwargs)
ink, fill = self._getink(fill)
if font is None:
font = self.getfont()
if ink is None:
ink = fill
if ink is not None:
try:
mask, offset = font.getmask2(text, self.fontmode, *args, **kwargs)
xy = xy[0] + offset[0], xy[1] + offset[1]
except AttributeError:
try:
mask = font.getmask(text, self.fontmode, *args, **kwargs)
except TypeError:
mask = font.getmask(text)
self.draw.draw_bitmap(xy, mask, ink)
def multiline_text(self, xy, text, fill=None, font=None, anchor=None,
spacing=4, align="left", direction=None, features=None):
widths = []
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font)
widths.append(line_width)
max_width = max(max_width, line_width)
left, top = xy
for idx, line in enumerate(lines):
if align == "left":
pass # left = x
elif align == "center":
left += (max_width - widths[idx]) / 2.0
elif align == "right":
left += (max_width - widths[idx])
else:
assert False, 'align must be "left", "center" or "right"'
self.text((left, top), line, fill, font, anchor,
direction=direction, features=features)
top += line_spacing
left = xy[0]
def textsize(self, text, font=None, spacing=4, direction=None,
features=None):
"""Get the size of a given string, in pixels."""
if self._multiline_check(text):
return self.multiline_textsize(text, font, spacing,
direction, features)
if font is None:
font = self.getfont()
return font.getsize(text, direction, features)
def multiline_textsize(self, text, font=None, spacing=4, direction=None,
features=None):
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font, spacing,
direction, features)
max_width = max(max_width, line_width)
return max_width, len(lines)*line_spacing
def Draw(im, mode=None):
"""
A simple 2D drawing interface for PIL images.
:param im: The image to draw in.
:param mode: Optional mode to use for color values. For RGB
images, this argument can be RGB or RGBA (to blend the
drawing into the image). For all other modes, this argument
must be the same as the image mode. If omitted, the mode
defaults to the mode of the image.
"""
try:
return im.getdraw(mode)
except AttributeError:
return ImageDraw(im, mode)
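# Illustrative usage sketch (not part of the original module): create an RGB
# image, obtain a drawing context via the Draw factory and draw a couple of
# primitives.  The size and colors are arbitrary example values.
def _draw_example():
    im = Image.new("RGB", (100, 100), "white")
    d = Draw(im)
    d.line([(0, 0), (99, 99)], fill="black", width=1)
    d.rectangle([10, 10, 50, 50], fill="red", outline="blue")
    return im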
# experimental access to the outline API
try:
Outline = Image.core.outline
except AttributeError:
Outline = None
def getdraw(im=None, hints=None):
"""
(Experimental) A more advanced 2D drawing interface for PIL images,
based on the WCK interface.
:param im: The image to draw in.
:param hints: An optional list of hints.
:returns: A (drawing context, drawing resource factory) tuple.
"""
# FIXME: this needs more work!
# FIXME: come up with a better 'hints' scheme.
handler = None
if not hints or "nicest" in hints:
try:
from . import _imagingagg as handler
except ImportError:
pass
if handler is None:
from . import ImageDraw2 as handler
if im:
im = handler.Draw(im)
return im, handler
def floodfill(image, xy, value, border=None, thresh=0):
"""
(experimental) Fills a bounded region with a given color.
:param image: Target image.
:param xy: Seed position (a 2-item coordinate tuple).
:param value: Fill color.
:param border: Optional border value. If given, the region consists of
pixels with a color different from the border color. If not given,
the region consists of pixels having the same color as the seed
pixel.
:param thresh: Optional threshold value which specifies a maximum
tolerable difference of a pixel value from the 'background' in
order for it to be replaced. Useful for filling regions of non-
homogeneous, but similar, colors.
"""
# based on an implementation by Eric S. Raymond
pixel = image.load()
x, y = xy
try:
background = pixel[x, y]
if _color_diff(value, background) <= thresh:
return # seed point already has fill color
pixel[x, y] = value
except (ValueError, IndexError):
return # seed point outside image
edge = [(x, y)]
if border is None:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if _color_diff(p, background) <= thresh:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
else:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p != value and p != border:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
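# Illustrative usage sketch (not part of the original module): flood-fill the
# inside of a drawn outline, treating the outline color as the border.  The
# image size, seed point and colors are arbitrary example values.
def _floodfill_example():
    im = Image.new("RGB", (64, 64), (255, 255, 255))
    d = Draw(im)
    d.ellipse([8, 8, 56, 56], outline=(0, 0, 0))
    floodfill(im, (32, 32), (255, 0, 0), border=(0, 0, 0))
    return im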
def _color_diff(rgb1, rgb2):
"""
Uses 1-norm distance to calculate difference between two rgb values.
"""
return abs(rgb1[0]-rgb2[0]) + abs(rgb1[1]-rgb2[1]) + abs(rgb1[2]-rgb2[2])
| {
"content_hash": "a4cc4827b42d4396790aad12b556a2fa",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 82,
"avg_line_length": 35.112994350282484,
"alnum_prop": 0.5345132743362832,
"repo_name": "isabernardes/Heriga",
"id": "89df2733811f4c0bb9700cba37d5931ebfbd5c85",
"size": "13641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Herigaenv/lib/python2.7/site-packages/PIL/ImageDraw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "662999"
},
{
"name": "HTML",
"bytes": "116009"
},
{
"name": "JavaScript",
"bytes": "848298"
},
{
"name": "Python",
"bytes": "5703559"
},
{
"name": "Shell",
"bytes": "3711"
}
],
"symlink_target": ""
} |
from google.appengine.api import users
import cgi
import os
import urllib
import jinja2
import webapp2
import model
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.runtime import DeadlineExceededError
"""
The front Thegither page
"""
class MainBoard(webapp2.RequestHandler):
"""
Process an HTTP GET request
"""
def get(self):
# Fetch the board from the datastore
template_values = {
'body_class': 'main',
}
# Get the user making the query, and set the template values accordingly
user = users.get_current_user()
if user:
template_values.update({
'user': user,
'is_admin': users.is_current_user_admin(),
'sign_off_url': users.create_logout_url('/'),
})
else:
template_values.update({
'sign_on_url': users.create_login_url(self.request.uri),
})
# Now display the page
template = JINJA_ENVIRONMENT.get_template('template/main.html')
self.response.write(template.render(template_values))
application = webapp2.WSGIApplication([
('/', MainBoard)
], debug=True)
| {
"content_hash": "67268e00fe63aa0fc2baed7f4a43eb49",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 22.737704918032787,
"alnum_prop": 0.6596971881759193,
"repo_name": "bpreece/thegither",
"id": "ea0fd89fa7e35d565e3706d4d87a56e5fc394b8d",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thegither-dev/thegither/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26555"
},
{
"name": "Python",
"bytes": "27718"
}
],
"symlink_target": ""
} |
"""
Spanish-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .es_provinces import PROVINCE_CHOICES
from .es_regions import REGION_CHOICES
class ESPostalCodeField(RegexField):
"""
    A form field that validates its input as a Spanish postal code.
    A Spanish postal code is a five-digit string whose first two digits,
    between 01 and 52, identify the province.
"""
default_error_messages = {
'invalid': _('Enter a valid postal code in the range and format 01XXX - 52XXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ESPostalCodeField, self).__init__(
r'^(0[1-9]|[1-4][0-9]|5[0-2])\d{3}$',
max_length, min_length, *args, **kwargs)
class ESPhoneNumberField(RegexField):
"""
A form field that validates its input as a Spanish phone number.
    Information numbers are omitted.
    Spanish phone numbers are nine-digit numbers whose first digit is 6 or 7
    (for cell phones), 8 (for special phones), or 9 (for landlines and
    special phones).
TODO: accept and strip characters like dot, hyphen... in phone number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats 6XXXXXXXX, 8XXXXXXXX or 9XXXXXXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ESPhoneNumberField, self).__init__(r'^(6|7|8|9)\d{8}$',
max_length, min_length, *args, **kwargs)
class ESIdentityCardNumberField(RegexField):
"""
Spanish NIF/NIE/CIF (Fiscal Identification Number) code.
Validates three diferent formats:
NIF (individuals): 12345678A
CIF (companies): A12345678
NIE (foreigners): X12345678A
according to a couple of simple checksum algorithms.
Value can include a space or hyphen separator between number and letters.
Number length is not checked for NIF (or NIE), old values start with a 1,
and future values can contain digits greater than 8. The CIF control digit
can be a number or a letter depending on company type. Algorithm is not
public, and different authors have different opinions on which ones allows
letters, so both validations are assumed true for all types.
"""
default_error_messages = {
'invalid': _('Please enter a valid NIF, NIE, or CIF.'),
'invalid_only_nif': _('Please enter a valid NIF or NIE.'),
'invalid_nif': _('Invalid checksum for NIF.'),
'invalid_nie': _('Invalid checksum for NIE.'),
'invalid_cif': _('Invalid checksum for CIF.'),
}
def __init__(self, only_nif=False, max_length=None, min_length=None, *args, **kwargs):
self.only_nif = only_nif
self.nif_control = 'TRWAGMYFPDXBNJZSQVHLCKE'
self.cif_control = 'JABCDEFGHI'
self.cif_types = 'ABCDEFGHKLMNPQS'
self.nie_types = 'XT'
id_card_re = re.compile(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), re.IGNORECASE)
super(ESIdentityCardNumberField, self).__init__(
id_card_re, max_length, min_length,
error_message=self.default_error_messages['invalid%s' % (self.only_nif and '_only_nif' or '')], *args, **kwargs)
def clean(self, value):
super(ESIdentityCardNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
nif_get_checksum = lambda d: self.nif_control[int(d) % 23]
value = value.upper().replace(' ', '').replace('-', '')
m = re.match(r'^([%s]?)[ -]?(\d+)[ -]?([%s]?)$' % (self.cif_types + self.nie_types, self.nif_control + self.cif_control), value)
letter1, number, letter2 = m.groups()
if not letter1 and letter2:
# NIF
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nif'])
elif letter1 in self.nie_types and letter2:
# NIE
if letter2 == nif_get_checksum(number):
return value
else:
raise ValidationError(self.error_messages['invalid_nie'])
elif not self.only_nif and letter1 in self.cif_types and len(number) in [7, 8]:
# CIF
if not letter2:
number, letter2 = number[:-1], int(number[-1])
checksum = cif_get_checksum(number)
if letter2 in (checksum, self.cif_control[checksum]):
return value
else:
raise ValidationError(self.error_messages['invalid_cif'])
else:
raise ValidationError(self.error_messages['invalid'])
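# Worked checksum example (illustrative, not part of the original module):
# for the NIF number 12345678, 12345678 % 23 == 14 and position 14 of the
# control string 'TRWAGMYFPDXBNJZSQVHLCKE' is 'Z', so 12345678Z validates.
#
#     >>> 'TRWAGMYFPDXBNJZSQVHLCKE'[12345678 % 23]
#     'Z'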
class ESCCCField(RegexField):
"""
A form field that validates its input as a Spanish bank account or CCC
(Codigo Cuenta Cliente).
Spanish CCC is in format EEEE-OOOO-CC-AAAAAAAAAA where:
E = entity
O = office
C = checksum
A = account
It's also valid to use a space as delimiter, or to use no delimiter.
    The first checksum digit validates the entity and office, and the last one
    validates the account. Validation is done by multiplying every digit of the
    10-digit value (left-padded with zeros if necessary) by its positional
    weight from the sequence 1, 2, 4, 8, 5, 10, 9, 7, 3, 6, summing the
    products, and subtracting the sum modulo 11 from 11. The result is the
    check digit, except that 10 maps to 1 and 11 maps to 0.
TODO: allow IBAN validation too
"""
default_error_messages = {
'invalid': _('Please enter a valid bank account number in format XXXX-XXXX-XX-XXXXXXXXXX.'),
'checksum': _('Invalid checksum for bank account number.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ESCCCField, self).__init__(r'^\d{4}[ -]?\d{4}[ -]?\d{2}[ -]?\d{10}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
super(ESCCCField, self).clean(value)
if value in EMPTY_VALUES:
return ''
control_str = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]
m = re.match(r'^(\d{4})[ -]?(\d{4})[ -]?(\d{2})[ -]?(\d{10})$', value)
entity, office, checksum, account = m.groups()
get_checksum = lambda d: str(11 - sum([int(digit) * int(control) for digit, control in zip(d, control_str)]) % 11).replace('10', '1').replace('11', '0')
if get_checksum('00' + entity + office) + get_checksum(account) == checksum:
return value
else:
raise ValidationError(self.error_messages['checksum'])
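# Worked checksum example (illustrative, not part of the original module):
# for the account 2100-0418-45-0200051332, weighting '0021000418' (entity and
# office, left-padded to 10 digits) by 1, 2, 4, 8, 5, 10, 9, 7, 3, 6 sums to
# 95 and 11 - (95 % 11) = 4, while weighting the account part '0200051332'
# sums to 105 and 11 - (105 % 11) = 5.  The computed control digits '45'
# match the third group, so the value validates.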
class ESRegionSelect(Select):
"""
A Select widget that uses a list of spanish regions as its choices.
"""
def __init__(self, attrs=None):
super(ESRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ESProvinceSelect(Select):
"""
A Select widget that uses a list of spanish provinces as its choices.
"""
def __init__(self, attrs=None):
super(ESProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
def cif_get_checksum(number):
s1 = sum([int(digit) for pos, digit in enumerate(number) if int(pos) % 2])
s2 = sum([sum([int(unit) for unit in str(int(digit) * 2)])
for pos, digit in enumerate(number) if not int(pos) % 2])
return (10 - ((s1 + s2) % 10)) % 10
| {
"content_hash": "6612333ed17a926f49b8058ea2f4335b",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 160,
"avg_line_length": 39.8041237113402,
"alnum_prop": 0.6108521108521109,
"repo_name": "yakky/django-localflavor",
"id": "ff450cb0339e95e9f1cf8c973c3b505e5c68f53c",
"size": "7746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "localflavor/es/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "573411"
},
{
"name": "Shell",
"bytes": "6725"
}
],
"symlink_target": ""
} |
from google.cloud import gke_backup_v1
def sample_list_backup_plans():
# Create a client
client = gke_backup_v1.BackupForGKEClient()
# Initialize request argument(s)
request = gke_backup_v1.ListBackupPlansRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_backup_plans(request=request)
# Handle the response
for response in page_result:
print(response)
# [END gkebackup_v1_generated_BackupForGKE_ListBackupPlans_sync]
| {
"content_hash": "741640d97861d44721f369d9bc446d7d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 25.2,
"alnum_prop": 0.7003968253968254,
"repo_name": "googleapis/python-gke-backup",
"id": "9faa7fbd77e586ba8f33554ca28c5d29328328c5",
"size": "1897",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/gkebackup_v1_generated_backup_for_gke_list_backup_plans_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1275539"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Tyrant'
copyright = '2013, Taylor "Nekroze" Lawson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tyrantdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Tyrant.tex', 'Tyrant Documentation',
'Taylor "Nekroze" Lawson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tyrant', 'Tyrant Documentation',
['Taylor "Nekroze" Lawson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tyrant', 'Tyrant Documentation',
'Taylor "Nekroze" Lawson', 'Tyrant', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Tyrant'
epub_author = 'Taylor "Nekroze" Lawson'
epub_publisher = 'Taylor "Nekroze" Lawson'
epub_copyright = '2013, Taylor "Nekroze" Lawson'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| {
"content_hash": "a61a793d20b2db3298fc0ed9a4bd83a4",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 80,
"avg_line_length": 31.457337883959045,
"alnum_prop": 0.7008788108929153,
"repo_name": "Nekroze/tyrant",
"id": "ab296cb8f185a3216122a0937ecc55f31ac382ad",
"size": "9657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28732"
},
{
"name": "Shell",
"bytes": "6701"
}
],
"symlink_target": ""
} |
import re
import urlparse
import formencode
from formencode import htmlfill, validators
class BaseForm(formencode.Schema):
allow_extra_fields = True
filter_extra_fields = True
_xsrf = validators.PlainText(not_empty=True, max=32)
def __init__(self, handler):
self._parmas = {}
self._values = {}
self._form_errors = {}
arguments = {}
# re-parse qs, keep_blank_values for formencode to validate
        # so that formencode's not_empty setting works.
request = handler.request
content_type = request.headers.get("Content-Type", "")
if request.method == "POST":
if content_type.startswith("application/x-www-form-urlencoded"):
arguments = urlparse.parse_qs(request.body, keep_blank_values=1)
for k, v in arguments.iteritems():
if len(v) == 1:
self._parmas[k] = v[0]
else:
# keep a list of values as list (or set)
self._parmas[k] = v
self._handler = handler
self._result = True
def validate(self):
try:
self._values = self.to_python(self._parmas)
self._result = True
self.__after__()
except formencode.Invalid, error:
self._values = error.value
self._form_errors = error.error_dict or {}
self._result = False
return self._result
# add custom error msg
def add_error(self, attr, msg):
self._result = False
self._form_errors[attr] = msg
def render(self, template_name, **kwargs):
html = self._handler.render_string(template_name, **kwargs)
if not self._result:
html = htmlfill.render(
html,
defaults=self._values,
errors=self._form_errors,
encoding="utf8",
)
self._handler.finish(html)
# post process hook
def __after__(self):
pass
class URL(validators.URL):
url_re = re.compile(r'''
^(http|https)://
(?:[%:\w]*@)? # authenticator
(?P<domain>[a-z0-9][a-z0-9\-]{0,62}\.)* # (sub)domain - alpha followed by 62max chars (63 total)
(?P<tld>[a-z]{2,}) # TLD
(?::[0-9]+)? # port
# files/delims/etc
(?P<path>/[a-z0-9\-\._~:/\?#\[\]@!%\$&\'\(\)\*\+,;=]*)?
$
''', re.I | re.VERBOSE)
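# Example usage (a sketch; the handler, template and field names are illustrative,
# not part of this module): a concrete form subclasses BaseForm, is built from a
# tornado.web.RequestHandler, and is checked with validate() before the handler acts.
#
#   class LoginForm(BaseForm):
#       email = validators.Email(not_empty=True)
#       site = URL(not_empty=True)
#
#   class LoginHandler(tornado.web.RequestHandler):
#       def post(self):
#           form = LoginForm(self)
#           if form.validate():
#               self.write("ok")
#           else:
#               form.render("login.html")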
| {
"content_hash": "99144914521eb5e387301005a29bec4f",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 104,
"avg_line_length": 31.819277108433734,
"alnum_prop": 0.4804998106777736,
"repo_name": "felinx/poweredsites",
"id": "7279184d64f6ad7688a822b78fcaf33a64dc4fa9",
"size": "3265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poweredsites/forms/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "156464"
},
{
"name": "Python",
"bytes": "155120"
},
{
"name": "Shell",
"bytes": "0"
}
],
"symlink_target": ""
} |
"""
A Python package for working with the Human Brain Project Model Validation Framework.
Andrew Davison and Shailesh Appukuttan, CNRS, 2017-2020
License: BSD 3-clause, see LICENSE.txt
"""
import os
import re
import getpass
import json
import platform
import socket
from importlib import import_module
from pathlib import Path
from urllib.error import URLError
from urllib.parse import urlparse, urljoin, urlencode, parse_qs, quote
from urllib.request import urlopen
import ast
import simplejson
import requests
from requests.auth import AuthBase
from .datastores import URI_SCHEME_MAP
from nameparser import HumanName
# check if running within Jupyter notebook inside Collab v2
try:
from clb_nb_utils import oauth
have_collab_token_handler = True
except ImportError:
have_collab_token_handler = False
TOKENFILE = os.path.expanduser("~/.hbptoken")
class ResponseError(Exception):
pass
def handle_response_error(message, response):
try:
structured_error_message = response.json()
except simplejson.errors.JSONDecodeError:
structured_error_message = None
if structured_error_message:
response_text = str(structured_error_message) # temporary, to be improved
else:
response_text = response.text
full_message = "{}. Response = {}".format(message, response_text)
raise ResponseError(full_message)
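# Example (sketch): handle_response_error("Error listing tests", response) raises
# ResponseError("Error listing tests. Response = ..."), preferring the JSON body
# of the response when it can be decoded.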
def renameNestedJSONKey(iterable, old_key, new_key):
if isinstance(iterable, list):
return [renameNestedJSONKey(item, old_key, new_key) for item in iterable]
if isinstance(iterable, dict):
for key in list(iterable.keys()):
if key == old_key:
iterable[new_key] = iterable.pop(key)
return iterable
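# Example (sketch): renameNestedJSONKey([{"project_id": "x"}, {"project_id": "y"}],
# "project_id", "collab_id") returns [{"collab_id": "x"}, {"collab_id": "y"}];
# the keys are renamed in place.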
class HBPAuth(AuthBase):
"""Attaches OIDC Bearer Authentication to the given Request object."""
def __init__(self, token):
# setup any auth-related data here
self.token = token
def __call__(self, r):
# modify and return the request
r.headers['Authorization'] = 'Bearer ' + self.token
return r
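# Example usage (sketch): attach the token as a Bearer header on any request,
# e.g. requests.get(some_url, auth=HBPAuth(access_token)).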
class BaseClient(object):
"""
Base class that handles HBP authentication
"""
# Note: Could possibly simplify the code later
__test__ = False
def __init__(self, username=None,
password=None,
environment="production",
token=None):
self.username = username
self.verify = True
self.environment = environment
self.token = token
if environment == "production":
self.url = "https://validation-v2.brainsimulation.eu"
self.client_id = "8a6b7458-1044-4ebd-9b7e-f8fd3469069c" # Prod ID
elif environment == "integration":
self.url = "https://validation-staging.brainsimulation.eu"
self.client_id = "8a6b7458-1044-4ebd-9b7e-f8fd3469069c"
elif environment == "dev":
self.url = "https://validation-dev.brainsimulation.eu"
self.client_id = "90c719e0-29ce-43a2-9c53-15cb314c2d0b" # Dev ID
else:
if os.path.isfile('config.json') and os.access('config.json', os.R_OK):
with open('config.json') as config_file:
config = json.load(config_file)
if environment in config:
if "url" in config[environment] and "client_id" in config[environment]:
self.url = config[environment]["url"]
self.client_id = config[environment]["client_id"]
self.verify = config[environment].get("verify_ssl", True)
else:
raise KeyError("Cannot load environment info: config.json does not contain sufficient info for environment = {}".format(environment))
else:
raise KeyError("Cannot load environment info: config.json does not contain environment = {}".format(environment))
else:
raise IOError("Cannot load environment info: config.json not found in the current directory.")
if self.token:
pass
elif password is None:
self.token = None
if have_collab_token_handler:
                # if we are running in a Jupyter notebook within the Collaboratory
# the token is already available
self.token = oauth.get_token()
elif os.path.exists(TOKENFILE):
# check for a stored token
with open(TOKENFILE) as fp:
# self.token = json.load(fp).get(username, None)["access_token"]
data = json.load(fp).get(username, None)
if data and "access_token" in data:
self.token = data["access_token"]
if not self._check_token_valid():
print("HBP authentication token is invalid or has expired. Will need to re-authenticate.")
self.token = None
else:
print("HBP authentication token file not having required JSON data.")
else:
print("HBP authentication token file not found locally.")
if self.token is None:
if not username:
print("\n==============================================")
print("Please enter your HBP username.")
username = input('HBP Username: ')
password = os.environ.get('HBP_PASS')
if password is not None:
try:
self._hbp_auth(username, password)
except Exception:
print("Authentication Failure. Possibly incorrect HBP password saved in environment variable 'HBP_PASS'.")
if not hasattr(self, 'config'):
try:
# prompt for password
print("Please enter your HBP password: ")
password = getpass.getpass()
self._hbp_auth(username, password)
except Exception:
print("Authentication Failure! Password entered is possibly incorrect.")
raise
with open(TOKENFILE, "w") as fp:
json.dump({username: self.config["token"]}, fp)
os.chmod(TOKENFILE, 0o600)
else:
try:
self._hbp_auth(username, password)
except Exception:
print("Authentication Failure! Password entered is possibly incorrect.")
raise
with open(TOKENFILE, "w") as fp:
json.dump({username: self.config["token"]}, fp)
os.chmod(TOKENFILE, 0o600)
self.auth = HBPAuth(self.token)
def _check_token_valid(self):
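        # Ping the EBRAINS Drive API; a 200 response means the stored token is still usable.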
url = "https://drive.ebrains.eu/api2/auth/ping/"
data = requests.get(url, auth=HBPAuth(self.token), verify=self.verify)
if data.status_code == 200:
return True
else:
return False
def _format_people_name(self, names):
# converts a string of people names separated by semi-colons
# into a list of dicts. Each list element will correspond to a
# single person, and consist of keys `given_name` and `family_name`
# list input - multiple persons
if isinstance(names, list):
if all("given_name" in entry.keys() for entry in names) and all("family_name" in entry.keys() for entry in names):
return names
else:
raise ValueError("Name input as list but without required keys: given_name, family_name")
# dict input - single person
if isinstance(names, dict):
if "given_name" in names.keys() and "family_name" in names.keys():
return [names]
else:
raise ValueError("Name input as dict but without required keys: given_name, family_name")
# string input - multiple persons
output_names_list = []
if names:
input_names_list = names.split(";")
for name in input_names_list:
parsed_name = HumanName(name.strip())
output_names_list.append({"given_name": " ".join(filter(None, [parsed_name.first, parsed_name.middle])), "family_name": parsed_name.last})
else:
output_names_list.append({"given_name": "", "family_name": ""})
return output_names_list
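    # Example (sketch): _format_people_name("Jane A. Doe; John Smith") returns
    #   [{"given_name": "Jane A.", "family_name": "Doe"},
    #    {"given_name": "John", "family_name": "Smith"}]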
# def exists_in_collab_else_create(self, collab_id):
# # TODO: needs to be updated for Collab v2
# """
# Checks with the hbp-collab-service if the Model Catalog / Validation Framework app
# exists inside the current collab (if run inside the Collaboratory), or Collab ID
# specified by the user (when run externally).
# """
# try:
# url = "https://services.humanbrainproject.eu/collab/v0/collab/"+str(collab_id)+"/nav/all/"
# response = requests.get(url, auth=HBPAuth(self.token), verify=self.verify)
# except ValueError:
# print("Error contacting hbp-collab-service for Collab info. Possibly invalid Collab ID: {}".format(collab_id))
# for app_item in response.json():
# if app_item["app_id"] == str(self.app_id):
# app_nav_id = app_item["id"]
# print ("Using existing {} app in this Collab. App nav ID: {}".format(self.app_name,app_nav_id))
# break
# else:
# url = "https://services.humanbrainproject.eu/collab/v0/collab/"+str(collab_id)+"/nav/root/"
# collab_root = requests.get(url, auth=HBPAuth(self.token), verify=self.verify).json()["id"]
# import uuid
# app_info = {"app_id": self.app_id,
# "context": str(uuid.uuid4()),
# "name": self.app_name,
# "order_index": "-1",
# "parent": collab_root,
# "type": "IT"}
# url = "https://services.humanbrainproject.eu/collab/v0/collab/"+str(collab_id)+"/nav/"
# headers = {'Content-type': 'application/json'}
# response = requests.post(url, data=json.dumps(app_info),
# auth=HBPAuth(self.token), headers=headers,
# verify=self.verify)
# app_nav_id = response.json()["id"]
# print ("New {} app created in this Collab. App nav ID: {}".format(self.app_name,app_nav_id))
# return app_nav_id
# def _configure_app_collab(self, config_data):
# # TODO: needs to be updated for Collab v2
# """
# Used to configure the apps inside a Collab. Example `config_data`:
# {
# "config":{
# "app_id":68489,
# "app_type":"model_catalog",
# "brain_region":"",
# "cell_type":"",
# "collab_id":"model-validation",
# "recording_modality":"",
# "model_scope":"",
# "abstraction_level":"",
# "organization":"",
# "species":"",
# "test_type":""
# },
# "only_if_new":False,
# "url":"https://validation-v1.brainsimulation.eu/parametersconfiguration-model-catalog/parametersconfigurationrest/"
# }
# """
# if not config_data["config"]["collab_id"]:
# raise ValueError("`collab_id` cannot be empty!")
# if not config_data["config"]["app_id"]:
# raise ValueError("`app_id` cannot be empty!")
# # check if the app has previously been configured: decide POST or PUT
# response = requests.get(config_data["url"]+"?app_id="+str(config_data["config"]["app_id"]), auth=self.auth, verify=self.verify)
# headers = {'Content-type': 'application/json'}
# config_data["config"]["id"] = config_data["config"]["app_id"]
# app_id = config_data["config"].pop("app_id")
# if not response.json()["param"]:
# response = requests.post(config_data["url"], data=json.dumps(config_data["config"]),
# auth=self.auth, headers=headers,
# verify=self.verify)
# if response.status_code == 201:
# print("New app has beeen created and sucessfully configured!")
# else:
# print("Error! App could not be configured. Response = " + str(response.content))
# else:
# if not config_data["only_if_new"]:
# response = requests.put(config_data["url"], data=json.dumps(config_data["config"]),
# auth=self.auth, headers=headers,
# verify=self.verify)
# if response.status_code == 202:
# print("Existing app has beeen sucessfully reconfigured!")
# else:
# print("Error! App could not be reconfigured. Response = " + str(response.content))
def _hbp_auth(self, username, password):
"""
HBP authentication
"""
redirect_uri = self.url + '/auth'
session = requests.Session()
# log-in page of model validation service
r_login = session.get(self.url + "/login", allow_redirects=False)
if r_login.status_code != 302:
raise Exception(
"Something went wrong. Status code {} from login, expected 302"
.format(r_login.status_code))
# redirects to EBRAINS IAM log-in page
iam_auth_url = r_login.headers.get('location')
r_iam1 = session.get(iam_auth_url, allow_redirects=False)
if r_iam1.status_code != 200:
raise Exception(
"Something went wrong loading EBRAINS log-in page. Status code {}"
.format(r_iam1.status_code))
# fill-in and submit form
match = re.search(r'action=\"(?P<url>[^\"]+)\"', r_iam1.text)
if not match:
raise Exception("Received an unexpected page")
iam_authenticate_url = match['url'].replace("&", "&")
r_iam2 = session.post(
iam_authenticate_url,
data={"username": username, "password": password},
headers={"Referer": iam_auth_url, "Host": "iam.ebrains.eu", "Origin": "https://iam.ebrains.eu"},
allow_redirects=False
)
if r_iam2.status_code != 302:
raise Exception(
"Something went wrong. Status code {} from authenticate, expected 302"
.format(r_iam2.status_code))
# redirects back to model validation service
r_val = session.get(r_iam2.headers['Location'])
if r_val.status_code != 200:
raise Exception(
"Something went wrong. Status code {} from final authentication step"
.format(r_val.status_code))
config = r_val.json()
self.token = config['token']['access_token']
self.config = config
@classmethod
def from_existing(cls, client):
"""Used to easily create a TestLibrary if you already have a ModelCatalog, or vice versa"""
obj = cls.__new__(cls)
for attrname in ("username", "url", "client_id", "token", "verify", "auth", "environment"):
setattr(obj, attrname, getattr(client, attrname))
obj._set_app_info()
return obj
def _get_attribute_options(self, param, valid_params):
if param in ("", "all"):
url = self.url + "/vocab/"
elif param in valid_params:
url = self.url + "/vocab/" + param.replace("_", "-") + "/"
else:
raise Exception("Specified attribute '{}' is invalid. Valid attributes: {}".format(param, valid_params))
return requests.get(url, auth=self.auth, verify=self.verify).json()
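    # Example (sketch): _get_attribute_options("cell_type", valid_params) issues a GET to
    # <base-url>/vocab/cell-type/ and returns the parsed JSON; "" or "all" fetches the
    # whole vocabulary from <base-url>/vocab/.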
class TestLibrary(BaseClient):
"""Client for the HBP Validation Test library.
The TestLibrary client manages all actions pertaining to tests and results.
The following actions can be performed:
==================================== ====================================
Action Method
==================================== ====================================
Get test definition :meth:`get_test_definition`
Get test as Python (sciunit) class :meth:`get_validation_test`
List test definitions :meth:`list_tests`
Add new test definition :meth:`add_test`
Edit test definition :meth:`edit_test`
Get test instances :meth:`get_test_instance`
List test instances :meth:`list_test_instances`
Add new test instance :meth:`add_test_instance`
Edit test instance :meth:`edit_test_instance`
Get valid attribute values :meth:`get_attribute_options`
Get test result :meth:`get_result`
List test results :meth:`list_results`
Register test result :meth:`register_result`
==================================== ====================================
Parameters
----------
username : string
Your HBP Collaboratory username. Not needed in Jupyter notebooks within the HBP Collaboratory.
password : string, optional
Your HBP Collaboratory password; advisable to not enter as plaintext.
If left empty, you would be prompted for password at run time (safer).
Not needed in Jupyter notebooks within the HBP Collaboratory.
environment : string, optional
Used to indicate whether being used for development/testing purposes.
Set as `production` as default for using the production system,
which is appropriate for most users. When set to `dev`, it uses the
`development` system. Other environments, if required, should be defined
inside a json file named `config.json` in the working directory. Example:
.. code-block:: JSON
{
"prod": {
"url": "https://validation-v1.brainsimulation.eu",
"client_id": "3ae21f28-0302-4d28-8581-15853ad6107d"
},
"dev_test": {
"url": "https://localhost:8000",
"client_id": "90c719e0-29ce-43a2-9c53-15cb314c2d0b",
"verify_ssl": false
}
}
token : string, optional
You may directly input a valid authenticated token from Collaboratory v1 or v2.
Note: you should use the `access_token` and NOT `refresh_token`.
Examples
--------
Instantiate an instance of the TestLibrary class
>>> test_library = TestLibrary(username="<<hbp_username>>", password="<<hbp_password>>")
>>> test_library = TestLibrary(token="<<token>>")
"""
__test__ = False
def __init__(self, username=None, password=None, environment="production", token=None):
super(TestLibrary, self).__init__(username, password, environment, token)
self._set_app_info()
def _set_app_info(self):
if self.environment == "production":
self.app_name = "Validation Framework"
elif self.environment == "dev":
self.app_name = "Validation Framework (dev)"
elif self.environment == "integration":
self.app_name = "Model Validation app (staging)"
# def set_app_config(self, collab_id="", only_if_new=False, recording_modality="", test_type="", species="", brain_region="", cell_type="", model_scope="", abstraction_level="", organization=""):
# # TODO: needs to be updated for Collab v2
# inputArgs = locals()
# params = {}
# params["url"] = self.url + "/parametersconfiguration-validation-app/parametersconfigurationrest/"
# params["only_if_new"] = only_if_new
# params["config"] = inputArgs
# params["config"].pop("self")
# params["config"].pop("only_if_new")
# params["config"]["app_type"] = "validation_app"
# self._configure_app_collab(params)
def get_test_definition(self, test_path="", test_id = "", alias=""):
"""Retrieve a specific test definition.
A specific test definition can be retrieved from the test library
in the following ways (in order of priority):
1. load from a local JSON file specified via `test_path`
2. specify the `test_id`
3. specify the `alias` (of the test)
Parameters
----------
test_path : string
Location of local JSON file with test definition.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
Note
----
Also see: :meth:`get_validation_test`
Returns
-------
dict
Information about the test.
Examples
--------
>>> test = test_library.get_test_definition("/home/shailesh/Work/dummy_test.json")
>>> test = test_library.get_test_definition(test_id="7b63f87b-d709-4194-bae1-15329daf3dec")
>>> test = test_library.get_test_definition(alias="CDT-6")
"""
if test_path == "" and test_id == "" and alias == "":
raise Exception("test_path or test_id or alias needs to be provided for finding a test.")
if test_path:
if os.path.isfile(test_path):
# test_path is a local path
with open(test_path) as fp:
test_json = json.load(fp)
else:
raise Exception("Error in local file path specified by test_path.")
else:
if test_id:
url = self.url + "/tests/" + test_id
else:
url = self.url + "/tests/" + quote(alias)
test_json = requests.get(url, auth=self.auth, verify=self.verify)
if test_json.status_code != 200:
handle_response_error("Error in retrieving test", test_json)
return test_json.json()
def get_validation_test(self, test_path="", instance_path="", instance_id ="", test_id = "", alias="", version="", **params):
"""Retrieve a specific test instance as a Python class (sciunit.Test instance).
A specific test definition can be specified
in the following ways (in order of priority):
1. load from a local JSON file specified via `test_path` and `instance_path`
2. specify `instance_id` corresponding to test instance in test library
3. specify `test_id` and `version`
4. specify `alias` (of the test) and `version`
Note: for (3) and (4) above, if `version` is not specified,
then the latest test version is retrieved
Parameters
----------
test_path : string
Location of local JSON file with test definition.
instance_path : string
Location of local JSON file with test instance metadata.
instance_id : UUID
System generated unique identifier associated with test instance.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
version : string
User-assigned identifier (unique for each test) associated with test instance.
**params :
Additional keyword arguments to be passed to the Test constructor.
Note
----
To confirm the priority of parameters for specifying tests and instances,
see :meth:`get_test_definition` and :meth:`get_test_instance`
Returns
-------
sciunit.Test
Returns a :class:`sciunit.Test` instance.
Examples
--------
>>> test = test_library.get_validation_test(alias="CDT-6", instance_id="36a1960e-3e1f-4c3c-a3b6-d94e6754da1b")
"""
if test_path == "" and instance_id == "" and test_id == "" and alias == "":
raise Exception("One of the following needs to be provided for finding the required test:\n"
"test_path, instance_id, test_id or alias")
else:
if instance_id:
# `instance_id` is sufficient for identifying both test and instance
test_instance_json = self.get_test_instance(instance_path=instance_path, instance_id=instance_id) # instance_path added just to maintain order of priority
test_id = test_instance_json["test_id"]
test_json = self.get_test_definition(test_path=test_path, test_id=test_id) # test_path added just to maintain order of priority
else:
test_json = self.get_test_definition(test_path=test_path, test_id=test_id, alias=alias)
test_id = test_json["id"] # in case test_id was not input for specifying test
test_instance_json = self.get_test_instance(instance_path=instance_path, instance_id=instance_id, test_id=test_id, version=version)
# Import the Test class specified in the definition.
# This assumes that the module containing the class is installed.
# In future we could add the ability to (optionally) install
# Python packages automatically.
path_parts = test_instance_json["path"].split(".")
cls_name = path_parts[-1]
module_name = ".".join(path_parts[:-1])
test_module = import_module(module_name)
test_cls = getattr(test_module, cls_name)
# Load the reference data ("observations")
observation_data = self._load_reference_data(test_json["data_location"])
# Create the :class:`sciunit.Test` instance
test_instance = test_cls(observation=observation_data, **params)
test_instance.uuid = test_instance_json["id"]
return test_instance
def list_tests(self, size=1000000, from_index=0, **filters):
"""Retrieve a list of test definitions satisfying specified filters.
The filters may specify one or more attributes that belong
to a test definition. The following test attributes can be specified:
* alias
* name
* implementation_status
* brain_region
* species
* cell_type
* data_type
* recording_modality
* test_type
* score_type
* author
Parameters
----------
size : positive integer
Max number of tests to be returned; default is set to 1000000.
from_index : positive integer
Index of first test to be returned; default is set to 0.
**filters : variable length keyword arguments
To be used to filter test definitions from the test library.
Returns
-------
list
            List of test definitions satisfying specified filters.
Examples
--------
>>> tests = test_library.list_tests()
>>> tests = test_library.list_tests(test_type="single cell activity")
>>> tests = test_library.list_tests(test_type="single cell activity", cell_type="Pyramidal Cell")
"""
valid_filters = ["alias", "name", "implementation_status", "brain_region", "species", "cell_type", "data_type", "recording_modality", "test_type", "score_type", "author"]
params = locals()["filters"]
for filter in params:
if filter not in valid_filters:
raise ValueError("The specified filter '{}' is an invalid filter!\nValid filters are: {}".format(filter, valid_filters))
url = self.url + "/tests/"
url += "?" + urlencode(params, doseq=True) + "&size=" + str(size) + "&from_index=" + str(from_index)
response = requests.get(url, auth=self.auth, verify=self.verify)
if response.status_code != 200:
handle_response_error("Error listing tests", response)
tests = response.json()
return tests
def add_test(self, name=None, alias=None, author=None,
species=None, age=None, brain_region=None, cell_type=None,
publication=None, description=None, recording_modality=None, test_type=None, score_type=None, data_location=None, data_type=None,
instances=[]):
"""Register a new test on the test library.
This allows you to add a new test to the test library.
Parameters
----------
name : string
Name of the test definition to be created.
alias : string, optional
User-assigned unique identifier to be associated with test definition.
author : string
Name of person creating the test.
species : string
The species from which the data was collected.
age : string
The age of the specimen.
brain_region : string
The brain region being targeted in the test.
cell_type : string
The type of cell being examined.
recording_modality : string
Specifies the type of observation used in the test.
test_type : string
Specifies the type of the test.
score_type : string
The type of score produced by the test.
description : string
Experimental protocol involved in obtaining reference data.
data_location : string
URL of file containing reference data (observation).
data_type : string
The type of reference data (observation).
publication : string
Publication or comment (e.g. "Unpublished") to be associated with observation.
instances : list, optional
Specify a list of instances (versions) of the test.
Returns
-------
dict
            data of the test that has been created.
Examples
--------
>>> test = test_library.add_test(name="Cell Density Test", alias="", version="1.0", author="Shailesh Appukuttan",
species="Mouse (Mus musculus)", age="TBD", brain_region="Hippocampus", cell_type="Other",
recording_modality="electron microscopy", test_type="network structure", score_type="Other", description="Later",
data_location="https://object.cscs.ch/v1/AUTH_c0a333ecf7c045809321ce9d9ecdfdea/sp6_validation_data/hippounit/feat_CA1_pyr_cACpyr_more_features.json",
data_type="Mean, SD", publication="Halasy et al., 1996",
repository="https://github.com/appukuttan-shailesh/morphounit.git", path="morphounit.tests.CellDensityTest")
"""
test_data = {}
args = locals()
for field in ["name", "alias", "author",
"species", "age", "brain_region", "cell_type",
"publication", "description", "recording_modality", "test_type", "score_type", "data_location", "data_type",
"instances"]:
if args[field]:
test_data[field] = args[field]
values = self.get_attribute_options()
for field in ("species", "brain_region", "cell_type", "recording_modality", "test_type", "score_type"):
if field in test_data and test_data[field] not in values[field] + [None]:
raise Exception("{} = '{}' is invalid.\nValue has to be one of these: {}".format(field, test_data[field], values[field]))
# format names of authors as required by API
if "author" in test_data:
test_data["author"] = self._format_people_name(test_data["author"])
# 'data_location' is now a list of urls
if not isinstance(test_data["data_location"], list):
test_data["data_location"] = [test_data["data_location"]]
url = self.url + "/tests/"
headers = {'Content-type': 'application/json'}
response = requests.post(url, data=json.dumps(test_data),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 201:
return response.json()
else:
handle_response_error("Error in adding test", response)
def edit_test(self, test_id=None, name=None, alias=None, author=None,
species=None, age=None, brain_region=None, cell_type=None,
publication=None, description=None, recording_modality=None, test_type=None, score_type=None, data_location=None, data_type=None, ):
"""Edit an existing test in the test library.
To update an existing test, the `test_id` must be provided. Any of the
other parameters may be updated.
Only the parameters being updated need to be specified.
Parameters
----------
name : string
Name of the test definition.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string, optional
User-assigned unique identifier to be associated with test definition.
author : string
Name of person who created the test.
species : string
The species from which the data was collected.
age : string
The age of the specimen.
brain_region : string
The brain region being targeted in the test.
cell_type : string
The type of cell being examined.
recording_modality : string
Specifies the type of observation used in the test.
test_type : string
Specifies the type of the test.
score_type : string
The type of score produced by the test.
description : string
Experimental protocol involved in obtaining reference data.
data_location : string
URL of file containing reference data (observation).
data_type : string
The type of reference data (observation).
publication : string
Publication or comment (e.g. "Unpublished") to be associated with observation.
Note
----
Test instances cannot be edited here.
This has to be done using :meth:`edit_test_instance`
Returns
-------
        dict
            data of the test that has been edited.
Examples
--------
test = test_library.edit_test(name="Cell Density Test", test_id="7b63f87b-d709-4194-bae1-15329daf3dec", alias="CDT-6", author="Shailesh Appukuttan", publication="Halasy et al., 1996",
species="Mouse (Mus musculus)", brain_region="Hippocampus", cell_type="Other", age="TBD", recording_modality="electron microscopy",
test_type="network structure", score_type="Other", protocol="To be filled sometime later", data_location="https://object.cscs.ch/v1/AUTH_c0a333ecf7c045809321ce9d9ecdfdea/sp6_validation_data/hippounit/feat_CA1_pyr_cACpyr_more_features.json", data_type="Mean, SD")
"""
if not test_id:
raise Exception("Test ID needs to be provided for editing a test.")
test_data = {}
args = locals()
for field in ["name", "alias", "author",
"species", "age", "brain_region", "cell_type",
"publication", "description", "recording_modality", "test_type", "score_type", "data_location", "data_type"]:
if args[field]:
test_data[field] = args[field]
values = self.get_attribute_options()
for field in ("species", "brain_region", "cell_type", "recording_modality", "test_type", "score_type"):
if field in test_data and test_data[field] not in values[field] + [None]:
raise Exception("{} = '{}' is invalid.\nValue has to be one of these: {}".format(field, test_data[field], values[field]))
# format names of authors as required by API
if "author" in test_data:
test_data["author"] = self._format_people_name(test_data["author"])
# 'data_location' is now a list of urls
if not isinstance(test_data["data_location"], list):
test_data["data_location"] = [test_data["data_location"]]
url = self.url + "/tests/" + test_id
headers = {'Content-type': 'application/json'}
response = requests.put(url, data=json.dumps(test_data),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 200:
return response.json()
else:
handle_response_error("Error in editing test", response)
def delete_test(self, test_id="", alias=""):
"""ONLY FOR SUPERUSERS: Delete a specific test definition by its test_id or alias.
A specific test definition can be deleted from the test library, along with all
associated test instances, in the following ways (in order of priority):
1. specify the `test_id`
2. specify the `alias` (of the test)
Parameters
----------
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
Note
----
* This feature is only for superusers!
Examples
--------
>>> test_library.delete_test(test_id="8c7cb9f6-e380-452c-9e98-e77254b088c5")
>>> test_library.delete_test(alias="B1")
"""
if test_id == "" and alias == "":
raise Exception("test ID or alias needs to be provided for deleting a test.")
elif test_id != "":
url = self.url + "/tests/" + test_id
else:
url = self.url + "/tests/" + quote(alias)
test_json = requests.delete(url, auth=self.auth, verify=self.verify)
if test_json.status_code == 403:
handle_response_error("Only SuperUser accounts can delete data", test_json)
elif test_json.status_code != 200:
handle_response_error("Error in deleting test", test_json)
def get_test_instance(self, instance_path="", instance_id="", test_id="", alias="", version=""):
"""Retrieve a specific test instance definition from the test library.
A specific test instance can be retrieved
in the following ways (in order of priority):
1. load from a local JSON file specified via `instance_path`
2. specify `instance_id` corresponding to test instance in test library
3. specify `test_id` and `version`
4. specify `alias` (of the test) and `version`
Note: for (3) and (4) above, if `version` is not specified,
then the latest test version is retrieved
Parameters
----------
instance_path : string
Location of local JSON file with test instance metadata.
instance_id : UUID
System generated unique identifier associated with test instance.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
version : string
User-assigned identifier (unique for each test) associated with test instance.
Returns
-------
dict
Information about the test instance.
Examples
--------
>>> test_instance = test_library.get_test_instance(test_id="7b63f87b-d709-4194-bae1-15329daf3dec", version="1.0")
>>> test_instance = test_library.get_test_instance(test_id="7b63f87b-d709-4194-bae1-15329daf3dec")
"""
if instance_path == "" and instance_id == "" and test_id == "" and alias == "":
raise Exception("instance_path or instance_id or test_id or alias needs to be provided for finding a test instance.")
if instance_path:
if os.path.isfile(instance_path):
# instance_path is a local path
with open(instance_path) as fp:
test_instance_json = json.load(fp)
else:
raise Exception("Error in local file path specified by instance_path.")
else:
test_identifier = test_id or alias
if instance_id:
url = self.url + "/tests/query/instances/" + instance_id
elif test_id and version:
url = self.url + "/tests/" + test_id + "/instances/?version=" + version
elif alias and version:
url = self.url + "/tests/" + quote(alias) + "/instances/?version=" + version
elif test_id and not version:
url = self.url + "/tests/" + test_id + "/instances/latest"
else:
url = self.url + "/tests/" + quote(alias) + "/instances/latest"
response = requests.get(url, auth=self.auth, verify=self.verify)
if response.status_code != 200:
handle_response_error("Error in retrieving test instance", response)
test_instance_json = response.json()
if isinstance(test_instance_json, list): # can have multiple instances with the same version but different parameters
if len(test_instance_json) == 1:
test_instance_json = test_instance_json[0]
elif len(test_instance_json) > 1:
return max(test_instance_json, key=lambda x: x['timestamp'])
return test_instance_json
def list_test_instances(self, instance_path="", test_id="", alias=""):
"""Retrieve list of test instances belonging to a specified test.
This can be retrieved in the following ways (in order of priority):
1. load from a local JSON file specified via `instance_path`
2. specify `test_id`
3. specify `alias` (of the test)
Parameters
----------
instance_path : string
Location of local JSON file with test instance metadata.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
Returns
-------
dict[]
Information about the test instances.
Examples
--------
>>> test_instances = test_library.list_test_instances(test_id="8b63f87b-d709-4194-bae1-15329daf3dec")
"""
if instance_path == "" and test_id == "" and alias == "":
raise Exception("instance_path or test_id or alias needs to be provided for finding test instances.")
if instance_path and os.path.isfile(instance_path):
# instance_path is a local path
with open(instance_path) as fp:
test_instances_json = json.load(fp)
else:
if test_id:
url = self.url + "/tests/" + test_id + "/instances/?size=100000"
else:
url = self.url + "/tests/" + quote(alias) + "/instances/?size=100000"
response = requests.get(url, auth=self.auth, verify=self.verify)
if response.status_code != 200:
handle_response_error("Error in retrieving test instances", response)
test_instances_json = response.json()
return test_instances_json
def add_test_instance(self, test_id="", alias="", repository="", path="", version="", description="", parameters=""):
"""Register a new test instance.
        This allows you to add a new instance to an existing test in the test library.
The `test_id` or `alias` needs to be specified as input parameter.
Parameters
----------
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
version : string
User-assigned identifier (unique for each test) associated with test instance.
repository : string
URL of Python package repository (e.g. github).
path : string
Python path (not filesystem path) to test source code within Python package.
description : string, optional
Text describing this specific test instance.
parameters : string, optional
Any additional parameters to be submitted to test, or used by it, at runtime.
Returns
-------
dict
data of test instance that has been created.
Examples
--------
>>> instance = test_library.add_test_instance(test_id="7b63f87b-d709-4194-bae1-15329daf3dec",
repository="https://github.com/appukuttan-shailesh/morphounit.git",
path="morphounit.tests.CellDensityTest",
version="3.0")
"""
instance_data = locals()
instance_data.pop("self")
for key, val in instance_data.items():
if val == "":
instance_data[key] = None
test_id = test_id or alias
if not test_id:
raise Exception("test_id or alias needs to be provided for finding the test.")
else:
url = self.url + "/tests/" + quote(test_id) + "/instances/"
headers = {'Content-type': 'application/json'}
response = requests.post(url, data=json.dumps(instance_data),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 201:
return response.json()
else:
handle_response_error("Error in adding test instance", response)
def edit_test_instance(self, instance_id="", test_id="", alias="", repository=None, path=None, version=None, description=None, parameters=None):
"""Edit an existing test instance.
        This allows you to edit an instance of an existing test in the test library.
The test instance can be specified in the following ways (in order of priority):
1. specify `instance_id` corresponding to test instance in test library
2. specify `test_id` and `version`
3. specify `alias` (of the test) and `version`
Only the parameters being updated need to be specified. You cannot
edit the test `version` in the latter two cases. To do so,
you must employ the first option above. You can retrieve the `instance_id`
via :meth:`get_test_instance`
Parameters
----------
instance_id : UUID
System generated unique identifier associated with test instance.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
repository : string
URL of Python package repository (e.g. github).
path : string
Python path (not filesystem path) to test source code within Python package.
version : string
User-assigned identifier (unique for each test) associated with test instance.
description : string, optional
Text describing this specific test instance.
parameters : string, optional
Any additional parameters to be submitted to test, or used by it, at runtime.
Returns
-------
dict
            data of the test instance that was edited.
Examples
--------
>>> instance = test_library.edit_test_instance(test_id="7b63f87b-d709-4194-bae1-15329daf3dec",
repository="https://github.com/appukuttan-shailesh/morphounit.git",
path="morphounit.tests.CellDensityTest",
version="4.0")
"""
test_identifier = test_id or alias
if instance_id == "" and (test_identifier == "" or version is None):
raise Exception("instance_id or (test_id, version) or (alias, version) needs to be provided for finding a test instance.")
instance_data = {}
args = locals()
for field in ("repository", "path", "version", "description", "parameters"):
value = args[field]
if value:
instance_data[field] = value
if instance_id:
url = self.url + "/tests/query/instances/" + instance_id
else:
url = self.url + "/tests/" + test_identifier + "/instances/?version=" + version
response0 = requests.get(url, auth=self.auth, verify=self.verify)
if response0.status_code != 200:
raise Exception("Invalid test identifier and/or version")
url = self.url + "/tests/query/instances/" + response0.json()[0]["id"] # todo: handle more than 1 instance in response
headers = {'Content-type': 'application/json'}
response = requests.put(url, data=json.dumps(instance_data), auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 200:
return response.json()
else:
handle_response_error("Error in editing test instance", response)
def delete_test_instance(self, instance_id="", test_id="", alias="", version=""):
"""ONLY FOR SUPERUSERS: Delete an existing test instance.
        This allows you to delete an instance of an existing test in the test library.
The test instance can be specified in the following ways (in order of priority):
1. specify `instance_id` corresponding to test instance in test library
2. specify `test_id` and `version`
3. specify `alias` (of the test) and `version`
Parameters
----------
instance_id : UUID
System generated unique identifier associated with test instance.
test_id : UUID
System generated unique identifier associated with test definition.
alias : string
User-assigned unique identifier associated with test definition.
version : string
User-assigned unique identifier associated with test instance.
Note
----
* This feature is only for superusers!
Examples
--------
        >>> test_library.delete_test_instance(test_id="8c7cb9f6-e380-452c-9e98-e77254b088c5")
        >>> test_library.delete_test_instance(alias="B1", version="1.0")
"""
test_identifier = test_id or alias
if instance_id == "" and (test_identifier == "" or version == ""):
raise Exception("instance_id or (test_id, version) or (alias, version) needs to be provided for finding a test instance.")
if instance_id:
url = self.url + "/tests/query/instances/" + instance_id
else:
url = self.url + "/tests/" + test_identifier + "/instances/" + version
response0 = requests.get(url, auth=self.auth, verify=self.verify)
if response0.status_code != 200:
raise Exception("Invalid test identifier and/or version")
url = self.url + "/tests/query/instances/" + response0.json()[0]["id"]
response = requests.delete(url, auth=self.auth, verify=self.verify)
if response.status_code == 403:
handle_response_error("Only SuperUser accounts can delete data", response)
elif response.status_code != 200:
handle_response_error("Error in deleting test instance", response)
def _load_reference_data(self, uri_list):
# Load the reference data ("observations").
observation_data = []
return_single = False
if not isinstance(uri_list, list):
uri_list = [uri_list]
return_single = True
for uri in uri_list:
parse_result = urlparse(uri)
datastore = URI_SCHEME_MAP[parse_result.scheme](auth=self.auth)
observation_data.append(datastore.load_data(uri))
if return_single:
return observation_data[0]
else:
return observation_data
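    # Example (sketch): a single URI returns one observation object, while a list of URIs
    # returns a list; the datastore used for each URI is chosen from URI_SCHEME_MAP by its
    # URI scheme.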
def get_attribute_options(self, param=""):
"""Retrieve valid values for test attributes.
Will return the list of valid values (where applicable) for various test attributes.
The following test attributes can be specified:
* cell_type
* test_type
* score_type
* brain_region
* recording_modality
* species
If an attribute is specified, then only values that correspond to it will be returned,
else values for all attributes are returned.
Parameters
----------
param : string, optional
Attribute of interest
Returns
-------
dict
Dictionary with key(s) as attribute(s), and value(s) as list of valid options.
Examples
--------
>>> data = test_library.get_attribute_options()
>>> data = test_library.get_attribute_options("cell types")
"""
valid_params = ["species", "brain_region", "cell_type", "test_type", "score_type", "recording_modality", "implementation_status"]
return self._get_attribute_options(param, valid_params)
def get_result(self, result_id=""):
"""Retrieve a test result.
        This allows you to retrieve the test result score and other related information.
The `result_id` needs to be specified as input parameter.
Parameters
----------
result_id : UUID
System generated unique identifier associated with result.
Returns
-------
dict
Information about the result retrieved.
Examples
--------
>>> result = test_library.get_result(result_id="901ac0f3-2557-4ae3-bb2b-37617312da09")
"""
if not result_id:
raise Exception("result_id needs to be provided for finding a specific result.")
else:
url = self.url + "/results/" + result_id
response = requests.get(url, auth=self.auth, verify=self.verify)
if response.status_code != 200:
handle_response_error("Error in retrieving result", response)
result_json = renameNestedJSONKey(response.json(), "project_id", "collab_id")
return result_json
def list_results(self, size=1000000, from_index=0, **filters):
"""Retrieve test results satisfying specified filters.
        This allows you to retrieve a list of test results with their scores
and other related information.
Parameters
----------
size : positive integer
Max number of results to be returned; default is set to 1000000.
from_index : positive integer
Index of first result to be returned; default is set to 0.
**filters : variable length keyword arguments
To be used to filter the results metadata.
Returns
-------
dict
Information about the results retrieved.
Examples
--------
>>> results = test_library.list_results()
>>> results = test_library.list_results(test_id="7b63f87b-d709-4194-bae1-15329daf3dec")
>>> results = test_library.list_results(id="901ac0f3-2557-4ae3-bb2b-37617312da09")
>>> results = test_library.list_results(model_instance_id="f32776c7-658f-462f-a944-1daf8765ec97")
"""
url = self.url + "/results/"
url += "?" + urlencode(filters, doseq=True) + "&size=" + str(size) + "&from_index=" + str(from_index)
response = requests.get(url, auth=self.auth, verify=self.verify)
if response.status_code != 200:
handle_response_error("Error in retrieving results", response)
result_json = response.json()
return renameNestedJSONKey(result_json, "project_id", "collab_id")
def register_result(self, test_result, data_store=None, collab_id=None):
"""Register test result with HBP Validation Results Service.
The score of a test, along with related output data such as figures,
can be registered on the validation framework.
Parameters
----------
test_result : :class:`sciunit.Score`
a :class:`sciunit.Score` instance returned by `test.judge(model)`
data_store : :class:`DataStore`
a :class:`DataStore` instance, for uploading related data generated by the test run, e.g. figures.
collab_id : str
String input specifying the Collab path, e.g. 'model-validation' to indicate Collab 'https://wiki.ebrains.eu/bin/view/Collabs/model-validation/'.
This is used to indicate the Collab where results should be saved.
Note
----
Source code for this method still contains comments/suggestions from
previous client. To be removed or implemented.
Returns
-------
dict
data of test result that has been created.
Examples
--------
>>> score = test.judge(model)
>>> response = test_library.register_result(test_result=score)
"""
if collab_id is None:
collab_id = test_result.related_data.get("collab_id", None)
if collab_id is None:
raise Exception("Don't know where to register this result. Please specify `collab_id`!")
model_catalog = ModelCatalog.from_existing(self)
model_instance_uuid = model_catalog.find_model_instance_else_add(test_result.model)["id"]
results_storage = []
if data_store:
if not data_store.authorized:
data_store.authorize(self.auth) # relies on data store using HBP authorization
# if this is not the case, need to authenticate/authorize
# the data store before passing to `register()`
if data_store.collab_id is None:
data_store.collab_id = collab_id
files_to_upload = []
if "figures" in test_result.related_data:
files_to_upload.extend(test_result.related_data["figures"])
if files_to_upload:
list_dict_files_to_upload = [{"download_url": f["filepath"], "size": f["filesize"]} for f in data_store.upload_data(files_to_upload)]
results_storage.extend(list_dict_files_to_upload)
url = self.url + "/results/"
result_json = {
"model_instance_id": model_instance_uuid,
"test_instance_id": test_result.test.uuid,
"results_storage": results_storage,
"score": int(test_result.score) if isinstance(test_result.score, bool) else test_result.score,
"passed": None if "passed" not in test_result.related_data else test_result.related_data["passed"],
#"platform": str(self._get_platform()), # not currently supported in v2
"project_id": collab_id,
"normalized_score": int(test_result.score) if isinstance(test_result.score, bool) else test_result.score,
}
headers = {'Content-type': 'application/json'}
response = requests.post(url, data=json.dumps(result_json),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 201:
print("Result registered successfully!")
return renameNestedJSONKey(response.json(), "project_id", "collab_id")
else:
handle_response_error("Error registering result", response)
def delete_result(self, result_id=""):
"""ONLY FOR SUPERUSERS: Delete a result on the validation framework.
        This allows you to delete an existing result on the validation framework.
The `result_id` needs to be specified as input parameter.
Parameters
----------
result_id : UUID
System generated unique identifier associated with result.
Note
----
* This feature is only for superusers!
Examples
--------
        >>> test_library.delete_result(result_id="2b45e7d4-a7a1-4a31-a287-aee7072e3e75")
"""
if not result_id:
raise Exception("result_id needs to be provided for finding a specific result.")
else:
url = self.url + "/results/" + result_id
model_image_json = requests.delete(url, auth=self.auth, verify=self.verify)
if model_image_json.status_code == 403:
handle_response_error("Only SuperUser accounts can delete data", model_image_json)
elif model_image_json.status_code != 200:
handle_response_error("Error in deleting result", model_image_json)
def _get_platform(self):
"""
Return a dict containing information about the platform the test was run on.
"""
# This needs to be extended to support remote execution, e.g. job queues on clusters.
# Use Sumatra?
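        # Note: _get_ip_address() below is assumed to be a module-level helper defined
        # elsewhere in this module.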
network_name = platform.node()
bits, linkage = platform.architecture()
return dict(architecture_bits=bits,
architecture_linkage=linkage,
machine=platform.machine(),
network_name=network_name,
ip_addr=_get_ip_address(),
processor=platform.processor(),
release=platform.release(),
system_name=platform.system(),
version=platform.version())
class ModelCatalog(BaseClient):
"""Client for the HBP Model Catalog.
The ModelCatalog client manages all actions pertaining to models.
The following actions can be performed:
==================================== ====================================
Action Method
==================================== ====================================
Get model description :meth:`get_model`
List model descriptions :meth:`list_models`
Register new model description :meth:`register_model`
Edit model description :meth:`edit_model`
Get valid attribute values :meth:`get_attribute_options`
Get model instance :meth:`get_model_instance`
Download model instance :meth:`download_model_instance`
List model instances :meth:`list_model_instances`
Add new model instance :meth:`add_model_instance`
Find model instance; else add :meth:`find_model_instance_else_add`
Edit existing model instance :meth:`edit_model_instance`
==================================== ====================================
Parameters
----------
username : string
Your HBP Collaboratory username. Not needed in Jupyter notebooks within the HBP Collaboratory.
password : string, optional
Your HBP Collaboratory password; advisable to not enter as plaintext.
If left empty, you would be prompted for password at run time (safer).
Not needed in Jupyter notebooks within the HBP Collaboratory.
environment : string, optional
Used to indicate whether being used for development/testing purposes.
Set as `production` as default for using the production system,
which is appropriate for most users. When set to `dev`, it uses the
`development` system. Other environments, if required, should be defined
inside a json file named `config.json` in the working directory. Example:
.. code-block:: JSON
{
"prod": {
"url": "https://validation-v1.brainsimulation.eu",
"client_id": "3ae21f28-0302-4d28-8581-15853ad6107d"
},
"dev_test": {
"url": "https://localhost:8000",
"client_id": "90c719e0-29ce-43a2-9c53-15cb314c2d0b",
"verify_ssl": false
}
}
token : string, optional
You may directly input a valid authenticated token from Collaboratory v1 or v2.
Note: you should use the `access_token` and NOT `refresh_token`.
Examples
--------
Instantiate an instance of the ModelCatalog class
>>> model_catalog = ModelCatalog(username="<<hbp_username>>", password="<<hbp_password>>")
>>> model_catalog = ModelCatalog(token="<<token>>")
"""
__test__ = False
def __init__(self, username=None, password=None, environment="production", token=None):
super(ModelCatalog, self).__init__(username, password, environment, token)
self._set_app_info()
def _set_app_info(self):
if self.environment == "production":
self.app_name = "Model Catalog"
elif self.environment == "dev":
self.app_name = "Model Catalog (dev)"
elif self.environment == "integration":
self.app_name = "Model Catalog (staging)"
# def set_app_config(self, collab_id="", only_if_new=False, species="", brain_region="", cell_type="", model_scope="", abstraction_level="", organization=""):
# # TODO: needs to be updated for Collab v2
# inputArgs = locals()
# params = {}
# params["url"] = self.url + "/parametersconfiguration-model-catalog/parametersconfigurationrest/"
# params["only_if_new"] = only_if_new
# params["config"] = inputArgs
# params["config"].pop("self")
# params["config"].pop("only_if_new")
# params["config"]["app_type"] = "model_catalog"
# self._configure_app_collab(params)
# def set_app_config_minimal(self, project_="", only_if_new=False):
# # TODO: needs to be updated for Collab v2
# inputArgs = locals()
# species = []
# brain_region = []
# cell_type = []
# model_scope = []
# abstraction_level = []
# organization = []
# models = self.list_models(app_id=app_id)
# if len(models) == 0:
# print("There are currently no models associated with this Model Catalog app.\nConfiguring filters to show all accessible data.")
# for model in models:
# if model["species"] not in species:
# species.append(model["species"])
# if model["brain_region"] not in brain_region:
# brain_region.append(model["brain_region"])
# if model["cell_type"] not in cell_type:
# cell_type.append(model["cell_type"])
# if model["model_scope"] not in model_scope:
# model_scope.append(model["model_scope"])
# if model["abstraction_level"] not in abstraction_level:
# abstraction_level.append(model["abstraction_level"])
# if model["organization"] not in organization:
# organization.append(model["organization"])
# filters = {}
# for key in ["collab_id", "app_id", "species", "brain_region", "cell_type", "model_scope", "abstraction_level", "organization"]:
# if isinstance(locals()[key], list):
# filters[key] = ",".join(locals()[key])
# else:
# filters[key] = locals()[key]
# params = {}
# params["url"] = self.url + "/parametersconfiguration-model-catalog/parametersconfigurationrest/"
# params["only_if_new"] = only_if_new
# params["config"] = filters
# params["config"]["app_type"] = "model_catalog"
# self._configure_app_collab(params)
def get_model(self, model_id="", alias="", instances=True, images=True):
"""Retrieve a specific model description by its model_id or alias.
A specific model description can be retrieved from the model catalog
in the following ways (in order of priority):
1. specify the `model_id`
2. specify the `alias` (of the model)
Parameters
----------
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
instances : boolean, optional
Set to False if you wish to omit the details of the model instances; default True.
images : boolean, optional
Set to False if you wish to omit the details of the model images (figures); default True.
Returns
-------
dict
Entire model description as a JSON object.
Examples
--------
>>> model = model_catalog.get_model(model_id="8c7cb9f6-e380-452c-9e98-e77254b088c5")
>>> model = model_catalog.get_model(alias="B1")
"""
if model_id == "" and alias == "":
raise Exception("Model ID or alias needs to be provided for finding a model.")
elif model_id != "":
url = self.url + "/models/" + model_id
else:
url = self.url + "/models/" + quote(alias)
model_json = requests.get(url, auth=self.auth, verify=self.verify)
if model_json.status_code != 200:
handle_response_error("Error in retrieving model", model_json)
model_json = model_json.json()
if instances is False:
model_json.pop("instances")
if images is False:
model_json.pop("images")
return renameNestedJSONKey(model_json, "project_id", "collab_id")
def list_models(self, size=1000000, from_index=0, **filters):
"""Retrieve list of model descriptions satisfying specified filters.
The filters may specify one or more attributes that belong
to a model description. The following model attributes can be specified:
* alias
* name
* brain_region
* species
* cell_type
* model_scope
* abstraction_level
* author
* owner
* organization
* collab_id
* private
Parameters
----------
size : positive integer
Max number of models to be returned; default is set to 1000000.
from_index : positive integer
Index of first model to be returned; default is set to 0.
**filters : variable length keyword arguments
To be used to filter model descriptions from the model catalog.
Returns
-------
list
List of model descriptions satisfying specified filters.
Examples
--------
>>> models = model_catalog.list_models()
>>> models = model_catalog.list_models(collab_id="model-validation")
>>> models = model_catalog.list_models(cell_type="Pyramidal Cell", brain_region="Hippocampus")
"""
valid_filters = ["name", "alias", "brain_region", "species", "cell_type", "model_scope", "abstraction_level", "author", "owner", "organization", "collab_id", "private"]
params = locals()["filters"]
for filter in params:
if filter not in valid_filters:
raise ValueError("The specified filter '{}' is an invalid filter!\nValid filters are: {}".format(filter, valid_filters))
# handle naming difference with API: collab_id <-> project_id
if "collab_id" in params:
params["project_id"] = params.pop("collab_id")
url = self.url + "/models/"
url += "?" + urlencode(params, doseq=True) + "&size=" + str(size) + "&from_index=" + str(from_index)
response = requests.get(url, auth=self.auth, verify=self.verify)
try:
models = response.json()
except (json.JSONDecodeError, simplejson.JSONDecodeError):
handle_response_error("Error in list_models()", response)
return renameNestedJSONKey(models, "project_id", "collab_id")
def register_model(self, collab_id=None, name=None, alias=None, author=None, owner=None, organization=None, private=False,
species=None, brain_region=None, cell_type=None, model_scope=None, abstraction_level=None,
project=None, license=None, description=None, instances=[], images=[]):
"""Register a new model in the model catalog.
This allows you to add a new model to the model catalog. Model instances
and/or images (figures) can optionally be specified at the time of model
creation, or can be added later individually.
Parameters
----------
collab_id : string
Specifies the ID of the host collab in the HBP Collaboratory.
(the model would belong to this collab)
name : string
Name of the model description to be created.
alias : string, optional
User-assigned unique identifier to be associated with model description.
author : string
Name of person creating the model description.
organization : string, optional
Option to tag model with organization info.
private : boolean
Set visibility of model description. If True, model would only be seen in host app (where created). Default False.
species : string
The species for which the model is developed.
brain_region : string
The brain region for which the model is developed.
cell_type : string
The type of cell for which the model is developed.
model_scope : string
Specifies the type of the model.
abstraction_level : string
Specifies the model abstraction level.
owner : string
Specifies the owner of the model. Need not necessarily be the same as the author.
project : string
Can be used to indicate the project to which the model belongs.
license : string
Indicates the license applicable for this model.
description : string
Provides a description of the model.
instances : list, optional
Specify a list of instances (versions) of the model.
images : list, optional
Specify a list of images (figures) to be linked to the model.
Returns
-------
dict
Model description that has been created.
Examples
--------
(without instances and images)
>>> model = model_catalog.register_model(collab_id="model-validation", name="Test Model - B2",
alias="Model vB2", author="Shailesh Appukuttan", organization="HBP-SP6",
private=False, cell_type="Granule Cell", model_scope="Single cell model",
abstraction_level="Spiking neurons",
brain_region="Basal Ganglia", species="Mouse (Mus musculus)",
owner="Andrew Davison", project="SP 6.4", license="BSD 3-Clause",
description="This is a test entry")
(with instances and images)
>>> model = model_catalog.register_model(collab_id="model-validation", name="Test Model - C2",
alias="Model vC2", author="Shailesh Appukuttan", organization="HBP-SP6",
private=False, cell_type="Granule Cell", model_scope="Single cell model",
abstraction_level="Spiking neurons",
brain_region="Basal Ganglia", species="Mouse (Mus musculus)",
owner="Andrew Davison", project="SP 6.4", license="BSD 3-Clause",
description="This is a test entry! Please ignore.",
instances=[{"source":"https://www.abcde.com",
"version":"1.0", "parameters":""},
{"source":"https://www.12345.com",
"version":"2.0", "parameters":""}],
images=[{"url":"http://www.neuron.yale.edu/neuron/sites/default/themes/xchameleon/logo.png",
"caption":"NEURON Logo"},
{"url":"https://collab.humanbrainproject.eu/assets/hbp_diamond_120.png",
"caption":"HBP Logo"}])
"""
model_data = {}
args = locals()
# handle naming difference with API: collab_id <-> project_id
args["project_id"] = args.pop("collab_id")
for field in ["project_id", "name", "alias", "author", "organization", "private",
"cell_type", "model_scope", "abstraction_level", "brain_region",
"species", "owner", "project", "license", "description",
"instances", "images"]:
if args[field] or field == "private":
model_data[field] = args[field]
values = self.get_attribute_options()
for field in ["species", "brain_region", "cell_type", "abstraction_level", "model_scope"]:
if field in model_data and model_data[field] not in values[field] + [None]:
raise Exception("{} = '{}' is invalid.\nValue has to be one of these: {}".format(field, model_data[field], values[field]))
if private not in [True, False]:
raise Exception("Model's 'private' attribute should be specified as True / False. Default value is False.")
# format names of authors and owners as required by API
for field in ("author", "owner"):
            if model_data.get(field):
model_data[field] = self._format_people_name(model_data[field])
url = self.url + "/models/"
headers = {'Content-type': 'application/json'}
response = requests.post(url, data=json.dumps(model_data),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 201:
return renameNestedJSONKey(response.json(), "project_id", "collab_id")
else:
handle_response_error("Error in adding model", response)
def edit_model(self, model_id=None, collab_id=None, name=None, alias=None, author=None, owner=None, organization=None, private=None,
species=None, brain_region=None, cell_type=None, model_scope=None, abstraction_level=None,
project=None, license=None, description=None):
"""Edit an existing model on the model catalog.
        This allows you to edit an existing model in the model catalog.
        The `model_id` must be provided. Any of the other parameters may be updated.
        Only the parameters being updated need to be specified.
Parameters
----------
model_id : UUID
System generated unique identifier associated with model description.
collab_id : string
Specifies the ID of the host collab in the HBP Collaboratory.
(the model would belong to this collab)
name : string
Name of the model description to be created.
alias : string, optional
User-assigned unique identifier to be associated with model description.
author : string
Name of person creating the model description.
organization : string, optional
Option to tag model with organization info.
private : boolean
Set visibility of model description. If True, model would only be seen in host app (where created). Default False.
species : string
The species for which the model is developed.
brain_region : string
The brain region for which the model is developed.
cell_type : string
The type of cell for which the model is developed.
model_scope : string
Specifies the type of the model.
abstraction_level : string
Specifies the model abstraction level.
owner : string
Specifies the owner of the model. Need not necessarily be the same as the author.
project : string
Can be used to indicate the project to which the model belongs.
license : string
Indicates the license applicable for this model.
description : string
Provides a description of the model.
Note
----
Model instances and images (figures) cannot be edited here.
This has to be done using :meth:`edit_model_instance` and :meth:`edit_model_image`
Returns
-------
dict
Model description that has been edited.
Examples
--------
>>> model = model_catalog.edit_model(collab_id="model-validation", name="Test Model - B2",
model_id="8c7cb9f6-e380-452c-9e98-e77254b088c5",
alias="Model-B2", author="Shailesh Appukuttan", organization="HBP-SP6",
private=False, cell_type="Granule Cell", model_scope="Single cell model",
abstraction_level="Spiking neurons",
brain_region="Basal Ganglia", species="Mouse (Mus musculus)",
owner="Andrew Davison", project="SP 6.4", license="BSD 3-Clause",
description="This is a test entry")
"""
if not model_id:
raise Exception("Model ID needs to be provided for editing a model.")
model_data = {}
args = locals()
# handle naming difference with API: collab_id <-> project_id
args["project_id"] = args.pop("collab_id")
for field in ["project_id", "name", "alias", "author", "organization", "private",
"cell_type", "model_scope", "abstraction_level", "brain_region",
"species", "owner", "project", "license", "description"]:
            if args[field] or (field == "private" and args[field] is not None):
model_data[field] = args[field]
values = self.get_attribute_options()
for field in ("species", "brain_region", "cell_type", "abstraction_level", "model_scope"):
if field in model_data and model_data[field] not in values[field] + [None]:
raise Exception("{} = '{}' is invalid.\nValue has to be one of these: {}".format(field, model_data[field], values[field]))
if private and private not in [True, False]:
raise Exception("Model's 'private' attribute should be specified as True / False. Default value is False.")
# format names of authors and owners as required by API
for field in ("author", "owner"):
if model_data.get(field):
model_data[field] = self._format_people_name(model_data[field])
if "alias" in model_data and model_data["alias"] == "":
model_data["alias"] = None
if model_data.get("private", False) not in (True, False):
raise Exception("Model's 'private' attribute should be specified as True / False. Default value is False.")
headers = {'Content-type': 'application/json'}
url = self.url + "/models/" + model_id
response = requests.put(url, data=json.dumps(model_data),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 200:
return renameNestedJSONKey(response.json(), "project_id", "collab_id")
else:
handle_response_error("Error in updating model", response)
def delete_model(self, model_id="", alias=""):
"""ONLY FOR SUPERUSERS: Delete a specific model description by its model_id or alias.
A specific model description can be deleted from the model catalog, along with all
associated model instances, images and results, in the following ways (in order of priority):
1. specify the `model_id`
2. specify the `alias` (of the model)
Parameters
----------
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
Note
----
* This feature is only for superusers!
Examples
--------
>>> model_catalog.delete_model(model_id="8c7cb9f6-e380-452c-9e98-e77254b088c5")
>>> model_catalog.delete_model(alias="B1")
"""
if model_id == "" and alias == "":
raise Exception("Model ID or alias needs to be provided for deleting a model.")
elif model_id != "":
url = self.url + "/models/" + model_id
else:
url = self.url + "/models/" + quote(alias)
model_json = requests.delete(url, auth=self.auth, verify=self.verify)
if model_json.status_code == 403:
handle_response_error("Only SuperUser accounts can delete data", model_json)
elif model_json.status_code != 200:
handle_response_error("Error in deleting model", model_json)
def get_attribute_options(self, param=""):
"""Retrieve valid values for attributes.
Will return the list of valid values (where applicable) for various attributes.
        The following model attributes can be specified:
        * cell_type
        * brain_region
        * model_scope
        * abstraction_level
        * species
        If an attribute is specified, only the values that correspond to it will be returned;
        otherwise, values for all attributes are returned.
Parameters
----------
param : string, optional
Attribute of interest
Returns
-------
dict
Dictionary with key(s) as attribute(s), and value(s) as list of valid options.
Examples
--------
>>> data = model_catalog.get_attribute_options()
>>> data = model_catalog.get_attribute_options("cell types")
"""
valid_params = ["species", "brain_region", "cell_type", "model_scope", "abstraction_level"]
return self._get_attribute_options(param, valid_params)
def get_model_instance(self, instance_path="", instance_id="", model_id="", alias="", version=""):
"""Retrieve an existing model instance.
A specific model instance can be retrieved
in the following ways (in order of priority):
1. load from a local JSON file specified via `instance_path`
2. specify `instance_id` corresponding to model instance in model catalog
3. specify `model_id` and `version`
4. specify `alias` (of the model) and `version`
Parameters
----------
instance_path : string
Location of local JSON file with model instance metadata.
instance_id : UUID
System generated unique identifier associated with model instance.
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
version : string
User-assigned identifier (unique for each model) associated with model instance.
Returns
-------
dict
Information about the model instance.
Examples
--------
>>> model_instance = model_catalog.get_model_instance(instance_id="a035f2b2-fe2e-42fd-82e2-4173a304263b")
"""
if instance_path == "" and instance_id == "" and (model_id == "" or version == "") and (alias == "" or version == ""):
raise Exception("instance_path or instance_id or (model_id, version) or (alias, version) needs to be provided for finding a model instance.")
if instance_path and os.path.isfile(instance_path):
# instance_path is a local path
with open(instance_path) as fp:
model_instance_json = json.load(fp)
else:
if instance_id:
url = self.url + "/models/query/instances/" + instance_id
elif model_id and version:
url = self.url + "/models/" + model_id + "/instances/?version=" + version
else:
url = self.url + "/models/" + quote(alias) + "/instances/?version=" + version
model_instance_json = requests.get(url, auth=self.auth, verify=self.verify)
if model_instance_json.status_code != 200:
handle_response_error("Error in retrieving model instance", model_instance_json)
model_instance_json = model_instance_json.json()
# if specifying a version, this can return multiple instances, since instances
# can have the same version but different parameters
if len(model_instance_json) == 1:
return model_instance_json[0]
return model_instance_json
def download_model_instance(self, instance_path="", instance_id="", model_id="", alias="", version="", local_directory=".", overwrite=False):
"""Download files/directory corresponding to an existing model instance.
Files/directory corresponding to a model instance to be downloaded. The model
instance can be specified in the following ways (in order of priority):
1. load from a local JSON file specified via `instance_path`
2. specify `instance_id` corresponding to model instance in model catalog
3. specify `model_id` and `version`
4. specify `alias` (of the model) and `version`
Parameters
----------
instance_path : string
Location of local JSON file with model instance metadata.
instance_id : UUID
System generated unique identifier associated with model instance.
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
version : string
User-assigned identifier (unique for each model) associated with model instance.
local_directory : string
Directory path (relative/absolute) where files should be downloaded and saved. Default is current location.
        overwrite : boolean, optional
            Indicates if any existing file at the target location should be overwritten; default is set to False.
Returns
-------
string
Absolute path of the downloaded file/directory.
        Note
        ----
        Existing files, if any, at the target location will be overwritten
        only if `overwrite` is set to True.
Examples
--------
>>> file_path = model_catalog.download_model_instance(instance_id="a035f2b2-fe2e-42fd-82e2-4173a304263b")
"""
model_source = self.get_model_instance(instance_path=instance_path, instance_id=instance_id, model_id=model_id, alias=alias, version=version)["source"]
if model_source[-1]=="/":
model_source = model_source[:-1] # remove trailing '/'
Path(local_directory).mkdir(parents=True, exist_ok=True)
fileList = []
if "drive.ebrains.eu/lib/" in model_source:
# ***** Handles Collab storage urls *****
repo_id = model_source.split("drive.ebrains.eu/lib/")[1].split("/")[0]
model_path = "/" + "/".join(model_source.split("drive.ebrains.eu/lib/")[1].split("/")[2:])
datastore = URI_SCHEME_MAP["collab_v2"](collab_id=repo_id, auth=self.auth)
fileList = datastore.download_data(model_path, local_directory=local_directory, overwrite=overwrite)
elif model_source.startswith("swift://cscs.ch/"):
# ***** Handles CSCS private urls *****
datastore = URI_SCHEME_MAP["swift"]()
fileList = datastore.download_data(str(model_source), local_directory=local_directory, overwrite=overwrite)
elif model_source.startswith("https://object.cscs.ch/"):
# ***** Handles CSCS public urls (file or folder) *****
model_source = urljoin(model_source, urlparse(model_source).path) # remove query params from URL, e.g. `?bluenaas=true`
req = requests.head(model_source)
if req.status_code == 200:
if "directory" in req.headers["Content-Type"]:
base_source = "/".join(model_source.split("/")[:6])
model_rel_source = "/".join(model_source.split("/")[6:])
dir_name = model_source.split("/")[-1]
req = requests.get(base_source)
contents = req.text.split("\n")
files_match = [os.path.join(base_source, x) for x in contents if x.startswith(model_rel_source) and "." in x]
local_directory = os.path.join(local_directory, dir_name)
Path(local_directory).mkdir(parents=True, exist_ok=True)
else:
files_match = [model_source]
datastore = URI_SCHEME_MAP["http"]()
fileList = datastore.download_data(files_match, local_directory=local_directory, overwrite=overwrite)
else:
raise FileNotFoundError("Requested file/folder not found: {}".format(model_source))
else:
# ***** Handles ModelDB and external urls (only file; not folder) *****
datastore = URI_SCHEME_MAP["http"]()
fileList = datastore.download_data(str(model_source), local_directory=local_directory, overwrite=overwrite)
        if len(fileList) > 0:
            if len(fileList) == 1:
                outpath = fileList[0]
            else:
                # multiple files were downloaded: return their common parent directory
                outpath = os.path.dirname(os.path.commonprefix(fileList))
            return os.path.abspath(outpath)
else:
print("\nSource location: {}".format(model_source))
print("Could not download the specified file(s)!")
return None
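    # Source URL forms handled by download_model_instance() above, for reference
    # (the concrete URLs are illustrative):
    #   - EBRAINS Collab drive:   https://drive.ebrains.eu/lib/<repo_id>/...
    #   - CSCS private storage:   swift://cscs.ch/...
    #   - CSCS public storage:    https://object.cscs.ch/... (single file or folder)
    #   - anything else:          treated as a plain HTTP(S) link to a single file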
def list_model_instances(self, instance_path="", model_id="", alias=""):
"""Retrieve list of model instances belonging to a specified model.
This can be retrieved in the following ways (in order of priority):
1. load from a local JSON file specified via `instance_path`
2. specify `model_id`
3. specify `alias` (of the model)
Parameters
----------
instance_path : string
Location of local JSON file with model instance metadata.
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
Returns
-------
list
List of dicts containing information about the model instances.
Examples
--------
>>> model_instances = model_catalog.list_model_instances(alias="Model vB2")
"""
if instance_path == "" and model_id == "" and alias == "":
raise Exception("instance_path or model_id or alias needs to be provided for finding model instances.")
if instance_path and os.path.isfile(instance_path):
# instance_path is a local path
with open(instance_path) as fp:
model_instances_json = json.load(fp)
else:
if model_id:
url = self.url + "/models/" + model_id + "/instances/?size=100000"
else:
url = self.url + "/models/" + quote(alias) + "/instances/?size=100000"
model_instances_json = requests.get(url, auth=self.auth, verify=self.verify)
if model_instances_json.status_code != 200:
handle_response_error("Error in retrieving model instances", model_instances_json)
model_instances_json = model_instances_json.json()
return model_instances_json
def add_model_instance(self, model_id="", alias="", source="", version="", description="", parameters="", code_format="", hash="", morphology="", license=""):
"""Register a new model instance.
        This allows you to add a new instance of an existing model in the model catalog.
        The `model_id` or `alias` needs to be specified as an input parameter.
Parameters
----------
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
source : string
Path to model source code repository (e.g. github).
version : string
User-assigned identifier (unique for each model) associated with model instance.
description : string, optional
Text describing this specific model instance.
parameters : string, optional
Any additional parameters to be submitted to model, or used by it, at runtime.
code_format : string, optional
Indicates the language/platform in which the model was developed.
hash : string, optional
Similar to a checksum; can be used to identify model instances from their implementation.
morphology : string / list, optional
URL(s) to the morphology file(s) employed in this model.
license : string
Indicates the license applicable for this model instance.
Returns
-------
dict
data of model instance that has been created.
Examples
--------
>>> instance = model_catalog.add_model_instance(model_id="196b89a3-e672-4b96-8739-748ba3850254",
source="https://www.abcde.com",
version="1.0",
description="basic model variant",
parameters="",
code_format="py",
hash="",
morphology="",
license="BSD 3-Clause")
"""
instance_data = locals()
instance_data.pop("self")
for key, val in instance_data.items():
if val == "":
instance_data[key] = None
model_id = model_id or alias
if not model_id:
raise Exception("model_id or alias needs to be provided for finding the model.")
else:
url = self.url + "/models/" + quote(model_id) + "/instances/"
headers = {'Content-type': 'application/json'}
response = requests.post(url, data=json.dumps(instance_data),
auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 201:
return response.json()
else:
handle_response_error("Error in adding model instance", response)
def find_model_instance_else_add(self, model_obj):
"""Find existing model instance; else create a new instance
This checks if the input model object has an associated model instance.
If not, a new model instance is created.
Parameters
----------
model_obj : object
Python object representing a model.
Returns
-------
dict
data of existing or created model instance.
Note
----
* `model_obj` is expected to contain the attribute `model_instance_uuid`,
or both the attributes `model_uuid`/`model_alias` and `model_version`.
Examples
--------
>>> instance = model_catalog.find_model_instance_else_add(model)
"""
if not getattr(model_obj, "model_instance_uuid", None):
# check that the model is registered with the model registry.
if not hasattr(model_obj, "model_uuid") and not hasattr(model_obj, "model_alias"):
raise AttributeError("Model object does not have a 'model_uuid'/'model_alias' attribute. "
"Please register it with the Validation Framework and add the 'model_uuid'/'model_alias' to the model object.")
if not hasattr(model_obj, "model_version"):
raise AttributeError("Model object does not have a 'model_version' attribute")
model_instance = self.get_model_instance(model_id=getattr(model_obj, "model_uuid", None),
alias=getattr(model_obj, "model_alias", None),
version=model_obj.model_version)
if not model_instance: # check if instance doesn't exist
# if yes, then create a new instance
model_instance = self.add_model_instance(model_id=getattr(model_obj, "model_uuid", None),
alias=getattr(model_obj, "model_alias", None),
source=getattr(model_obj, "remote_url", ""),
version=model_obj.model_version,
parameters=getattr(model_obj, "parameters", ""))
else:
model_instance = self.get_model_instance(instance_id=model_obj.model_instance_uuid)
return model_instance
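    # A minimal sketch (not part of the API) of a model object that satisfies the
    # expectations of find_model_instance_else_add(); the attribute names come from
    # the docstring above, all values are illustrative:
    #
    #     import types
    #     model = types.SimpleNamespace(model_alias="Model vB2",
    #                                   model_version="1.0",
    #                                   remote_url="https://www.abcde.com",
    #                                   parameters="")
    #     instance = model_catalog.find_model_instance_else_add(model)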
def edit_model_instance(self, instance_id="", model_id="", alias="", source=None, version=None, description=None, parameters=None, code_format=None, hash=None, morphology=None, license=None):
"""Edit an existing model instance.
        This allows you to edit an instance of an existing model in the model catalog.
The model instance can be specified in the following ways (in order of priority):
1. specify `instance_id` corresponding to model instance in model catalog
2. specify `model_id` and `version`
3. specify `alias` (of the model) and `version`
Only the parameters being updated need to be specified. You cannot
edit the model `version` in the latter two cases. To do so,
you must employ the first option above. You can retrieve the `instance_id`
via :meth:`get_model_instance`
Parameters
----------
instance_id : UUID
System generated unique identifier associated with model instance.
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
source : string
Path to model source code repository (e.g. github).
version : string
User-assigned identifier (unique for each model) associated with model instance.
description : string, optional
Text describing this specific model instance.
parameters : string, optional
Any additional parameters to be submitted to model, or used by it, at runtime.
code_format : string, optional
Indicates the language/platform in which the model was developed.
hash : string, optional
Similar to a checksum; can be used to identify model instances from their implementation.
morphology : string / list, optional
URL(s) to the morphology file(s) employed in this model.
license : string
Indicates the license applicable for this model instance.
Returns
-------
dict
data of model instance that has been edited.
Examples
--------
>>> instance = model_catalog.edit_model_instance(instance_id="fd1ab546-80f7-4912-9434-3c62af87bc77",
source="https://www.abcde.com",
version="1.0",
description="passive model variant",
parameters="",
code_format="py",
hash="",
morphology="",
license="BSD 3-Clause")
"""
if instance_id == "" and (model_id == "" or not version) and (alias == "" or not version):
raise Exception("instance_id or (model_id, version) or (alias, version) needs to be provided for finding a model instance.")
instance_data = {key:value for key, value in locals().items()
if value is not None}
# assign existing values for parameters not specified
if instance_id:
url = self.url + "/models/query/instances/" + instance_id
else:
model_identifier = quote(model_id or alias)
response0 = requests.get(self.url + f"/models/{model_identifier}/instances/?version={version}",
auth=self.auth, verify=self.verify)
if response0.status_code != 200:
raise Exception("Invalid model_id, alias and/or version")
model_data = response0.json()[0] # to fix: in principle, can have multiple instances with same version but different parameters
url = self.url + f"/models/{model_identifier}/instances/{model_data['id']}"
for key in ["self", "instance_id", "alias", "model_id"]:
instance_data.pop(key)
headers = {'Content-type': 'application/json'}
response = requests.put(url, data=json.dumps(instance_data), auth=self.auth, headers=headers,
verify=self.verify)
if response.status_code == 200:
return response.json()
else:
handle_response_error("Error in editing model instance at {}".format(url), response)
def delete_model_instance(self, instance_id="", model_id="", alias="", version=""):
"""ONLY FOR SUPERUSERS: Delete an existing model instance.
        This allows you to delete an instance of an existing model in the model catalog.
The model instance can be specified in the following ways (in order of priority):
1. specify `instance_id` corresponding to model instance in model catalog
2. specify `model_id` and `version`
3. specify `alias` (of the model) and `version`
Parameters
----------
instance_id : UUID
System generated unique identifier associated with model instance.
model_id : UUID
System generated unique identifier associated with model description.
alias : string
User-assigned unique identifier associated with model description.
version : string
User-assigned unique identifier associated with model instance.
Note
----
* This feature is only for superusers!
Examples
--------
>>> model_catalog.delete_model_instance(model_id="8c7cb9f6-e380-452c-9e98-e77254b088c5")
>>> model_catalog.delete_model_instance(alias="B1", version="1.0")
"""
if instance_id == "" and (model_id == "" or not version) and (alias == "" or not version):
raise Exception("instance_id or (model_id, version) or (alias, version) needs to be provided for finding a model instance.")
if instance_id:
id = instance_id # as needed by API
if alias:
model_alias = alias # as needed by API
if instance_id:
if model_id:
url = self.url + "/models/" + model_id + "/instances/" + instance_id
else:
url = self.url + "/models/query/instances/" + instance_id
else:
raise NotImplementedError("Need to retrieve instance to get id")
model_instance_json = requests.delete(url, auth=self.auth, verify=self.verify)
if model_instance_json.status_code == 403:
handle_response_error("Only SuperUser accounts can delete data", model_instance_json)
elif model_instance_json.status_code != 200:
handle_response_error("Error in deleting model instance", model_instance_json)
def _get_ip_address():
    """
    Not foolproof, but allows checking for an external connection with a short
    timeout, before trying socket.gethostbyname(), which has a very long
    timeout.
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    except (OSError, URLError, socket.timeout, socket.gaierror):
        return "127.0.0.1"
    finally:
        # make sure the probe socket does not leak
        if s is not None:
            s.close()
| {
"content_hash": "63498d6a29d6e95e1beaaf80a9a23e24",
"timestamp": "",
"source": "github",
"line_count": 2408,
"max_line_length": 300,
"avg_line_length": 45.69892026578073,
"alnum_prop": 0.579146333705915,
"repo_name": "apdavison/hbp-validation-client",
"id": "76fc7c276c372571af6a1daf8da6ee5bae3643a1",
"size": "110043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hbp_validation_framework/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11296"
},
{
"name": "HTML",
"bytes": "16400"
},
{
"name": "JavaScript",
"bytes": "37363"
},
{
"name": "Python",
"bytes": "278108"
},
{
"name": "Shell",
"bytes": "249"
}
],
"symlink_target": ""
} |
import argparse
import sys
from sim.load_elf import load_elf
from sim.standalonesim import StandaloneSim
from sim.stats import ExecutionStatAnalyzer
def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('elf')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument(
'--dump-dmem',
metavar="FILE",
type=argparse.FileType('wb'),
help=("after execution, write the data memory contents to this file. "
"Use '-' to write to STDOUT.")
)
parser.add_argument(
'--dump-regs',
metavar="FILE",
type=argparse.FileType('w'),
help=("after execution, write the GPR and WDR contents to this file. "
"Use '-' to write to STDOUT.")
)
parser.add_argument(
'--dump-stats',
metavar="FILE",
type=argparse.FileType('w'),
help=("after execution, write execution statistics to this file. "
"Use '-' to write to STDOUT.")
)
args = parser.parse_args()
collect_stats = args.dump_stats is not None
sim = StandaloneSim()
exp_end_addr = load_elf(sim, args.elf)
    # Set fixed test values for the 384-bit sideload keys: the 8-hex-digit
    # pattern repeated 12 times gives 96 hex digits = 384 bits.
    key0 = int("deadbeef" * 12, 16)
    key1 = int("baadf00d" * 12, 16)
    sim.state.wsrs.set_sideload_keys(key0, key1)
    sim.state.ext_regs.commit()
sim.start(collect_stats)
sim.run(verbose=args.verbose, dump_file=args.dump_regs)
if exp_end_addr is not None:
if sim.state.pc != exp_end_addr:
print('Run stopped at PC {:#x}, but _expected_end_addr was {:#x}.'
.format(sim.state.pc, exp_end_addr),
file=sys.stderr)
return 1
if args.dump_dmem is not None:
args.dump_dmem.write(sim.dump_data())
if collect_stats:
assert sim.stats is not None
stat_analyzer = ExecutionStatAnalyzer(sim.stats, args.elf)
args.dump_stats.write(stat_analyzer.dump())
return 0
if __name__ == "__main__":
sys.exit(main())
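# Example invocation (the ELF path is illustrative; flags as defined above):
#
#     python3 standalone.py path/to/program.elf --dump-regs - --dump-stats stats.txt
#
# Passing '-' writes the requested dump to STDOUT, as described in the argparse help.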
| {
"content_hash": "fb5f4d454f841c70bb7cbc508cff9696",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 29.26086956521739,
"alnum_prop": 0.5948489351163943,
"repo_name": "lowRISC/opentitan",
"id": "3c14f8d5b730b80a6589c519cbf223aa012b2a67",
"size": "2190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hw/ip/otbn/dv/otbnsim/standalone.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "516881"
},
{
"name": "C",
"bytes": "4864968"
},
{
"name": "C++",
"bytes": "1629214"
},
{
"name": "CSS",
"bytes": "3281"
},
{
"name": "Dockerfile",
"bytes": "6732"
},
{
"name": "Emacs Lisp",
"bytes": "411542"
},
{
"name": "HTML",
"bytes": "149270"
},
{
"name": "Makefile",
"bytes": "20646"
},
{
"name": "Python",
"bytes": "2576872"
},
{
"name": "Rust",
"bytes": "856480"
},
{
"name": "SCSS",
"bytes": "54700"
},
{
"name": "Shell",
"bytes": "119163"
},
{
"name": "Smarty",
"bytes": "771102"
},
{
"name": "Starlark",
"bytes": "688003"
},
{
"name": "Stata",
"bytes": "3676"
},
{
"name": "SystemVerilog",
"bytes": "14853322"
},
{
"name": "Tcl",
"bytes": "361936"
},
{
"name": "Verilog",
"bytes": "3296"
}
],
"symlink_target": ""
} |
from vis import label, mag, mag2
from planets import planet_list
from parameters import u, n, f, obj_global, a_global, eps_global, name_global, radius_global
# planet/spaceship label definition:
popup = label(visible=False, box=False, xoffset=-50, yoffset=50, font='sans', opacity=0.4)
def lbl(popup, obj, sw_lbl, dt):
# global variables for comparison between function calls
global obj_global, a_global, eps_global, name_global, radius_global
err = 1e20 # large number for error comparison in the for loop below
r0mag = mag(obj.pos) # computing the instantaneous distance from the Sun
if r0mag == 0: # turning off the planet label if the clicked object is centered at the origin (i.e. Sun, stars)
sw_lbl = not sw_lbl
return popup, sw_lbl
if obj_global != obj: # execute only if new object was chosen
# looking through the planet list searching for the closest value for semi major axis for the selected object:
for planet in planet_list:
if (abs(planet['a'] - r0mag)) < err:
err = (abs(planet['a'] - r0mag)) # assign new closest value
a_global = planet['a'] # assign semi-major axis
name_global = planet['name'] # assign planet name
radius_global = planet['radius'] # assign planet radius
eps_global = -u / (2 * a_global) # compute specific orbital energy
obj_global = obj # assign new object as already labeled
v0mag = (2 * (eps_global + u / r0mag)) ** 0.5 # velocity calculation using specific orbital energy
popup.pos = obj.pos # update label position to overlap with planet position
# update label text with new data:
popup.text = str(name_global) + \
"\nRadius: " + str(radius_global) + " km" + \
"\nDistance from the Sun: " + str(int(round(r0mag))) + " km (" + str(
round(r0mag / 149598261, 2)) + " AU)" + \
"\nOrbital Velocity: " + str(round(v0mag, 2)) + " km/s" + \
"\nTime scale: 1 s = " + str(round(f * dt * 365.25 * 86400 / (3600. * n), 3)) + "hrs"
popup.visible = True
return popup, sw_lbl
def lbl_ship(popup, obj, s, v, a):
# get magnitudes of distance from the Sun, velocity and acceleration:
r0mag = mag(s)
v0mag = mag(v)
a0mag = mag(a) * 1e6 # converted from km/s^2 to m/s^2
eps = mag2(v) / 2 - u / r0mag # compute specific orbital energy
popup.pos = obj.pos # update label position to overlap with spaceship position
# update label text with new data:
popup.text = "Spaceship!" + \
"\nAcceleration: " + str(a) + \
"\nSpecific orbital energy: " + str(eps) + " MJ/kg" + \
"\nDistance from the Sun: " + str(int(round(r0mag))) + " km (" + str(
round(r0mag / 149598261, 2)) + " AU)" + \
"\nEngine Acceleration: " + str(round(a0mag, 2)) + " m/s^2" + \
"\nOrbital Velocity: " + str(round(v0mag, 2)) + " km/s"
popup.visible = True
return popup | {
"content_hash": "e287129436713fa4db98b35bc31edd3f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 118,
"avg_line_length": 41.63513513513514,
"alnum_prop": 0.593638429081467,
"repo_name": "lukekulik/solar-system",
"id": "6cbe292bf176c609ae61c7f7a813a17010690f1f",
"size": "3081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37820"
}
],
"symlink_target": ""
} |
import base64
import concurrent.futures
import datetime
import logging
import os
import platform
import portpicker
import psutil
import random
import re
import signal
import string
import subprocess
import time
import traceback
from mobly.controllers.android_device_lib import adb
# File name length is limited to 255 chars on some OS, so we need to make sure
# the file names we output fit within the limit.
MAX_FILENAME_LEN = 255
# Number of times to retry to get available port
MAX_PORT_ALLOCATION_RETRY = 50
ascii_letters_and_digits = string.ascii_letters + string.digits
valid_filename_chars = "-_." + ascii_letters_and_digits
GMT_to_olson = {
"GMT-9": "America/Anchorage",
"GMT-8": "US/Pacific",
"GMT-7": "US/Mountain",
"GMT-6": "US/Central",
"GMT-5": "US/Eastern",
"GMT-4": "America/Barbados",
"GMT-3": "America/Buenos_Aires",
"GMT-2": "Atlantic/South_Georgia",
"GMT-1": "Atlantic/Azores",
"GMT+0": "Africa/Casablanca",
"GMT+1": "Europe/Amsterdam",
"GMT+2": "Europe/Athens",
"GMT+3": "Europe/Moscow",
"GMT+4": "Asia/Baku",
"GMT+5": "Asia/Oral",
"GMT+6": "Asia/Almaty",
"GMT+7": "Asia/Bangkok",
"GMT+8": "Asia/Hong_Kong",
"GMT+9": "Asia/Tokyo",
"GMT+10": "Pacific/Guam",
"GMT+11": "Pacific/Noumea",
"GMT+12": "Pacific/Fiji",
"GMT+13": "Pacific/Tongatapu",
"GMT-11": "Pacific/Midway",
"GMT-10": "Pacific/Honolulu"
}
class Error(Exception):
"""Raised when an error occurs in a util"""
def abs_path(path):
"""Resolve the '.' and '~' in a path to get the absolute path.
Args:
path: The path to expand.
Returns:
The absolute path of the input path.
"""
return os.path.abspath(os.path.expanduser(path))
def create_dir(path):
"""Creates a directory if it does not exist already.
Args:
path: The path of the directory to create.
"""
full_path = abs_path(path)
if not os.path.exists(full_path):
os.makedirs(full_path)
def create_alias(target_path, alias_path):
"""Creates an alias at 'alias_path' pointing to the file 'target_path'.
On Unix, this is implemented via symlink. On Windows, this is done by
creating a Windows shortcut file.
Args:
target_path: Destination path that the alias should point to.
alias_path: Path at which to create the new alias.
"""
if platform.system() == 'Windows' and not alias_path.endswith('.lnk'):
alias_path += '.lnk'
if os.path.lexists(alias_path):
os.remove(alias_path)
if platform.system() == 'Windows':
from win32com import client
shell = client.Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(alias_path)
shortcut.Targetpath = target_path
shortcut.save()
else:
os.symlink(target_path, alias_path)
def get_current_epoch_time():
"""Current epoch time in milliseconds.
Returns:
An integer representing the current epoch time in milliseconds.
"""
return int(round(time.time() * 1000))
def get_current_human_time():
"""Returns the current time in human readable format.
Returns:
The current time stamp in Month-Day-Year Hour:Min:Sec format.
"""
return time.strftime("%m-%d-%Y %H:%M:%S ")
def epoch_to_human_time(epoch_time):
"""Converts an epoch timestamp to human readable time.
This essentially converts an output of get_current_epoch_time to an output
of get_current_human_time
Args:
epoch_time: An integer representing an epoch timestamp in milliseconds.
Returns:
A time string representing the input time.
None if input param is invalid.
"""
if isinstance(epoch_time, int):
try:
d = datetime.datetime.fromtimestamp(epoch_time / 1000)
return d.strftime("%m-%d-%Y %H:%M:%S ")
except ValueError:
return None
def get_timezone_olson_id():
"""Return the Olson ID of the local (non-DST) timezone.
Returns:
A string representing one of the Olson IDs of the local (non-DST)
timezone.
"""
tzoffset = int(time.timezone / 3600)
gmt = None
if tzoffset <= 0:
gmt = "GMT+{}".format(-tzoffset)
else:
gmt = "GMT-{}".format(tzoffset)
return GMT_to_olson[gmt]
def find_files(paths, file_predicate):
"""Locate files whose names and extensions match the given predicate in
the specified directories.
Args:
paths: A list of directory paths where to find the files.
file_predicate: A function that returns True if the file name and
extension are desired.
Returns:
A list of files that match the predicate.
"""
file_list = []
for path in paths:
p = abs_path(path)
for dirPath, _, fileList in os.walk(p):
for fname in fileList:
name, ext = os.path.splitext(fname)
if file_predicate(name, ext):
file_list.append((dirPath, name, ext))
return file_list
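# Illustrative use of find_files (the directory and predicate are hypothetical):
#
#     yaml_files = find_files(['/tmp/testbeds'],
#                             lambda name, ext: ext == '.yml')
#     # -> e.g. [('/tmp/testbeds', 'sample_config', '.yml')]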
def load_file_to_base64_str(f_path):
"""Loads the content of a file into a base64 string.
Args:
f_path: full path to the file including the file name.
Returns:
A base64 string representing the content of the file in utf-8 encoding.
"""
path = abs_path(f_path)
with open(path, 'rb') as f:
f_bytes = f.read()
base64_str = base64.b64encode(f_bytes).decode("utf-8")
return base64_str
def find_field(item_list, cond, comparator, target_field):
"""Finds the value of a field in a dict object that satisfies certain
conditions.
Args:
item_list: A list of dict objects.
cond: A param that defines the condition.
        comparator: A function that checks if a dict satisfies the condition.
target_field: Name of the field whose value to be returned if an item
satisfies the condition.
Returns:
Target value or None if no item satisfies the condition.
"""
for item in item_list:
if comparator(item, cond) and target_field in item:
return item[target_field]
return None
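# Illustrative use of find_field (the item list below is hypothetical):
#
#     devices = [{'serial': 'abc123', 'label': 'dut'},
#                {'serial': 'xyz789', 'label': 'ref'}]
#     serial = find_field(devices, 'dut',
#                         lambda item, cond: item.get('label') == cond,
#                         'serial')
#     # -> 'abc123'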
def rand_ascii_str(length):
"""Generates a random string of specified length, composed of ascii letters
and digits.
Args:
length: The number of characters in the string.
Returns:
The random string generated.
"""
letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
return ''.join(letters)
# Thead/Process related functions.
def concurrent_exec(func, param_list):
"""Executes a function with different parameters pseudo-concurrently.
This is basically a map function. Each element (should be an iterable) in
the param_list is unpacked and passed into the function. Due to Python's
GIL, there's no true concurrency. This is suited for IO-bound tasks.
Args:
        func: The function that performs a task.
param_list: A list of iterables, each being a set of params to be
passed into the function.
Returns:
A list of return values from each function execution. If an execution
caused an exception, the exception object will be the corresponding
result.
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
# Start the load operations and mark each future with its params
future_to_params = {executor.submit(func, *p): p for p in param_list}
return_vals = []
for future in concurrent.futures.as_completed(future_to_params):
params = future_to_params[future]
try:
return_vals.append(future.result())
except Exception as exc:
logging.exception("{} generated an exception: {}".format(
params, traceback.format_exc()))
return_vals.append(exc)
return return_vals
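# Illustrative use of concurrent_exec; the worker function and parameter tuples
# are hypothetical. Each tuple is unpacked into the function's arguments.
#
#     def _label(host, port):
#         return '%s:%d' % (host, port)
#
#     results = concurrent_exec(_label, [('10.0.0.1', 5555), ('10.0.0.2', 5555)])
#     # -> ['10.0.0.1:5555', '10.0.0.2:5555'] (order follows completion, and any
#     #    exception raised by a call appears as the exception object itself)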
def start_standing_subprocess(cmd, shell=False):
"""Starts a long-running subprocess.
This is not a blocking call and the subprocess started by it should be
explicitly terminated with stop_standing_subprocess.
For short-running commands, you should use subprocess.check_call, which
blocks.
Args:
cmd: string, the command to start the subprocess with.
shell: bool, True to run this command through the system shell,
            False to invoke it directly. See subprocess.Popen() docs.
Returns:
The subprocess that was started.
"""
logging.debug('Starting standing subprocess with: %s', cmd)
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell)
# Leaving stdin open causes problems for input, e.g. breaking the
# code.inspect() shell (http://stackoverflow.com/a/25512460/1612937), so
# explicitly close it assuming it is not needed for standing subprocesses.
proc.stdin.close()
proc.stdin = None
logging.debug('Started standing subprocess %d', proc.pid)
return proc
def stop_standing_subprocess(proc, kill_signal=signal.SIGTERM):
"""Stops a subprocess started by start_standing_subprocess.
    Before killing the process, we check whether it is still running; if it has
    already terminated, an Error is raised.
Catches and ignores the PermissionError which only happens on Macs.
Args:
proc: Subprocess to terminate.
Raises:
Error: if the subprocess could not be stopped.
"""
pid = proc.pid
logging.debug('Stopping standing subprocess %d', pid)
process = psutil.Process(pid)
failed = []
try:
children = process.children(recursive=True)
except AttributeError:
# Handle versions <3.0.0 of psutil.
children = process.get_children(recursive=True)
for child in children:
try:
child.kill()
child.wait(timeout=10)
except psutil.NoSuchProcess:
# Ignore if the child process has already terminated.
pass
except:
failed.append(child.pid)
logging.exception('Failed to kill standing subprocess %d',
child.pid)
try:
process.kill()
process.wait(timeout=10)
except psutil.NoSuchProcess:
# Ignore if the process has already terminated.
pass
except:
failed.append(pid)
logging.exception('Failed to kill standing subprocess %d', pid)
if failed:
raise Error('Failed to kill standing subprocesses: %s' % failed)
logging.debug('Stopped standing subprocess %d', pid)
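# Typical pairing of the two helpers above (the command is illustrative):
#
#     proc = start_standing_subprocess('adb logcat', shell=True)
#     ...  # interact with proc.stdout while the command keeps running
#     stop_standing_subprocess(proc)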
def wait_for_standing_subprocess(proc, timeout=None):
"""Waits for a subprocess started by start_standing_subprocess to finish
or times out.
    Propagates the exception raised by the subprocess.wait() function.
    The subprocess.TimeoutExpired exception is raised if the process timed out
    rather than terminating.
If no exception is raised: the subprocess terminated on its own. No need
to call stop_standing_subprocess() to kill it.
If an exception is raised: the subprocess is still alive - it did not
terminate. Either call stop_standing_subprocess() to kill it, or call
wait_for_standing_subprocess() to keep waiting for it to terminate on its
own.
Args:
        proc: Subprocess to wait for.
timeout: An integer number of seconds to wait before timing out.
"""
proc.wait(timeout)
def get_available_host_port():
"""Gets a host port number available for adb forward.
Returns:
An integer representing a port number on the host available for adb
forward.
Raises:
Error: when no port is found after MAX_PORT_ALLOCATION_RETRY times.
"""
for _ in range(MAX_PORT_ALLOCATION_RETRY):
port = portpicker.PickUnusedPort()
# Make sure adb is not using this port so we don't accidentally
# interrupt ongoing runs by trying to bind to the port.
if port not in adb.list_occupied_adb_ports():
return port
raise Error('Failed to find available port after {} retries'.format(
MAX_PORT_ALLOCATION_RETRY))
def grep(regex, output):
"""Similar to linux's `grep`, this returns the line in an output stream
that matches a given regex pattern.
It does not rely on the `grep` binary and is not sensitive to line endings,
so it can be used cross-platform.
Args:
regex: string, a regex that matches the expected pattern.
output: byte string, the raw output of the adb cmd.
Returns:
A list of strings, all of which are output lines that matches the
regex pattern.
"""
lines = output.decode('utf-8').strip().splitlines()
results = []
for line in lines:
if re.search(regex, line):
results.append(line.strip())
return results
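# Illustrative use of grep on raw command output (command and pattern are hypothetical):
#
#     out = subprocess.check_output(['adb', 'shell', 'dumpsys', 'battery'])
#     level_lines = grep(r'level:', out)   # e.g. ['level: 100']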
| {
"content_hash": "460ed31fcebb8db350c06c883a59874e",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 79,
"avg_line_length": 31.429611650485437,
"alnum_prop": 0.6480037068499498,
"repo_name": "l-meng/mobly",
"id": "79bf9e208a046ae43d92e0e8c1e657d472f9d53c",
"size": "13525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mobly/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "440648"
}
],
"symlink_target": ""
} |
"""Test that custom generators can be passed to --format
"""
import TestGyp
test = TestGyp.TestGypCustom(format='mygenerator.py')
test.run_gyp('test.gyp')
# mygenerator.py should generate a file called MyBuildFile containing
# "Testing..." alongside the gyp file.
test.must_match('MyBuildFile', 'Testing...\n')
test.pass_test()
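# A minimal sketch of what mygenerator.py could look like, assuming the standard
# gyp generator hooks (generator_default_variables and GenerateOutput); the real
# generator used by this test may differ:
#
#     import os
#
#     generator_default_variables = {}
#
#     def GenerateOutput(target_list, target_dicts, data, params):
#         for build_file in data:
#             out = os.path.join(os.path.dirname(build_file), 'MyBuildFile')
#             with open(out, 'w') as f:
#                 f.write('Testing...\n')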
| {
"content_hash": "02728522cc8c7fb14034af481cbff1a9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 69,
"avg_line_length": 24.785714285714285,
"alnum_prop": 0.7060518731988472,
"repo_name": "Jet-Streaming/gyp",
"id": "5e7e04f9643a123a6d2ebffdaac241cdc2296828",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/custom-generator/gyptest-custom-generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1194"
},
{
"name": "Batchfile",
"bytes": "1133"
},
{
"name": "C",
"bytes": "38674"
},
{
"name": "C++",
"bytes": "41140"
},
{
"name": "Objective-C",
"bytes": "10353"
},
{
"name": "Objective-C++",
"bytes": "1958"
},
{
"name": "Python",
"bytes": "3290293"
},
{
"name": "Shell",
"bytes": "12644"
},
{
"name": "Swift",
"bytes": "124"
}
],
"symlink_target": ""
} |
from unimodalarraymax import unimodalmax
import unittest
class TestUnimodalArrayMaxElement(unittest.TestCase):
def setUp(self):
self.unimodalarray = [1,6,5,4,2]
def test_unimodal_array_max_element_finder(self):
maxElement = unimodalmax(self.unimodalarray)
self.assertEqual(maxElement, 6)
if __name__ == '__main__':
unittest.main()
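# The unimodalarraymax module itself is not shown here; a sketch of one possible
# O(log n) implementation, assuming the array strictly increases and then strictly
# decreases:
#
#     def unimodalmax(arr):
#         lo, hi = 0, len(arr) - 1
#         while lo < hi:
#             mid = (lo + hi) // 2
#             if arr[mid] < arr[mid + 1]:
#                 lo = mid + 1   # still on the rising slope
#             else:
#                 hi = mid       # at or past the peak
#         return arr[lo]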
| {
"content_hash": "2030f3fc25ac2f5d1f15618b521f856b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 53,
"avg_line_length": 25.214285714285715,
"alnum_prop": 0.7337110481586402,
"repo_name": "mez/algorithms_with_python",
"id": "ac9b578948edb72eb218d74b1e5d9534cf389c5a",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unimodal_array_max/unimodalarraymax_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7539"
}
],
"symlink_target": ""
} |
import pytest
from azure.core.utils import parse_connection_string
from devtools_testutils import AzureMgmtTestCase
class CoreConnectionStringParserTests(AzureMgmtTestCase):
# cSpell:disable
def test_parsing_with_case_sensitive_keys_for_sensitive_conn_str(self, **kwargs):
conn_str = 'Endpoint=XXXXENDPOINTXXXX;SharedAccessKeyName=XXXXPOLICYXXXX;SharedAccessKey=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, True)
assert parse_result["Endpoint"] == 'XXXXENDPOINTXXXX'
assert parse_result["SharedAccessKeyName"] == 'XXXXPOLICYXXXX'
assert parse_result["SharedAccessKey"] == 'THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
with pytest.raises(KeyError):
parse_result["endPoint"]
with pytest.raises(KeyError):
parse_result["sharedAccESSkEynAME"]
with pytest.raises(KeyError):
parse_result["sharedaccesskey"]
def test_parsing_with_case_insensitive_keys_for_sensitive_conn_str(self, **kwargs):
conn_str = 'Endpoint=XXXXENDPOINTXXXX;SharedAccessKeyName=XXXXPOLICYXXXX;SharedAccessKey=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
assert parse_result["endpoint"] == 'XXXXENDPOINTXXXX'
assert parse_result["sharedaccesskeyname"] == 'XXXXPOLICYXXXX'
assert parse_result["sharedaccesskey"] == 'THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
def test_parsing_with_case_insensitive_keys_for_insensitive_conn_str(self, **kwargs):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
assert parse_result["endpoint"] == 'XXXXENDPOINTXXXX'
assert parse_result["sharedaccesskeyname"] == 'XXXXPOLICYXXXX'
assert parse_result["sharedaccesskey"] == 'THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
def test_error_with_duplicate_case_sensitive_keys_for_sensitive_conn_str(self, **kwargs):
conn_str = 'Endpoint=XXXXENDPOINTXXXX;Endpoint=XXXXENDPOINT2XXXX;SharedAccessKeyName=XXXXPOLICYXXXX;SharedAccessKey=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
with pytest.raises(ValueError) as e:
parse_result = parse_connection_string(conn_str, True)
assert str(e.value) == "Connection string is either blank or malformed."
def test_success_with_duplicate_case_sensitive_keys_for_sensitive_conn_str(self, **kwargs):
conn_str = 'enDpoInt=XXXXENDPOINTXXXX;Endpoint=XXXXENDPOINT2XXXX;'
parse_result = parse_connection_string(conn_str, True)
assert parse_result["enDpoInt"] == 'XXXXENDPOINTXXXX'
assert parse_result["Endpoint"] == 'XXXXENDPOINT2XXXX'
def test_error_with_duplicate_case_insensitive_keys_for_insensitive_conn_str(self, **kwargs):
conn_str = 'endPoinT=XXXXENDPOINTXXXX;eNdpOint=XXXXENDPOINT2XXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
with pytest.raises(ValueError) as e:
parse_result = parse_connection_string(conn_str, False)
assert str(e.value) == "Duplicate key in connection string: endpoint"
def test_error_with_malformed_conn_str(self):
for conn_str in ["", "foobar", "foo;bar;baz", ";", "foo=;bar=;", "=", "=;=="]:
with pytest.raises(ValueError) as e:
parse_result = parse_connection_string(conn_str)
self.assertEqual(str(e.value), "Connection string is either blank or malformed.")
def test_case_insensitive_clear_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
parse_result.clear()
assert len(parse_result) == 0
def test_case_insensitive_copy_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
copied = parse_result.copy()
assert copied == parse_result
def test_case_insensitive_get_method(self):
conn_str = 'Endpoint=XXXXENDPOINTXXXX;SharedAccessKeyName=XXXXPOLICYXXXX;SharedAccessKey=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
assert parse_result.get("sharedaccesskeyname") == 'XXXXPOLICYXXXX'
assert parse_result.get("sharedaccesskey") == 'THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
assert parse_result.get("accesskey") is None
assert parse_result.get("accesskey", "XXothertestkeyXX=") == "XXothertestkeyXX="
def test_case_insensitive_keys_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
keys = parse_result.keys()
assert len(keys) == 3
assert "endpoint" in keys
def test_case_insensitive_pop_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
endpoint = parse_result.pop("endpoint")
sharedaccesskey = parse_result.pop("sharedaccesskey")
assert len(parse_result) == 1
assert endpoint == "XXXXENDPOINTXXXX"
assert sharedaccesskey == "THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX="
def test_case_insensitive_update_with_insensitive_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
conn_str2 = 'hostName=XXXXENDPOINTXXXX;ACCessKEy=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX=;'
parse_result_insensitive = parse_connection_string(conn_str, False)
parse_result_insensitive2 = parse_connection_string(conn_str2, False)
parse_result_insensitive.update(parse_result_insensitive2)
assert len(parse_result_insensitive) == 5
assert parse_result_insensitive["hostname"] == "XXXXENDPOINTXXXX"
assert parse_result_insensitive["accesskey"] == "THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX="
# check that update replaces duplicate case-insensitive keys
conn_str_duplicate_key = "endpoint=XXXXENDPOINT2XXXX;ACCessKEy=TestKey"
parse_result_insensitive_dupe = parse_connection_string(conn_str_duplicate_key, False)
parse_result_insensitive.update(parse_result_insensitive_dupe)
assert parse_result_insensitive_dupe["endpoint"] == "XXXXENDPOINT2XXXX"
assert parse_result_insensitive_dupe["accesskey"] == "TestKey"
assert len(parse_result_insensitive) == 5
def test_case_sensitive_update_with_insensitive_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
conn_str2 = 'hostName=XXXXENDPOINTXXXX;ACCessKEy=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX=;'
parse_result_insensitive = parse_connection_string(conn_str, False)
parse_result_sensitive = parse_connection_string(conn_str2, True)
parse_result_sensitive.update(parse_result_insensitive)
assert len(parse_result_sensitive) == 5
assert parse_result_sensitive["hostName"] == "XXXXENDPOINTXXXX"
with pytest.raises(KeyError):
parse_result_sensitive["hostname"]
def test_case_insensitive_values_method(self):
conn_str = 'enDpoiNT=XXXXENDPOINTXXXX;sharedaccesskeyname=XXXXPOLICYXXXX;SHAREDACCESSKEY=THISISATESTKEYXXXXXXXXXXXXXXXXXXXXXXXXXXXX='
parse_result = parse_connection_string(conn_str, False)
values = parse_result.values()
assert len(values) == 3
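# Usage sketch (illustrative only, not part of the original test suite): with
# the second argument set to False, parse_connection_string returns a mapping
# whose lookups ignore key casing, so the following is assumed to hold:
#   result = parse_connection_string('Endpoint=X;SharedAccessKey=Y', False)
#   result['endpoint'] == result['ENDPOINT'] == 'X'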
# cSpell:enable | {
"content_hash": "d3f80db2a90db81ce73c5042bfdf39f4",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 168,
"avg_line_length": 61.93893129770992,
"alnum_prop": 0.7335469558787281,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1a3957e5b35dbb1ed118c863893ec6bf7668f424",
"size": "8114",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/core/azure-core/tests/test_connection_string_parsing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Tests transmission of tickets across gRPC-on-the-wire."""
import unittest
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc._links import service
from grpc.framework.interfaces.links import links
from grpc_test import test_common
from grpc_test._links import _proto_scenarios
from grpc_test.framework.common import test_constants
from grpc_test.framework.interfaces.links import test_cases
from grpc_test.framework.interfaces.links import test_utilities
_IDENTITY = lambda x: x
class TransmissionTest(test_cases.TransmissionTest, unittest.TestCase):
def create_transmitting_links(self):
service_link = service.service_link(
{self.group_and_method(): self.deserialize_request},
{self.group_and_method(): self.serialize_response})
port = service_link.add_port(0, None)
service_link.start()
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_link = invocation.invocation_link(
channel, 'localhost',
{self.group_and_method(): self.serialize_request},
{self.group_and_method(): self.deserialize_response})
invocation_link.start()
return invocation_link, service_link
def destroy_transmitting_links(self, invocation_side_link, service_side_link):
invocation_side_link.stop()
service_side_link.stop_gracefully()
def create_invocation_initial_metadata(self):
return (
('first invocation initial metadata key', 'just a string value'),
('second invocation initial metadata key', '0123456789'),
('third invocation initial metadata key-bin', '\x00\x57' * 100),
)
def create_invocation_terminal_metadata(self):
return None
def create_service_initial_metadata(self):
return (
('first service initial metadata key', 'just another string value'),
('second service initial metadata key', '9876543210'),
('third service initial metadata key-bin', '\x00\x59\x02' * 100),
)
def create_service_terminal_metadata(self):
return (
('first service terminal metadata key', 'yet another string value'),
('second service terminal metadata key', 'abcdefghij'),
('third service terminal metadata key-bin', '\x00\x37' * 100),
)
def create_invocation_completion(self):
return None, None
def create_service_completion(self):
return _intermediary_low.Code.OK, 'An exuberant test "details" message!'
def assertMetadataTransmitted(self, original_metadata, transmitted_metadata):
self.assertTrue(
test_common.metadata_transmitted(
original_metadata, transmitted_metadata),
'%s erroneously transmitted as %s' % (
original_metadata, transmitted_metadata))
class RoundTripTest(unittest.TestCase):
def testZeroMessageRoundTrip(self):
test_operation_id = object()
test_group = 'test package.Test Group'
test_method = 'test method'
identity_transformation = {(test_group, test_method): _IDENTITY}
test_code = _intermediary_low.Code.OK
test_message = 'a test message'
service_link = service.service_link(
identity_transformation, identity_transformation)
service_mate = test_utilities.RecordingLink()
service_link.join_link(service_mate)
port = service_link.add_port(0, None)
service_link.start()
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_link = invocation.invocation_link(
channel, 'localhost', identity_transformation, identity_transformation)
invocation_mate = test_utilities.RecordingLink()
invocation_link.join_link(invocation_mate)
invocation_link.start()
invocation_ticket = links.Ticket(
test_operation_id, 0, test_group, test_method,
links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
None, None, None, None, links.Ticket.Termination.COMPLETION, None)
invocation_link.accept_ticket(invocation_ticket)
service_mate.block_until_tickets_satisfy(test_cases.terminated)
service_ticket = links.Ticket(
service_mate.tickets()[-1].operation_id, 0, None, None, None, None,
None, None, None, None, test_code, test_message,
links.Ticket.Termination.COMPLETION, None)
service_link.accept_ticket(service_ticket)
invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
invocation_link.stop()
service_link.stop_gracefully()
self.assertIs(
service_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
self.assertIs(
invocation_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
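# Note (added for clarity): the assertions above close out a full round trip --
# the invocation link sends a COMPLETION ticket, the service side observes it,
# answers with its own COMPLETION ticket carrying the status code and details
# message, and both recording links are then checked for terminal tickets.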
def _perform_scenario_test(self, scenario):
test_operation_id = object()
test_group, test_method = scenario.group_and_method()
test_code = _intermediary_low.Code.OK
test_message = 'a scenario test message'
service_link = service.service_link(
{(test_group, test_method): scenario.deserialize_request},
{(test_group, test_method): scenario.serialize_response})
service_mate = test_utilities.RecordingLink()
service_link.join_link(service_mate)
port = service_link.add_port(0, None)
service_link.start()
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_link = invocation.invocation_link(
channel, 'localhost',
{(test_group, test_method): scenario.serialize_request},
{(test_group, test_method): scenario.deserialize_response})
invocation_mate = test_utilities.RecordingLink()
invocation_link.join_link(invocation_mate)
invocation_link.start()
invocation_ticket = links.Ticket(
test_operation_id, 0, test_group, test_method,
links.Ticket.Subscription.FULL, test_constants.LONG_TIMEOUT, None, None,
None, None, None, None, None, None)
invocation_link.accept_ticket(invocation_ticket)
requests = scenario.requests()
for request_index, request in enumerate(requests):
request_ticket = links.Ticket(
test_operation_id, 1 + request_index, None, None, None, None, 1, None,
request, None, None, None, None, None)
invocation_link.accept_ticket(request_ticket)
service_mate.block_until_tickets_satisfy(
test_cases.at_least_n_payloads_received_predicate(1 + request_index))
response_ticket = links.Ticket(
service_mate.tickets()[0].operation_id, request_index, None, None,
None, None, 1, None, scenario.response_for_request(request), None,
None, None, None, None)
service_link.accept_ticket(response_ticket)
invocation_mate.block_until_tickets_satisfy(
test_cases.at_least_n_payloads_received_predicate(1 + request_index))
request_count = len(requests)
invocation_completion_ticket = links.Ticket(
test_operation_id, request_count + 1, None, None, None, None, None,
None, None, None, None, None, links.Ticket.Termination.COMPLETION,
None)
invocation_link.accept_ticket(invocation_completion_ticket)
service_mate.block_until_tickets_satisfy(test_cases.terminated)
service_completion_ticket = links.Ticket(
service_mate.tickets()[0].operation_id, request_count, None, None, None,
None, None, None, None, None, test_code, test_message,
links.Ticket.Termination.COMPLETION, None)
service_link.accept_ticket(service_completion_ticket)
invocation_mate.block_until_tickets_satisfy(test_cases.terminated)
invocation_link.stop()
service_link.stop_gracefully()
observed_requests = tuple(
ticket.payload for ticket in service_mate.tickets()
if ticket.payload is not None)
observed_responses = tuple(
ticket.payload for ticket in invocation_mate.tickets()
if ticket.payload is not None)
self.assertTrue(scenario.verify_requests(observed_requests))
self.assertTrue(scenario.verify_responses(observed_responses))
def testEmptyScenario(self):
self._perform_scenario_test(_proto_scenarios.EmptyScenario())
def testBidirectionallyUnaryScenario(self):
self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
def testBidirectionallyStreamingScenario(self):
self._perform_scenario_test(
_proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"content_hash": "763c49d38e76c27ae33065ecf1e770ab",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 41.320197044334975,
"alnum_prop": 0.7039818788745827,
"repo_name": "gpndata/grpc",
"id": "02ddd512c2260759e763758de17ff5758cae08f9",
"size": "9917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_test/grpc_test/_links/_transmission_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "3669210"
},
{
"name": "C#",
"bytes": "775171"
},
{
"name": "C++",
"bytes": "973799"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "196335"
},
{
"name": "Makefile",
"bytes": "2038316"
},
{
"name": "Objective-C",
"bytes": "241053"
},
{
"name": "PHP",
"bytes": "66550"
},
{
"name": "Protocol Buffer",
"bytes": "105002"
},
{
"name": "Python",
"bytes": "1284380"
},
{
"name": "Ruby",
"bytes": "348383"
},
{
"name": "Shell",
"bytes": "25777"
}
],
"symlink_target": ""
} |
class FrameSeqException(Exception):
def __init__(self, msg):
self.msg = msg
def split_frames_by_missing(frames):
""" Splits frames into an array of frames
sequences where each sequence does NOT
contain any missing frames.
If no frames are missing an array with one
entry (frames) are returned.
"""
if len(frames) == 0:
return [[]]
f = frames[0]
expected_num = f.coded_picture_number + 1
cur = [f]
seq = []
for f in frames[1:]:
next_num = f.coded_picture_number
if next_num == expected_num:
cur.append(f)
expected_num = f.coded_picture_number + 1
elif next_num > expected_num:
seq.append(cur)
cur = [f]
expected_num = f.coded_picture_number + 1
else:
s = "Unexpected coded_picture_number %d " \
"should be larger than or equal to %d" % \
(next_num, expected_num)
e = FrameSeqException(s)
raise e
seq.append(cur)
return seq
def are_frames_missing(frames):
""" Checks that there are no gaps in coded
picture number. Does NOT check that there is
a constant time between frames.
"""
splits = split_frames_by_missing(frames)
return len(splits) > 1
def is_fps_fixed(frames):
""" Checks if there is a constant time
between frames (fixed) or not.
Does NOT check for gaps in coded picture
number (lost frames).
"""
pass
def calculate_fps(frames):
""" Calculates an average fps based on
the frames.
"""
pass
def calculate_bitrate(frames):
""" Calculates an average bitrate based
on the frames.
"""
pass
def are_gops_fixed(gops):
""" Checks if each gop consists of the same
number of frames. Needs more than one gop.
"""
pass
def calculate_gop_size(gops):
""" Calculates an average gop size (number of frames
including first key frame) based on the gops.
"""
pass
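# Usage sketch (illustrative only, not part of the original module): the
# helpers above rely solely on the coded_picture_number attribute of each
# frame, so a minimal stand-in object is enough to demonstrate gap detection:
#   class _Frame(object):
#       def __init__(self, num):
#           self.coded_picture_number = num
#   frames = [_Frame(n) for n in (0, 1, 3, 4)]  # frame 2 is missing
#   split_frames_by_missing(frames)  # -> two sequences: [0, 1] and [3, 4]
#   are_frames_missing(frames)       # -> True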
| {
"content_hash": "2d47b41c91fcb291bfb0fd0cafecea30",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 58,
"avg_line_length": 23.20689655172414,
"alnum_prop": 0.5938583457157008,
"repo_name": "2hdddg/pyvidstream",
"id": "e9fad2357c000fabd49e5fccca176ef7c5c475b0",
"size": "2019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vidutil/vidanalyze.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15462"
}
],
"symlink_target": ""
} |
DEFAULT_HOSTS = ['http://gitlab.com']
# GitLab hook domain
HOOK_DOMAIN = None
HOOK_CONTENT_TYPE = 'json'
HOOK_EVENTS = ['push'] # Only log commits
# Max render size in bytes; no max if None
MAX_RENDER_SIZE = None
CACHE = False
| {
"content_hash": "14f84a92469de69569935b8df1835c2a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 42,
"avg_line_length": 21,
"alnum_prop": 0.6926406926406926,
"repo_name": "sloria/osf.io",
"id": "542fec4644ef10ed73743599d428fd31551e83f5",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "addons/gitlab/settings/defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109070"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "263083"
},
{
"name": "JavaScript",
"bytes": "1856674"
},
{
"name": "Mako",
"bytes": "690812"
},
{
"name": "Python",
"bytes": "8397175"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import os
import sys
import djstripe
version = djstripe.__version__
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='dj-stripe',
version=version,
description=djstripe.__summary__,
long_description=readme + '\n\n' + history,
author=djstripe.__author__,
author_email=djstripe.__email__,
url=djstripe.__uri__,
packages=[
'djstripe',
],
package_dir={'djstripe': 'djstripe'},
include_package_data=True,
install_requires=[
'django>=1.4',
'stripe>=1.9.2',
'django-model-utils>=1.4.0',
'django-braces>=1.2.1',
'django-jsonfield>=0.9.10'
],
license=djstripe.__license__,
zip_safe=False,
keywords='stripe django',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3'
],
) | {
"content_hash": "ec53acb18f491e911696a1efdbdc7745",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 66,
"avg_line_length": 27.88135593220339,
"alnum_prop": 0.5902735562310031,
"repo_name": "rawjam/dj-stripe",
"id": "98b12dd65306ded20a9c945f5fa1ce8059a0fcca",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "22023"
},
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "189353"
}
],
"symlink_target": ""
} |
import sys
import argparse
import struct
class Vertex:
def __init__(self, line):
values = line.split(' ')[1:]
values = [float(v) for v in values]
if len(values) == 3:
self.values = values + [1.0]
elif len(values) == 4:
self.values = values
else:
raise Exception('Invalid number of values for vertex')
class Normal:
def __init__(self, line):
values = line.split(' ')[1:]
values = [float(v) for v in values]
if len(values) != 3:
raise Exception('Invalid number of values for normal')
self.values = values
class Face:
class Point:
def __init__(self):
self.vertexIndex = 0
self.normalIndex = 0
self.textureIndex = 0
def __init__(self, line):
self.format = ''
elements = line.split(' ')[1:]
self.points = []
if len(elements) != 3: raise Exception('Invalid number of points on face')
for i in range(len(elements)):
keys = elements[i].split('/')
point = Face.Point()
if keys[0] != '':
if i == 0: self.format = self.format + 'v'
point.vertexIndex = int(keys[0]) - 1
else:
raise Exception('Face did not contain vertex index')
if keys[1] != '':
if i == 0: self.format = self.format + 't'
point.textureIndex = int(keys[1]) - 1
if keys[2] != '':
if i == 0: self.format = self.format + 'n'
point.normalIndex = int(keys[2]) - 1
else:
raise Exception('Face did not contain normal index')
self.points.append(point)
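# Note (added for clarity): Face expects each point in the OBJ "v/vt/vn"
# triplet form; a line such as "f 1//2 3//4 5//6" yields format 'vn' with
# zero-based vertex and normal indices, while bare "f 1 2 3" faces (no
# slashes) are not supported by this parser.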
def convert_to_objects(data):
lines = data.split('\n')
vertexes = []
normals = []
faces = []
texture = []
for line in lines:
if line.startswith('#'):
continue
elif line.startswith('v '):
vertexes.append(Vertex(line))
elif line.startswith('vn '):
normals.append(Normal(line))
elif line.startswith('f '):
faces.append(Face(line))
if len(faces) > 1:
if faces[-1].format != faces[-2].format:
raise Exception('Format of face changed during file')
if len(faces) == 0:
raise Exception('No faces found')
if len(vertexes) == 0:
raise Exception('No vertexes found')
if len(normals) == 0:
raise Exception('No normals found')
return (faces[0].format, faces, vertexes, normals, texture)
def formatDataVn(faces, vertexes, normals):
indices = []
data = []
for f in faces:
for p in f.points:
value = vertexes[p.vertexIndex].values + normals[p.normalIndex].values
if not value in data:
index = len(data)
data.append(value)
indices.append(index)
trace('Adding new indice: data index ' + str(index) + ', value ' + str(value))
else:
index = data.index(value)
indices.append(index)
trace('Reusing value from data index: ' + str(index) + ', value ' + str(value))
return indices, data
def serializeVn32b(indices, attributeData, fo):
data = ''
# Attribute format
data += struct.pack('<cccc', 'v', 'n', ' ', ' ')
# Size of indice element
indicesElementFormat = ''
if len(attributeData) < struct.unpack('<H', '\xff\xff')[0]:  # every index must fit in an unsigned short
data += struct.pack('<B', 2)
indicesElementFormat = '<H'
trace('Size of indice: 2B')
else:
data += struct.pack('<B', 4)
indicesElementFormat = '<I'
trace('Size of indice: 4B')
# Number of indices
data += struct.pack('<I', len(indices))
# Indices
for i in indices:
data += struct.pack(indicesElementFormat, i)
# Attribute data
data += struct.pack('<I', len(attributeData))
for d in attributeData:
data += struct.pack('<fffffff', d[0], d[1], d[2], d[3], d[4], d[5], d[6])
fo.write(data)
trace('Size of data: ' + str(len(data) + 1) # +1 for version which was set in main
+ ', number of indices: ' + str(len(indices))
+ ', number of attribute elements: ' + str(len(attributeData))
)
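# Layout written by serializeVn32b (little endian), summarized for reference
# (comment added; not part of the original script):
#   byte 0       protocol version (written by __main__ before this call)
#   bytes 1-4    attribute format tag, e.g. 'vn  '
#   byte 5       size of each index element in bytes (2 or 4)
#   4 bytes      number of indices, followed by the indices themselves
#   4 bytes      number of attribute elements, followed by 7 floats per
#                element (vertex x, y, z, w and normal x, y, z)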
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts .obj into SToolbox .sm format')
parser.add_argument('-i', '--input_file', type=str, help='Input file', required=True)
parser.add_argument('-o', '--output_file', type=str, help='Output file', required=True)
parser.add_argument('-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.v:
def verbose(str):
print str
else:
verbose = lambda * a: None
global trace
trace = verbose
fi = open(args.input_file, 'r')
fo = open(args.output_file, 'wb')
format, faces, vertexes, normals, texture = convert_to_objects(fi.read())
protocolVersion = 1
fo.write(struct.pack('<b', protocolVersion))
if format == 'vn':
indices, data = formatDataVn(faces, vertexes, normals)
serializeVn32b(indices, data, fo)
else:
raise Exception("Invalid format")
| {
"content_hash": "ead8da009bc5228f32f2057c49c1d95d",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 95,
"avg_line_length": 30.936046511627907,
"alnum_prop": 0.5416275136252584,
"repo_name": "stbd/stoolbox",
"id": "1f165e065f822345803339812dd5fa1c661d25e0",
"size": "5321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/obj-to-sm-conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "143228"
},
{
"name": "CMake",
"bytes": "2841"
},
{
"name": "PowerShell",
"bytes": "6240"
},
{
"name": "Python",
"bytes": "8919"
},
{
"name": "Shell",
"bytes": "2908"
}
],
"symlink_target": ""
} |
import functools
import sys
from argparse import ArgumentParser
import tensorflow as tf
from pprint import pformat
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet.examples.utils import (MLResults,
save_images_collection,
bernoulli_as_pixel,
print_with_title,
bernoulli_flow)
class ExpConfig(spt.Config):
# model parameters
z_dim = 80
x_dim = 784
# training parameters
result_dir = None
write_summary = False
max_epoch = 3000
max_step = None
batch_size = 128
l2_reg = 0.0001
initial_lr = 0.001
lr_anneal_factor = 0.5
lr_anneal_epoch_freq = 300
lr_anneal_step_freq = None
# evaluation parameters
test_n_z = 500
test_batch_size = 128
config = ExpConfig()
@spt.global_reuse
@add_arg_scope
def q_net(x, observed=None, n_z=None):
net = spt.BayesianNet(observed=observed)
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_x = tf.to_float(x)
h_x = spt.layers.dense(h_x, 500)
h_x = spt.layers.dense(h_x, 500)
# sample z ~ q(z|x)
z_logits = spt.layers.dense(h_x, config.z_dim, name='z_logits')
z = net.add('z', spt.Bernoulli(logits=z_logits), n_samples=n_z,
group_ndims=1)
return net
@spt.global_reuse
@add_arg_scope
def p_net(observed=None, n_z=None):
net = spt.BayesianNet(observed=observed)
# sample z ~ p(z)
z = net.add('z', spt.Bernoulli(tf.zeros([1, config.z_dim])),
group_ndims=1, n_samples=n_z)
# compute the hidden features
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_z = tf.to_float(z)
h_z = spt.layers.dense(h_z, 500)
h_z = spt.layers.dense(h_z, 500)
# sample x ~ p(x|z)
x_logits = spt.layers.dense(h_z, config.x_dim, name='x_logits')
x = net.add('x', spt.Bernoulli(logits=x_logits), group_ndims=1)
return net
@spt.global_reuse
def baseline_net(x):
with arg_scope([spt.layers.dense],
activation_fn=tf.nn.leaky_relu,
kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
h_x = tf.to_float(x)
h_x = spt.layers.dense(h_x, 500)
return tf.squeeze(spt.layers.dense(h_x, 1), -1)
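# Note (added for clarity): baseline_net provides the input-dependent control
# variate consumed by the NVIL estimator below; because z is Bernoulli the
# ELBO gradient cannot be reparameterized, and subtracting a learned baseline
# from the learning signal reduces the variance of the score-function
# gradient estimate.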
def main():
# parse the arguments
arg_parser = ArgumentParser()
spt.register_config_arguments(config, arg_parser, title='Model options')
spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
title='TFSnippet options')
arg_parser.parse_args(sys.argv[1:])
# print the config
print_with_title('Configurations', pformat(config.to_dict()), after='\n')
# open the result object and prepare for result directories
results = MLResults(config.result_dir)
results.save_config(config) # save experiment settings for review
results.make_dirs('plotting', exist_ok=True)
results.make_dirs('train_summary', exist_ok=True)
# input placeholders
input_x = tf.placeholder(
dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
learning_rate = spt.AnnealingVariable(
'learning_rate', config.initial_lr, config.lr_anneal_factor)
# derive the loss and lower-bound for training
with tf.name_scope('training'):
train_q_net = q_net(input_x)
train_chain = train_q_net.chain(p_net, observed={'x': input_x})
baseline = baseline_net(input_x)
nvil_loss = tf.reduce_mean(
train_chain.vi.training.nvil(baseline=baseline))
loss = tf.losses.get_regularization_loss() + nvil_loss
# derive the nll and logits output for testing
with tf.name_scope('testing'):
test_q_net = q_net(input_x, n_z=config.test_n_z)
test_chain = test_q_net.chain(
p_net, latent_axis=0, observed={'x': input_x})
test_nll = -tf.reduce_mean(
test_chain.vi.evaluation.is_loglikelihood())
test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())
# derive the optimizer
with tf.name_scope('optimizing'):
optimizer = tf.train.AdamOptimizer(learning_rate)
params = tf.trainable_variables()
grads = optimizer.compute_gradients(loss, var_list=params)
with tf.control_dependencies(
tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_op = optimizer.apply_gradients(grads)
# derive the plotting function
with tf.name_scope('plotting'):
plot_p_net = p_net(n_z=100)
x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))
def plot_samples(loop):
with loop.timeit('plot_time'):
session = spt.utils.get_default_session_or_error()
images = session.run(x_plots)
save_images_collection(
images=images,
filename='plotting/{}.png'.format(loop.epoch),
grid_size=(10, 10),
results=results
)
# prepare for training and testing data
(x_train, y_train), (x_test, y_test) = \
spt.datasets.load_mnist(x_shape=[784])
train_flow = bernoulli_flow(
x_train, config.batch_size, shuffle=True, skip_incomplete=True)
test_flow = bernoulli_flow(
x_test, config.test_batch_size, sample_now=True)
with spt.utils.create_session().as_default():
# train the network
with spt.TrainLoop(params,
max_epoch=config.max_epoch,
max_step=config.max_step,
summary_dir=(results.system_path('train_summary')
if config.write_summary else None),
summary_graph=tf.get_default_graph(),
early_stopping=False) as loop:
trainer = spt.Trainer(
loop, train_op, [input_x], train_flow,
metrics={'loss': loss},
summaries=tf.summary.merge_all(spt.GraphKeys.AUTO_HISTOGRAM)
)
trainer.anneal_after(
learning_rate,
epochs=config.lr_anneal_epoch_freq,
steps=config.lr_anneal_step_freq
)
evaluator = spt.Evaluator(
loop,
metrics={'test_nll': test_nll, 'test_lb': test_lb},
inputs=[input_x],
data_flow=test_flow,
time_metric_name='test_time'
)
evaluator.events.on(
spt.EventKeys.AFTER_EXECUTION,
lambda e: results.update_metrics(evaluator.last_metrics_dict)
)
trainer.evaluate_after_epochs(evaluator, freq=10)
trainer.evaluate_after_epochs(
functools.partial(plot_samples, loop), freq=10)
trainer.log_after_epochs(freq=1)
trainer.run()
# print the final metrics and close the results object
print_with_title('Results', results.format_metrics(), before='\n')
results.close()
if __name__ == '__main__':
main()
| {
"content_hash": "500d0c06d4a963f712bcec74c01e4856",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 80,
"avg_line_length": 34.88317757009346,
"alnum_prop": 0.5828533154722036,
"repo_name": "korepwx/tfsnippet",
"id": "e8a59b945af423d71525e451c835ed3ced4b9e26",
"size": "7489",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/tensorflow-2.5.3",
"path": "tfsnippet/examples/auto_encoders/bernoulli_latent_vae.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "471912"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import RecaptchaEnterpriseServiceTransport
from .grpc import RecaptchaEnterpriseServiceGrpcTransport
from .grpc_asyncio import RecaptchaEnterpriseServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[RecaptchaEnterpriseServiceTransport]]
_transport_registry["grpc"] = RecaptchaEnterpriseServiceGrpcTransport
_transport_registry["grpc_asyncio"] = RecaptchaEnterpriseServiceGrpcAsyncIOTransport
__all__ = (
"RecaptchaEnterpriseServiceTransport",
"RecaptchaEnterpriseServiceGrpcTransport",
"RecaptchaEnterpriseServiceGrpcAsyncIOTransport",
)
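# Usage sketch (illustrative only): a transport class is selected by name from
# the registry, e.g. _transport_registry["grpc_asyncio"] resolves to
# RecaptchaEnterpriseServiceGrpcAsyncIOTransport.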
| {
"content_hash": "ea548ec0645d3d45a70a604e7428bb3f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 84,
"avg_line_length": 37.31578947368421,
"alnum_prop": 0.8293370944992948,
"repo_name": "googleapis/python-recaptcha-enterprise",
"id": "2b433e0cc2236233667c96c464e82af1226c97fd",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/recaptchaenterprise_v1/services/recaptcha_enterprise_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4074"
},
{
"name": "Python",
"bytes": "503467"
},
{
"name": "Shell",
"bytes": "30702"
}
],
"symlink_target": ""
} |
import argparse
import collections
import datetime
import functools
import gc
import inspect
import json
import logging
import operator
import os
import posixpath as webpath
import random
import re
import string
import sys
import threading
from king_phisher import color
from king_phisher import constants
from king_phisher import find
from king_phisher import its
from king_phisher import startup
import dateutil
import dateutil.tz
import email_validator
import jsonschema
import smoke_zephyr.utilities
EMAIL_REGEX = re.compile(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,9}$', flags=re.IGNORECASE)
TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
class FreezableDict(collections.OrderedDict):
"""
A dictionary that can be frozen to prevent further editing. Useful for
debugging. If any function tries to edit a frozen dictionary, a
:py:exc:`RuntimeError` will be raised and a traceback will occur.
"""
__slots__ = ('_frozen',)
def __init__(self, *args, **kwargs):
self._frozen = False
super(FreezableDict, self).__init__(*args, **kwargs)
def __repr__(self):
return "<{0} frozen={1} {2}>".format(self.__class__.__name__, self._frozen, super(FreezableDict, self).__repr__())
def __setitem__(self, *args, **kwargs):
if self._frozen:
raise RuntimeError('Frozen dictionary cannot be modified')
super(FreezableDict, self).__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
if self._frozen:
raise RuntimeError('Frozen dictionary cannot be modified')
super(FreezableDict, self).__delitem__(*args, **kwargs)
def pop(self, *args, **kwargs):
if self._frozen:
raise RuntimeError('Frozen dictionary cannot be modified')
return super(FreezableDict, self).pop(*args, **kwargs)
def update(self, *args, **kwargs):
if self._frozen:
raise RuntimeError('Frozen dictionary cannot be modified')
super(FreezableDict, self).update(*args, **kwargs)
def popitem(self, *args, **kwargs):
if self._frozen:
raise RuntimeError('Frozen dictionary cannot be modified')
return super(FreezableDict, self).popitem(*args, **kwargs)
def clear(self, *args, **kwargs):
if self._frozen:
raise RuntimeError('Frozen dictionary cannot be modified')
super(FreezableDict, self).clear(*args, **kwargs)
def freeze(self):
"""
Freeze the dictionary to prevent further editing.
"""
self._frozen = True
def thaw(self):
"""
Thaw the dictionary to once again enable editing.
"""
self._frozen = False
@property
def frozen(self):
"""
Whether or not the dictionary is frozen and can not be modified.
:rtype: bool
"""
return self._frozen
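# Usage sketch (illustrative only, not part of the original module):
#   config = FreezableDict(server='127.0.0.1', port=8080)
#   config['port'] = 443   # allowed while thawed
#   config.freeze()
#   config['port'] = 80    # raises RuntimeError
#   config.thaw()
#   del config['port']     # allowed again after thawing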
class Mock(object):
"""
A fake object used to replace missing imports when generating documentation.
"""
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __add__(self, other):
return other
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return os.devnull
else:
return Mock()
def __or__(self, other):
return other
@classmethod
def __setattr__(cls, name, value):
pass
def __getitem__(self, name):
return Mock()
def __setitem__(self, name, value):
pass
def argp_add_args(parser, default_root=''):
"""
Add standard arguments to a new :py:class:`argparse.ArgumentParser` instance
for configuring logging options from the command line and displaying the
version information.
.. note::
This function installs a hook to *parser.parse_args* to automatically
handle options which it adds. This includes setting up a stream logger
based on the added options.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
:param str default_root: The default root logger to specify.
"""
startup.argp_add_default_args(parser, default_root=default_root)
@functools.wraps(parser.parse_args)
def parse_args_hook(*args, **kwargs):
arguments = parser._parse_args(*args, **kwargs)
configure_stream_logger(arguments.logger, arguments.loglvl)
gc.set_debug(arguments.gc_debug_stats | arguments.gc_debug_leak)
return arguments
parser._parse_args = parser.parse_args
parser.parse_args = parse_args_hook
return parser
def assert_arg_type(arg, arg_type, arg_pos=1, func_name=None):
"""
Check that an argument is an instance of the specified type and if not
raise a :py:exc:`TypeError` exception with a meaningful message. If
*func_name* is not specified, it will be determined by examining the stack.
:param arg: The argument to check.
:param arg_type: The type or sequence of types that *arg* can be.
:type arg_type: list, tuple, type
:param int arg_pos: The position of the argument in the function.
:param str func_name: The name of the function the argument is for.
"""
if isinstance(arg, arg_type):
return
if func_name is None:
parent_frame = inspect.stack()[1][0]
func_name = parent_frame.f_code.co_name
if isinstance(arg_type, (list, tuple)):
if len(arg_type) == 1:
arg_type = arg_type[0].__name__
else:
arg_type = tuple(at.__name__ for at in arg_type)
arg_type = ', '.join(arg_type[:-1]) + ' or ' + arg_type[-1]
else:
arg_type = arg_type.__name__
raise TypeError("{0}() argument {1} must be {2}, not {3}".format(func_name, arg_pos, arg_type, type(arg).__name__))
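# Example (added for clarity): a failing check raises a descriptive TypeError,
# e.g. assert_arg_type('foo', int) raises
#   TypeError: <caller>() argument 1 must be int, not str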
def configure_stream_logger(logger, level=None):
"""
Configure the default stream handler for logging messages to the console.
This also configures the basic logging environment for the application.
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
if level is None:
level = constants.DEFAULT_LOG_LEVEL
if isinstance(level, str):
level = getattr(logging, level)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if its.on_linux:
console_log_handler.setFormatter(color.ColoredLogFormatter('%(levelname)s %(message)s'))
else:
console_log_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
def datetime_local_to_utc(dt):
"""
Convert a :py:class:`datetime.datetime` instance from the local time to UTC
time.
:param dt: The time to convert from local to UTC.
:type dt: :py:class:`datetime.datetime`
:return: The time converted to the UTC timezone.
:rtype: :py:class:`datetime.datetime`
"""
dt = dt.replace(tzinfo=dateutil.tz.tzlocal())
dt = dt.astimezone(dateutil.tz.tzutc())
return dt.replace(tzinfo=None)
def datetime_utc_to_local(dt):
"""
Convert a :py:class:`datetime.datetime` instance from UTC time to the local
time.
:param dt: The time to convert from UTC to local.
:type dt: :py:class:`datetime.datetime`
:return: The time converted to the local timezone.
:rtype: :py:class:`datetime.datetime`
"""
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
dt = dt.astimezone(dateutil.tz.tzlocal())
return dt.replace(tzinfo=None)
def format_datetime(dt, encoding='utf-8'):
"""
Format a date time object into a string. If the object *dt* is not an
instance of :py:class:`datetime.datetime` then an empty string will be
returned.
:param dt: The object to format.
:type dt: :py:class:`datetime.datetime`
:param str encoding: The encoding to use to coerce the return value into a unicode string.
:return: The string representing the formatted time.
:rtype: str
"""
if isinstance(dt, datetime.datetime):
formatted = dt.strftime(TIMESTAMP_FORMAT)
else:
formatted = ''
if isinstance(formatted, bytes):
formatted = formatted.decode(encoding)
return formatted
def is_valid_email_address(email_address):
"""
Check that the string specified appears to be a valid email address.
:param str email_address: The email address to validate.
:return: Whether the email address appears to be valid or not.
:rtype: bool
"""
if email_address is None:
return False
try:
email_validator.validate_email(email_address, allow_empty_local=False, check_deliverability=False)
except email_validator.EmailNotValidError:
return False
return True
def open_uri(uri):
"""
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
:param str uri: The URI to open.
"""
proc_args = []
if sys.platform.startswith('win'):
proc_args.append(smoke_zephyr.utilities.which('cmd.exe'))
proc_args.append('/c')
proc_args.append('start')
elif smoke_zephyr.utilities.which('gvfs-open'):
proc_args.append(smoke_zephyr.utilities.which('gvfs-open'))
elif smoke_zephyr.utilities.which('xdg-open'):
proc_args.append(smoke_zephyr.utilities.which('xdg-open'))
else:
raise RuntimeError('could not find suitable application to open uri')
proc_args.append(uri)
return startup.start_process(proc_args)
def parse_datetime(ts):
"""
Parse a time stamp into a :py:class:`datetime.datetime` instance. The time
stamp must be in a compatible format, as would have been returned from the
:py:func:`.format_datetime` function.
:param str ts: The timestamp to parse.
:return: The parsed timestamp.
:rtype: :py:class:`datetime.datetime`
"""
assert_arg_type(ts, str)
return datetime.datetime.strptime(ts, TIMESTAMP_FORMAT)
def password_is_complex(password, min_len=12):
"""
Check that the specified string meets standard password complexity
requirements.
:param str password: The password to validate.
:param int min_len: The minimum length the password should be.
:return: Whether the string appears to be complex or not.
:rtype: bool
"""
has_upper = False
has_lower = False
has_digit = False
if len(password) < min_len:
return False
for char in password:
if char.isupper():
has_upper = True
if char.islower():
has_lower = True
if char.isdigit():
has_digit = True
if has_upper and has_lower and has_digit:
return True
return False
def make_message_uid(upper=True, lower=True, digits=True):
"""
Creates a random string of specified character set to be used as a message
id. At least one of *upper*, *lower*, or *digits* must be ``True``.
:param bool upper: Include upper case characters in the UID.
:param bool lower: Include lower case characters in the UID.
:param bool digits: Include digits in the UID.
:return: String of characters from the random_string function.
:rtype: str
"""
charset = ''
if upper:
charset += string.ascii_uppercase
if lower:
charset += string.ascii_lowercase
if digits:
charset += string.digits
if not charset:
raise ValueError('at least one of upper, lower, or digits must be True')
return random_string(16, charset=charset)
def make_webrelpath(path):
"""
Forcefully make *path* into a web-suitable relative path. This will strip
off leading and trailing directory separators.
.. versionadded:: 1.14.0
:param str path: The path to convert into a web-suitable relative path.
:return: The converted path.
:rtype: str
"""
if not path.startswith(webpath.sep):
path = webpath.sep + path
path = webpath.relpath(path, webpath.sep)
if path == webpath.curdir:
path = ''
return path
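# Examples (added for clarity, mirroring the behaviour above):
#   make_webrelpath('/foo/bar/')  -> 'foo/bar'
#   make_webrelpath('foo/bar')    -> 'foo/bar'
#   make_webrelpath('/')          -> ''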
def make_visit_uid():
"""
Creates a random string of characters and numbers to be used as a visit id.
:return: String of characters from the random_string function.
:rtype: str
"""
return random_string(24)
def nonempty_string(value):
"""
Convert *value* into either a non-empty string or None. This will also
strip leading and trailing whitespace.
:param str value: The value to convert.
:return: Either the non-empty string or None.
"""
if not value:
return None
value = value.strip()
return value if value else None
def random_string(size, charset=None):
"""
Generate a random string consisting of uppercase letters, lowercase letters
and numbers of the specified size.
:param int size: The size of the string to make.
:return: The string containing the random characters.
:rtype: str
"""
charset = charset or string.ascii_letters + string.digits
return ''.join(random.choice(charset) for _ in range(size))
def random_string_lower_numeric(size):
"""
Generate a random string consisting of lowercase letters and numbers of the
specified size.
:param int size: The size of the string to make.
:return: The string containing the random characters.
:rtype: str
"""
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))
def switch(value, comp=operator.eq, swapped=False):
"""
A pure Python implementation of a switch case statement. *comp* will be used
as a comparison function and passed two arguments of *value* and the
provided case respectively.
Switch case example usage:
.. code-block:: python
for case in switch(2):
if case(1):
print('case 1 matched!')
break
if case(2):
print('case 2 matched!')
break
else:
print('no cases were matched')
:param value: The value to compare in each of the case statements.
:param comp: The function to use for comparison in the case statements.
:param swapped: Whether or not to swap the arguments to the *comp* function.
:return: A function to be called for each case statement.
"""
if swapped:
yield lambda case: comp(case, value)
else:
yield lambda case: comp(value, case)
def validate_json_schema(data, schema_file_id):
"""
Validate the specified data against the specified schema. The schema file
will be searched for and loaded based on its id. If the validation fails,
a :py:class:`~jsonschema.exceptions.ValidationError` will be raised.
:param data: The data to validate against the schema.
:param schema_file_id: The id of the schema to load.
"""
schema_file_name = schema_file_id + '.json'
file_path = find.data_file(os.path.join('schemas', 'json', schema_file_name))
if file_path is None:
raise FileNotFoundError('the json schema file was not found')
with open(file_path, 'r') as file_h:
schema = json.load(file_h)
jsonschema.validate(data, schema)
class Thread(threading.Thread):
"""
King Phisher's base threading class with two way events.
"""
logger = logging.getLogger('KingPhisher.Thread')
def __init__(self, target=None, name=None, args=(), kwargs=None, **_kwargs):
kwargs = kwargs or {}
super(Thread, self).__init__(target=target, name=name, args=args, kwargs=kwargs, **_kwargs)
self.target_name = None
if target is not None:
self.target_name = target.__module__ + '.' + target.__name__
self.stop_flag = Event()
self.stop_flag.clear()
def run(self):
self.logger.debug("thread {0} running {1} in tid: 0x{2:x}".format(self.name, self.target_name, threading.current_thread().ident))
super(Thread, self).run()
def stop(self):
"""
Sets the flag to signal the thread to stop.
"""
self.stop_flag.set()
def is_stopped(self):
"""
Check to see if the flag is set to stop the thread.
"""
return not self.is_alive()
class Event(getattr(threading, ('_Event' if hasattr(threading, '_Event') else 'Event'))):
__slots__ = ('__event',)
def __init__(self):
super(Event, self).__init__()
self.__event = threading.Event()
self.__event.set()
def __repr__(self):
return "<{0} is_set={1!r} >".format(self.__class__.__name__, self.is_set())
def clear(self):
super(Event, self).clear()
self.__event.set()
def is_clear(self):
return self.__event.is_set()
def set(self):
self.__event.clear()
super(Event, self).set()
def wait(self, timeout=None):
if super(Event, self).wait(timeout=timeout):
self.__event.clear()
def wait_clear(self, timeout=None):
if self.__event.wait(timeout=timeout):
super(Event, self).set()
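# Note (added for clarity): this Event subclass keeps a second, inverted
# threading.Event alongside the inherited flag; set() and clear() toggle the
# two in opposite directions so that, in addition to the usual wait(),
# wait_clear() can block until the flag has returned to the cleared state --
# the "two way events" referenced in the Thread docstring above.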
class PrefixLoggerAdapter(logging.LoggerAdapter):
"""
A log adapter that simply prefixes the specified string to all messages. A
single space will be inserted between the prefix and the message.
"""
def __init__(self, prefix, *args, **kwargs):
"""
:param str prefix: The string to prefix all messages with.
"""
self.prefix = prefix + ' '
super(PrefixLoggerAdapter, self).__init__(*args, **kwargs)
def process(self, message, kwargs):
return self.prefix + message, kwargs
| {
"content_hash": "589516a61852ca687a240f2e1234aae8",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 131,
"avg_line_length": 30.396330275229356,
"alnum_prop": 0.7112761076904504,
"repo_name": "securestate/king-phisher",
"id": "30d3229940adf253ee7ec03e8614de25913470bf",
"size": "18144",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "king_phisher/utilities.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1680"
},
{
"name": "CSS",
"bytes": "22839"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "HTML",
"bytes": "25790"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Jupyter Notebook",
"bytes": "11394"
},
{
"name": "Mako",
"bytes": "574"
},
{
"name": "Python",
"bytes": "1384385"
},
{
"name": "Ruby",
"bytes": "7629"
},
{
"name": "Shell",
"bytes": "28081"
}
],
"symlink_target": ""
} |
"""
differential_evolution: The differential evolution global optimization algorithm
Added by Andrew Nelson 2014
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.optimize import OptimizeResult, minimize
from scipy.optimize.optimize import _status_message
from scipy._lib._util import check_random_state, MapWrapper
from scipy._lib.six import xrange, string_types
from scipy.optimize._constraints import (Bounds, new_bounds_to_old,
NonlinearConstraint, LinearConstraint)
__all__ = ['differential_evolution']
_MACHEPS = np.finfo(np.float64).eps
def differential_evolution(func, bounds, args=(), strategy='best1bin',
maxiter=1000, popsize=15, tol=0.01,
mutation=(0.5, 1), recombination=0.7, seed=None,
callback=None, disp=False, polish=True,
init='latinhypercube', atol=0, updating='immediate',
workers=1, constraints=()):
"""Finds the global minimum of a multivariate function.
Differential Evolution is stochastic in nature (does not use gradient
methods) to find the minimum, and can search large areas of candidate
space, but often requires larger numbers of function evaluations than
conventional gradient-based techniques.
The algorithm is due to Storn and Price [1]_.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence or `Bounds`, optional
Bounds for variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. ``(min, max)`` pairs for each element in ``x``, defining the finite
lower and upper bounds for the optimizing argument of `func`. It is
required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used
to determine the number of parameters in ``x``.
args : tuple, optional
Any additional fixed parameters needed to
completely specify the objective function.
strategy : str, optional
The differential evolution strategy to use. Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
The default is 'best1bin'.
maxiter : int, optional
The maximum number of generations over which the entire population is
evolved. The maximum number of function evaluations (with no polishing)
is: ``(maxiter + 1) * popsize * len(x)``
popsize : int, optional
A multiplier for setting the total population size. The population has
``popsize * len(x)`` individuals (unless the initial population is
supplied via the `init` keyword).
tol : float, optional
Relative tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
where `atol` and `tol` are the absolute and relative tolerance
respectively.
mutation : float or tuple(float, float), optional
The mutation constant. In the literature this is also known as
differential weight, being denoted by F.
If specified as a float it should be in the range [0, 2].
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
randomly changes the mutation constant on a generation by generation
basis. The mutation constant for that generation is taken from
``U[min, max)``. Dithering can help speed convergence significantly.
Increasing the mutation constant increases the search radius, but will
slow down convergence.
recombination : float, optional
The recombination constant, should be in the range [0, 1]. In the
literature this is also known as the crossover probability, being
denoted by CR. Increasing this value allows a larger number of mutants
to progress into the next generation, but at the risk of population
stability.
seed : int or `np.random.RandomState`, optional
If `seed` is not specified, the `np.random.RandomState` singleton is used.
If `seed` is an int, a new `np.random.RandomState` instance is used,
seeded with seed.
If `seed` is already a `np.random.RandomState` instance, then that
`np.random.RandomState` instance is used.
Specify `seed` for repeatable minimizations.
disp : bool, optional
Prints the evaluated `func` at every iteration.
callback : callable, `callback(xk, convergence=val)`, optional
A function to follow the progress of the minimization. ``xk`` is
the current value of ``x0``. ``val`` represents the fractional
value of the population convergence. When ``val`` is greater than one
the function halts. If callback returns `True`, then the minimization
is halted (any polishing is still carried out).
polish : bool, optional
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
method is used to polish the best population member at the end, which
can improve the minimization slightly. If a constrained problem is
being studied then the `trust-constr` method is used instead.
init : str or array-like, optional
Specify which type of population initialization is performed. Should be
one of:
- 'latinhypercube'
- 'random'
- array specifying the initial population. The array should have
shape ``(M, len(x))``, where len(x) is the number of parameters.
`init` is clipped to `bounds` before use.
The default is 'latinhypercube'. Latin Hypercube sampling tries to
maximize coverage of the available parameter space. 'random'
initializes the population randomly - this has the drawback that
clustering can occur, preventing the whole of parameter space being
covered. Use of an array to specify a population subset could be used,
for example, to create a tight bunch of initial guesses in a location
where the solution is known to exist, thereby reducing time for
convergence.
atol : float, optional
Absolute tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
where `atol` and `tol` are the absolute and relative tolerance
respectively.
updating : {'immediate', 'deferred'}, optional
If ``'immediate'``, the best solution vector is continuously updated
within a single generation [4]_. This can lead to faster convergence as
trial vectors can take advantage of continuous improvements in the best
solution.
With ``'deferred'``, the best solution vector is updated once per
generation. Only ``'deferred'`` is compatible with parallelization, and
the `workers` keyword can override this option.
.. versionadded:: 1.2.0
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel
(uses `multiprocessing.Pool <multiprocessing>`).
Supply -1 to use all available CPU cores.
Alternatively supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
This option will override the `updating` keyword to
``updating='deferred'`` if ``workers != 1``.
Requires that `func` be pickleable.
.. versionadded:: 1.2.0
constraints : {NonlinearConstraint, LinearConstraint, Bounds}
Constraints on the solver, over and above those applied by the `bounds`
kwd. Uses the approach by Lampinen [5]_.
.. versionadded:: 1.4.0
Returns
-------
res : OptimizeResult
The optimization result represented as a `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes. If `polish`
was employed, and a lower minimum was obtained by the polishing, then
OptimizeResult also contains the ``jac`` attribute.
If the eventual solution does not satisfy the applied constraints
``success`` will be `False`.
Notes
-----
Differential evolution is a stochastic population based method that is
useful for global optimization problems. At each pass through the population
the algorithm mutates each candidate solution by mixing with other candidate
solutions to create a trial candidate. There are several strategies [2]_ for
creating trial candidates, which suit some problems more than others. The
'best1bin' strategy is a good starting point for many systems. In this
strategy two members of the population are randomly chosen. Their difference
is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
so far:
.. math::
b' = b_0 + mutation * (population[rand0] - population[rand1])
A trial vector is then constructed. Starting with a randomly chosen ith
parameter the trial is sequentially filled (in modulo) with parameters from
``b'`` or the original candidate. The choice of whether to use ``b'`` or the
original candidate is made with a binomial distribution (the 'bin' in
'best1bin') - a random number in [0, 1) is generated. If this number is
less than the `recombination` constant then the parameter is loaded from
``b'``, otherwise it is loaded from the original candidate. The final
parameter is always loaded from ``b'``. Once the trial candidate is built
its fitness is assessed. If the trial is better than the original candidate
then it takes its place. If it is also better than the best overall
candidate it also replaces that.
To improve your chances of finding a global minimum use higher `popsize`
    values, with higher `mutation` (and dithering), but lower `recombination`
values. This has the effect of widening the search radius, but slowing
convergence.
By default the best solution vector is updated continuously within a single
iteration (``updating='immediate'``). This is a modification [4]_ of the
original differential evolution algorithm which can lead to faster
convergence as trial vectors can immediately benefit from improved
solutions. To use the original Storn and Price behaviour, updating the best
solution once per iteration, set ``updating='deferred'``.
.. versionadded:: 0.15.0
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function is implemented in `rosen` in `scipy.optimize`.
>>> from scipy.optimize import rosen, differential_evolution
>>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
>>> result = differential_evolution(rosen, bounds)
>>> result.x, result.fun
(array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
Now repeat, but with parallelization.
>>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
>>> result = differential_evolution(rosen, bounds, updating='deferred',
... workers=2)
>>> result.x, result.fun
(array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
    Now let's do a constrained minimization.

    >>> import numpy as np
    >>> from scipy.optimize import NonlinearConstraint, Bounds
>>> def constr_f(x):
... return np.array(x[0] + x[1])
>>>
>>> # the sum of x[0] and x[1] must be less than 1.9
>>> nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
>>> # specify limits using a `Bounds` object.
>>> bounds = Bounds([0., 0.], [2., 2.])
>>> result = differential_evolution(rosen, bounds, constraints=(nlc),
... seed=1)
>>> result.x, result.fun
(array([0.96633867, 0.93363577]), 0.0011361355854792312)
Next find the minimum of the Ackley function
(https://en.wikipedia.org/wiki/Test_functions_for_optimization).
>>> from scipy.optimize import differential_evolution
>>> import numpy as np
>>> def ackley(x):
... arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
... arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
... return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
>>> bounds = [(-5, 5), (-5, 5)]
>>> result = differential_evolution(ackley, bounds)
>>> result.x, result.fun
(array([ 0., 0.]), 4.4408920985006262e-16)
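
    The `workers` keyword also accepts a map-like callable; the evaluation is
    carried out as ``workers(func, iterable)``. As a small sketch, the builtin
    `map` can be supplied to evaluate the population serially:

    >>> result = differential_evolution(ackley, bounds, updating='deferred',
    ...                                 workers=map)
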
References
----------
.. [1] Storn, R and Price, K, Differential Evolution - a Simple and
Efficient Heuristic for Global Optimization over Continuous Spaces,
Journal of Global Optimization, 1997, 11, 341 - 359.
.. [2] http://www1.icsi.berkeley.edu/~storn/code.html
.. [3] http://en.wikipedia.org/wiki/Differential_evolution
    .. [4] Wormington, M., Panaccione, C., Matney, K. M., Bowen, D. K.,
Characterization of structures from X-ray scattering data using
genetic algorithms, Phil. Trans. R. Soc. Lond. A, 1999, 357,
2827-2848
.. [5] Lampinen, J., A constraint handling approach for the differential
evolution algorithm. Proceedings of the 2002 Congress on
Evolutionary Computation. CEC'02 (Cat. No. 02TH8600). Vol. 2. IEEE,
2002.
"""
# using a context manager means that any created Pool objects are
# cleared up.
with DifferentialEvolutionSolver(func, bounds, args=args,
strategy=strategy,
maxiter=maxiter,
popsize=popsize, tol=tol,
mutation=mutation,
recombination=recombination,
seed=seed, polish=polish,
callback=callback,
disp=disp, init=init, atol=atol,
updating=updating,
workers=workers,
constraints=constraints) as solver:
ret = solver.solve()
return ret
class DifferentialEvolutionSolver(object):
"""This class implements the differential evolution solver
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence or `Bounds`, optional
Bounds for variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. ``(min, max)`` pairs for each element in ``x``, defining the finite
lower and upper bounds for the optimizing argument of `func`. It is
required to have ``len(bounds) == len(x)``. ``len(bounds)`` is used
to determine the number of parameters in ``x``.
args : tuple, optional
Any additional fixed parameters needed to
completely specify the objective function.
strategy : str, optional
The differential evolution strategy to use. Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
The default is 'best1bin'
maxiter : int, optional
The maximum number of generations over which the entire population is
evolved. The maximum number of function evaluations (with no polishing)
is: ``(maxiter + 1) * popsize * len(x)``
popsize : int, optional
A multiplier for setting the total population size. The population has
``popsize * len(x)`` individuals (unless the initial population is
supplied via the `init` keyword).
tol : float, optional
Relative tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
        where `atol` and `tol` are the absolute and relative tolerance
respectively.
mutation : float or tuple(float, float), optional
The mutation constant. In the literature this is also known as
differential weight, being denoted by F.
        If specified as a float it should be in the range [0, 2).
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
randomly changes the mutation constant on a generation by generation
basis. The mutation constant for that generation is taken from
U[min, max). Dithering can help speed convergence significantly.
Increasing the mutation constant increases the search radius, but will
slow down convergence.
recombination : float, optional
The recombination constant, should be in the range [0, 1]. In the
literature this is also known as the crossover probability, being
denoted by CR. Increasing this value allows a larger number of mutants
to progress into the next generation, but at the risk of population
stability.
seed : int or `np.random.RandomState`, optional
If `seed` is not specified the `np.random.RandomState` singleton is
used.
If `seed` is an int, a new `np.random.RandomState` instance is used,
seeded with `seed`.
If `seed` is already a `np.random.RandomState` instance, then that
`np.random.RandomState` instance is used.
Specify `seed` for repeatable minimizations.
disp : bool, optional
Prints the evaluated `func` at every iteration.
callback : callable, `callback(xk, convergence=val)`, optional
A function to follow the progress of the minimization. ``xk`` is
the current value of ``x0``. ``val`` represents the fractional
value of the population convergence. When ``val`` is greater than one
the function halts. If callback returns `True`, then the minimization
is halted (any polishing is still carried out).
polish : bool, optional
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
method is used to polish the best population member at the end, which
can improve the minimization slightly. If a constrained problem is
being studied then the `trust-constr` method is used instead.
maxfun : int, optional
Set the maximum number of function evaluations. However, it probably
makes more sense to set `maxiter` instead.
init : str or array-like, optional
Specify which type of population initialization is performed. Should be
one of:
- 'latinhypercube'
- 'random'
- array specifying the initial population. The array should have
shape ``(M, len(x))``, where len(x) is the number of parameters.
`init` is clipped to `bounds` before use.
The default is 'latinhypercube'. Latin Hypercube sampling tries to
maximize coverage of the available parameter space. 'random'
initializes the population randomly - this has the drawback that
clustering can occur, preventing the whole of parameter space being
        covered. An array can be used, for example, to create a tight bunch
        of initial guesses in a location where the solution is known to
        exist, thereby reducing time for convergence.
atol : float, optional
Absolute tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
        where `atol` and `tol` are the absolute and relative tolerance
respectively.
updating : {'immediate', 'deferred'}, optional
        If ``'immediate'``, the best solution vector is continuously updated
        within a single generation. This can lead to faster convergence as
        trial vectors can take advantage of continuous improvements in the
        best solution.
        With ``'deferred'``, the best solution vector is updated once per
        generation. Only ``'deferred'`` is compatible with parallelization,
        and the `workers` keyword can override this option.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel
(uses `multiprocessing.Pool <multiprocessing>`).
        Supply -1 to use all available CPU cores.
Alternatively supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
This option will override the `updating` keyword to
`updating='deferred'` if `workers != 1`.
Requires that `func` be pickleable.
    constraints : {NonlinearConstraint, LinearConstraint, Bounds}
Constraints on the solver, over and above those applied by the `bounds`
kwd. Uses the approach by Lampinen.
"""
# Dispatch of mutation strategy method (binomial or exponential).
_binomial = {'best1bin': '_best1',
'randtobest1bin': '_randtobest1',
'currenttobest1bin': '_currenttobest1',
'best2bin': '_best2',
'rand2bin': '_rand2',
'rand1bin': '_rand1'}
_exponential = {'best1exp': '_best1',
'rand1exp': '_rand1',
'randtobest1exp': '_randtobest1',
'currenttobest1exp': '_currenttobest1',
'best2exp': '_best2',
'rand2exp': '_rand2'}
__init_error_msg = ("The population initialization method must be one of "
"'latinhypercube' or 'random', or an array of shape "
"(M, N) where N is the number of parameters and M>5")
def __init__(self, func, bounds, args=(),
strategy='best1bin', maxiter=1000, popsize=15,
tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
maxfun=np.inf, callback=None, disp=False, polish=True,
init='latinhypercube', atol=0, updating='immediate',
workers=1, constraints=()):
if strategy in self._binomial:
self.mutation_func = getattr(self, self._binomial[strategy])
elif strategy in self._exponential:
self.mutation_func = getattr(self, self._exponential[strategy])
else:
raise ValueError("Please select a valid mutation strategy")
self.strategy = strategy
self.callback = callback
self.polish = polish
# set the updating / parallelisation options
if updating in ['immediate', 'deferred']:
self._updating = updating
# want to use parallelisation, but updating is immediate
if workers != 1 and updating == 'immediate':
warnings.warn("differential_evolution: the 'workers' keyword has"
" overridden updating='immediate' to"
" updating='deferred'", UserWarning)
self._updating = 'deferred'
# an object with a map method.
self._mapwrapper = MapWrapper(workers)
# relative and absolute tolerances for convergence
self.tol, self.atol = tol, atol
# Mutation constant should be in [0, 2). If specified as a sequence
# then dithering is performed.
self.scale = mutation
if (not np.all(np.isfinite(mutation)) or
np.any(np.array(mutation) >= 2) or
np.any(np.array(mutation) < 0)):
raise ValueError('The mutation constant must be a float in '
'U[0, 2), or specified as a tuple(min, max)'
' where min < max and min, max are in U[0, 2).')
self.dither = None
if hasattr(mutation, '__iter__') and len(mutation) > 1:
self.dither = [mutation[0], mutation[1]]
self.dither.sort()
self.cross_over_probability = recombination
# we create a wrapped function to allow the use of map (and Pool.map
# in the future)
self.func = _FunctionWrapper(func, args)
self.args = args
# convert tuple of lower and upper bounds to limits
# [(low_0, high_0), ..., (low_n, high_n]
# -> [[low_0, ..., low_n], [high_0, ..., high_n]]
if isinstance(bounds, Bounds):
self.limits = np.array(new_bounds_to_old(bounds.lb,
bounds.ub,
len(bounds.lb)),
dtype=float).T
else:
self.limits = np.array(bounds, dtype='float').T
if (np.size(self.limits, 0) != 2 or not
np.all(np.isfinite(self.limits))):
raise ValueError('bounds should be a sequence containing '
'real valued (min, max) pairs for each value'
' in x')
if maxiter is None: # the default used to be None
maxiter = 1000
self.maxiter = maxiter
if maxfun is None: # the default used to be None
maxfun = np.inf
self.maxfun = maxfun
# population is scaled to between [0, 1].
# We have to scale between parameter <-> population
# save these arguments for _scale_parameter and
# _unscale_parameter. This is an optimization
self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
self.parameter_count = np.size(self.limits, 1)
self.random_number_generator = check_random_state(seed)
# default population initialization is a latin hypercube design, but
# there are other population initializations possible.
# the minimum is 5 because 'best2bin' requires a population that's at
# least 5 long
self.num_population_members = max(5, popsize * self.parameter_count)
self.population_shape = (self.num_population_members,
self.parameter_count)
self._nfev = 0
if isinstance(init, string_types):
if init == 'latinhypercube':
self.init_population_lhs()
elif init == 'random':
self.init_population_random()
else:
raise ValueError(self.__init_error_msg)
else:
self.init_population_array(init)
# infrastructure for constraints
# dummy parameter vector for preparing constraints, this is required so
# that the number of constraints is known.
x0 = self._scale_parameters(self.population[0])
self.constraints = constraints
self._wrapped_constraints = []
if hasattr(constraints, '__len__'):
# sequence of constraints, this will also deal with default
# keyword parameter
for c in constraints:
self._wrapped_constraints.append(_ConstraintWrapper(c, x0))
else:
self._wrapped_constraints = [_ConstraintWrapper(constraints, x0)]
self.constraint_violation = np.zeros((self.num_population_members, 1))
self.feasible = np.ones(self.num_population_members, bool)
self.disp = disp
def init_population_lhs(self):
"""
Initializes the population with Latin Hypercube Sampling.
Latin Hypercube Sampling ensures that each parameter is uniformly
sampled over its range.
"""
rng = self.random_number_generator
# Each parameter range needs to be sampled uniformly. The scaled
# parameter range ([0, 1)) needs to be split into
# `self.num_population_members` segments, each of which has the following
# size:
segsize = 1.0 / self.num_population_members
# Within each segment we sample from a uniform random distribution.
# We need to do this sampling for each parameter.
samples = (segsize * rng.random_sample(self.population_shape)
# Offset each segment to cover the entire parameter range [0, 1)
+ np.linspace(0., 1., self.num_population_members,
endpoint=False)[:, np.newaxis])
# Create an array for population of candidate solutions.
self.population = np.zeros_like(samples)
# Initialize population of candidate solutions by permutation of the
# random samples.
for j in range(self.parameter_count):
order = rng.permutation(range(self.num_population_members))
self.population[:, j] = samples[order, j]
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_random(self):
"""
Initializes the population at random. This type of initialization
        can possess clustering; Latin Hypercube sampling is generally better.
"""
rng = self.random_number_generator
self.population = rng.random_sample(self.population_shape)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_array(self, init):
"""
Initializes the population with a user specified population.
Parameters
----------
init : np.ndarray
            Array specifying the initial population. The array should
have shape (M, len(x)), where len(x) is the number of parameters.
The population is clipped to the lower and upper bounds.
"""
# make sure you're using a float array
popn = np.asfarray(init)
if (np.size(popn, 0) < 5 or
popn.shape[1] != self.parameter_count or
len(popn.shape) != 2):
raise ValueError("The population supplied needs to have shape"
" (M, len(x)), where M > 4.")
# scale values and clip to bounds, assigning to population
self.population = np.clip(self._unscale_parameters(popn), 0, 1)
self.num_population_members = np.size(self.population, 0)
self.population_shape = (self.num_population_members,
self.parameter_count)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
@property
def x(self):
"""
The best solution from the solver
"""
return self._scale_parameters(self.population[0])
@property
def convergence(self):
"""
The standard deviation of the population energies divided by their
mean.
"""
if np.any(np.isinf(self.population_energies)):
return np.inf
return (np.std(self.population_energies) /
np.abs(np.mean(self.population_energies) + _MACHEPS))
def converged(self):
"""
Return True if the solver has converged.
"""
return (np.std(self.population_energies) <=
self.atol +
self.tol * np.abs(np.mean(self.population_energies)))
def solve(self):
"""
Runs the DifferentialEvolutionSolver.
Returns
-------
res : OptimizeResult
            The optimization result represented as an ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes. If `polish`
was employed, and a lower minimum was obtained by the polishing,
then OptimizeResult also contains the ``jac`` attribute.
"""
nit, warning_flag = 0, False
status_message = _status_message['success']
# The population may have just been initialized (all entries are
# np.inf). If it has you have to calculate the initial energies.
# Although this is also done in the evolve generator it's possible
# that someone can set maxiter=0, at which point we still want the
# initial energies to be calculated (the following loop isn't run).
if np.all(np.isinf(self.population_energies)):
self.feasible, self.constraint_violation = (
self._calculate_population_feasibilities(self.population))
# only work out population energies for feasible solutions
self.population_energies[self.feasible] = (
self._calculate_population_energies(
self.population[self.feasible]))
self._promote_lowest_energy()
# do the optimization.
for nit in xrange(1, self.maxiter + 1):
# evolve the population by a generation
try:
next(self)
except StopIteration:
warning_flag = True
if self._nfev > self.maxfun:
status_message = _status_message['maxfev']
elif self._nfev == self.maxfun:
status_message = ('Maximum number of function evaluations'
' has been reached.')
break
if self.disp:
print("differential_evolution step %d: f(x)= %g"
% (nit,
self.population_energies[0]))
# should the solver terminate?
convergence = self.convergence
if (self.callback and
self.callback(self._scale_parameters(self.population[0]),
convergence=self.tol / convergence) is True):
warning_flag = True
status_message = ('callback function requested stop early '
'by returning True')
break
if np.any(np.isinf(self.population_energies)):
intol = False
else:
intol = (np.std(self.population_energies) <=
self.atol +
self.tol * np.abs(np.mean(self.population_energies)))
if warning_flag or intol:
break
else:
status_message = _status_message['maxiter']
warning_flag = True
DE_result = OptimizeResult(
x=self.x,
fun=self.population_energies[0],
nfev=self._nfev,
nit=nit,
message=status_message,
success=(warning_flag is not True))
if self.polish:
polish_method = 'L-BFGS-B'
if self._wrapped_constraints:
polish_method = 'trust-constr'
constr_violation = self._constraint_violation_fn(DE_result.x)
if np.any(constr_violation > 0.):
warnings.warn("differential evolution didn't find a"
" solution satisfying the constraints,"
" attempting to polish from the least"
" infeasible solution", UserWarning)
result = minimize(self.func,
np.copy(DE_result.x),
method=polish_method,
bounds=self.limits.T,
constraints=self.constraints)
self._nfev += result.nfev
DE_result.nfev = self._nfev
# Polishing solution is only accepted if there is an improvement in
# cost function, the polishing was successful and the solution lies
# within the bounds.
if (result.fun < DE_result.fun and
result.success and
np.all(result.x <= self.limits[1]) and
np.all(self.limits[0] <= result.x)):
DE_result.fun = result.fun
DE_result.x = result.x
DE_result.jac = result.jac
# to keep internal state consistent
self.population_energies[0] = result.fun
self.population[0] = self._unscale_parameters(result.x)
if self._wrapped_constraints:
DE_result.constr = [c.violation(DE_result.x) for
c in self._wrapped_constraints]
DE_result.constr_violation = np.max(
np.concatenate(DE_result.constr))
DE_result.maxcv = DE_result.constr_violation
if DE_result.maxcv > 0:
# if the result is infeasible then success must be False
DE_result.success = False
DE_result.message = ("The solution does not satisfy the"
" constraints, MAXCV = " % DE_result.maxcv)
return DE_result
def _calculate_population_energies(self, population):
"""
Calculate the energies of a population.
Parameters
----------
population : ndarray
An array of parameter vectors normalised to [0, 1] using lower
and upper limits. Has shape ``(np.size(population, 0), len(x))``.
Returns
-------
energies : ndarray
An array of energies corresponding to each population member. If
maxfun will be exceeded during this call, then the number of
function evaluations will be reduced and energies will be
right-padded with np.inf. Has shape ``(np.size(population, 0),)``
"""
num_members = np.size(population, 0)
nfevs = min(num_members,
self.maxfun - num_members)
energies = np.full(num_members, np.inf)
parameters_pop = self._scale_parameters(population)
try:
calc_energies = list(self._mapwrapper(self.func,
parameters_pop[0:nfevs]))
energies[0:nfevs] = calc_energies
except (TypeError, ValueError):
# wrong number of arguments for _mapwrapper
# or wrong length returned from the mapper
raise RuntimeError("The map-like callable must be of the"
" form f(func, iterable), returning a sequence"
" of numbers the same length as 'iterable'")
self._nfev += nfevs
return energies
def _promote_lowest_energy(self):
# swaps 'best solution' into first population entry
idx = np.arange(self.num_population_members)
feasible_solutions = idx[self.feasible]
if feasible_solutions.size:
# find the best feasible solution
idx_t = np.argmin(self.population_energies[feasible_solutions])
l = feasible_solutions[idx_t]
else:
# no solution was feasible, use 'best' infeasible solution, which
# will violate constraints the least
l = np.argmin(np.sum(self.constraint_violation, axis=1))
self.population_energies[[0, l]] = self.population_energies[[l, 0]]
self.population[[0, l], :] = self.population[[l, 0], :]
self.feasible[[0, l]] = self.feasible[[l, 0]]
self.constraint_violation[[0, l], :] = (
self.constraint_violation[[l, 0], :])
def _constraint_violation_fn(self, x):
"""
Calculates total constraint violation for all the constraints, for a given
solution.
Parameters
----------
x : ndarray
Solution vector
Returns
-------
cv : ndarray
Total violation of constraints. Has shape ``(M,)``, where M is the
number of constraints (if each constraint function only returns one
value)
"""
return np.concatenate([c.violation(x) for c in self._wrapped_constraints])
def _calculate_population_feasibilities(self, population):
"""
Calculate the feasibilities of a population.
Parameters
----------
population : ndarray
An array of parameter vectors normalised to [0, 1] using lower
and upper limits. Has shape ``(np.size(population, 0), len(x))``.
Returns
-------
feasible, constraint_violation : ndarray, ndarray
Boolean array of feasibility for each population member, and an
array of the constraint violation for each population member.
constraint_violation has shape ``(np.size(population, 0), M)``,
where M is the number of constraints.
"""
num_members = np.size(population, 0)
if not self._wrapped_constraints:
# shortcut for no constraints
return np.ones(num_members, bool), np.zeros((num_members, 1))
parameters_pop = self._scale_parameters(population)
constraint_violation = np.array([self._constraint_violation_fn(x)
for x in parameters_pop])
feasible = ~(np.sum(constraint_violation, axis=1) > 0)
return feasible, constraint_violation
def __iter__(self):
return self
def __enter__(self):
return self
def __exit__(self, *args):
# to make sure resources are closed down
self._mapwrapper.close()
self._mapwrapper.terminate()
def __del__(self):
# to make sure resources are closed down
self._mapwrapper.close()
self._mapwrapper.terminate()
def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
energy_orig, feasible_orig, cv_orig):
"""
Trial is accepted if:
* it satisfies all constraints and provides a lower or equal objective
function value, while both the compared solutions are feasible
- or -
* it is feasible while the original solution is infeasible,
- or -
* it is infeasible, but provides a lower or equal constraint violation
for all constraint functions.
This test corresponds to section III of Lampinen [1]_.
Parameters
----------
energy_trial : float
Energy of the trial solution
        feasible_trial : bool
Feasibility of trial solution
cv_trial : array-like
Excess constraint violation for the trial solution
energy_orig : float
Energy of the original solution
        feasible_orig : bool
Feasibility of original solution
cv_orig : array-like
Excess constraint violation for the original solution
Returns
-------
accepted : bool
"""
if feasible_orig and feasible_trial:
return energy_trial <= energy_orig
elif feasible_trial and not feasible_orig:
return True
elif not feasible_trial and (cv_trial <= cv_orig).all():
# cv_trial < cv_orig would imply that both trial and orig are not
# feasible
return True
return False
def __next__(self):
"""
Evolve the population by a single generation
Returns
-------
x : ndarray
The best solution from the solver.
fun : float
Value of objective function obtained from the best solution.
"""
# the population may have just been initialized (all entries are
# np.inf). If it has you have to calculate the initial energies
if np.all(np.isinf(self.population_energies)):
self.feasible, self.constraint_violation = (
self._calculate_population_feasibilities(self.population))
# only need to work out population energies for those that are
# feasible
self.population_energies[self.feasible] = (
self._calculate_population_energies(
self.population[self.feasible]))
self._promote_lowest_energy()
if self.dither is not None:
self.scale = (self.random_number_generator.rand()
* (self.dither[1] - self.dither[0]) + self.dither[0])
if self._updating == 'immediate':
# update best solution immediately
for candidate in range(self.num_population_members):
if self._nfev > self.maxfun:
raise StopIteration
# create a trial solution
trial = self._mutate(candidate)
# ensuring that it's in the range [0, 1)
self._ensure_constraint(trial)
# scale from [0, 1) to the actual parameter value
parameters = self._scale_parameters(trial)
# determine the energy of the objective function
if self._wrapped_constraints:
cv = self._constraint_violation_fn(parameters)
feasible = False
energy = np.inf
if not np.sum(cv) > 0:
# solution is feasible
feasible = True
energy = self.func(parameters)
self._nfev += 1
else:
feasible = True
cv = np.atleast_2d([0.])
energy = self.func(parameters)
self._nfev += 1
# compare trial and population member
if self._accept_trial(energy, feasible, cv,
self.population_energies[candidate],
self.feasible[candidate],
self.constraint_violation[candidate]):
self.population[candidate] = trial
self.population_energies[candidate] = energy
self.feasible[candidate] = feasible
self.constraint_violation[candidate] = cv
# if the trial candidate is also better than the best
# solution then promote it.
if self._accept_trial(energy, feasible, cv,
self.population_energies[0],
self.feasible[0],
self.constraint_violation[0]):
self._promote_lowest_energy()
elif self._updating == 'deferred':
# update best solution once per generation
if self._nfev >= self.maxfun:
raise StopIteration
# 'deferred' approach, vectorised form.
# create trial solutions
trial_pop = np.array(
[self._mutate(i) for i in range(self.num_population_members)])
# enforce bounds
self._ensure_constraint(trial_pop)
# determine the energies of the objective function, but only for
# feasible trials
feasible, cv = self._calculate_population_feasibilities(trial_pop)
trial_energies = np.full(self.num_population_members, np.inf)
# only calculate for feasible entries
trial_energies[feasible] = self._calculate_population_energies(
trial_pop[feasible])
# which solutions are 'improved'?
loc = [self._accept_trial(*val) for val in
zip(trial_energies, feasible, cv, self.population_energies,
self.feasible, self.constraint_violation)]
loc = np.array(loc)
self.population = np.where(loc[:, np.newaxis],
trial_pop,
self.population)
self.population_energies = np.where(loc,
trial_energies,
self.population_energies)
self.feasible = np.where(loc,
feasible,
self.feasible)
self.constraint_violation = np.where(loc[:, np.newaxis],
cv,
self.constraint_violation)
# make sure the best solution is updated if updating='deferred'.
# put the lowest energy into the best solution position.
self._promote_lowest_energy()
return self.x, self.population_energies[0]
next = __next__
def _scale_parameters(self, trial):
"""Scale from a number between 0 and 1 to parameters."""
return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
def _unscale_parameters(self, parameters):
"""Scale from parameters to a number between 0 and 1."""
return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
def _ensure_constraint(self, trial):
"""Make sure the parameters lie between the limits."""
mask = np.where((trial > 1) | (trial < 0))
trial[mask] = self.random_number_generator.rand(mask[0].size)
def _mutate(self, candidate):
"""Create a trial vector based on a mutation strategy."""
trial = np.copy(self.population[candidate])
rng = self.random_number_generator
fill_point = rng.randint(0, self.parameter_count)
if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
bprime = self.mutation_func(candidate,
self._select_samples(candidate, 5))
else:
bprime = self.mutation_func(self._select_samples(candidate, 5))
if self.strategy in self._binomial:
crossovers = rng.rand(self.parameter_count)
crossovers = crossovers < self.cross_over_probability
# the last one is always from the bprime vector for binomial
# If you fill in modulo with a loop you have to set the last one to
# true. If you don't use a loop then you can have any random entry
# be True.
crossovers[fill_point] = True
trial = np.where(crossovers, bprime, trial)
return trial
elif self.strategy in self._exponential:
i = 0
while (i < self.parameter_count and
rng.rand() < self.cross_over_probability):
trial[fill_point] = bprime[fill_point]
fill_point = (fill_point + 1) % self.parameter_count
i += 1
return trial
def _best1(self, samples):
"""best1bin, best1exp"""
r0, r1 = samples[:2]
return (self.population[0] + self.scale *
(self.population[r0] - self.population[r1]))
def _rand1(self, samples):
"""rand1bin, rand1exp"""
r0, r1, r2 = samples[:3]
return (self.population[r0] + self.scale *
(self.population[r1] - self.population[r2]))
def _randtobest1(self, samples):
"""randtobest1bin, randtobest1exp"""
r0, r1, r2 = samples[:3]
bprime = np.copy(self.population[r0])
bprime += self.scale * (self.population[0] - bprime)
bprime += self.scale * (self.population[r1] -
self.population[r2])
return bprime
def _currenttobest1(self, candidate, samples):
"""currenttobest1bin, currenttobest1exp"""
r0, r1 = samples[:2]
bprime = (self.population[candidate] + self.scale *
(self.population[0] - self.population[candidate] +
self.population[r0] - self.population[r1]))
return bprime
def _best2(self, samples):
"""best2bin, best2exp"""
r0, r1, r2, r3 = samples[:4]
bprime = (self.population[0] + self.scale *
(self.population[r0] + self.population[r1] -
self.population[r2] - self.population[r3]))
return bprime
def _rand2(self, samples):
"""rand2bin, rand2exp"""
r0, r1, r2, r3, r4 = samples
bprime = (self.population[r0] + self.scale *
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))
return bprime
def _select_samples(self, candidate, number_samples):
"""
obtain random integers from range(self.num_population_members),
without replacement. You can't have the original candidate either.
"""
idxs = list(range(self.num_population_members))
idxs.remove(candidate)
self.random_number_generator.shuffle(idxs)
idxs = idxs[:number_samples]
return idxs
class _FunctionWrapper(object):
"""
Object to wrap user cost function, allowing picklability
"""
def __init__(self, f, args):
self.f = f
self.args = [] if args is None else args
def __call__(self, x):
return self.f(x, *self.args)
class _ConstraintWrapper(object):
"""Object to wrap/evaluate user defined constraints.
Very similar in practice to `PreparedConstraint`, except that no evaluation
of jac/hess is performed (explicit or implicit).
If created successfully, it will contain the attributes listed below.
Parameters
----------
constraint : {`NonlinearConstraint`, `LinearConstraint`, `Bounds`}
Constraint to check and prepare.
x0 : array_like
Initial vector of independent variables.
Attributes
----------
fun : callable
Function defining the constraint wrapped by one of the convenience
classes.
bounds : 2-tuple
Contains lower and upper bounds for the constraints --- lb and ub.
These are converted to ndarray and have a size equal to the number of
the constraints.
"""
def __init__(self, constraint, x0):
self.constraint = constraint
if isinstance(constraint, NonlinearConstraint):
def fun(x):
return np.atleast_1d(constraint.fun(x))
elif isinstance(constraint, LinearConstraint):
def fun(x):
A = np.atleast_2d(constraint.A)
return A.dot(x)
elif isinstance(constraint, Bounds):
def fun(x):
return x
else:
raise ValueError("`constraint` of an unknown type is passed.")
self.fun = fun
lb = np.asarray(constraint.lb, dtype=float)
ub = np.asarray(constraint.ub, dtype=float)
f0 = fun(x0)
m = f0.size
if lb.ndim == 0:
lb = np.resize(lb, m)
if ub.ndim == 0:
ub = np.resize(ub, m)
self.bounds = (lb, ub)
def __call__(self, x):
return np.atleast_1d(self.fun(x))
def violation(self, x):
"""How much the constraint is exceeded by.
Parameters
----------
x : array-like
Vector of independent variables
Returns
-------
excess : array-like
How much the constraint is exceeded by, for each of the
constraints specified by `_ConstraintWrapper.fun`.
"""
ev = self.fun(np.asarray(x))
excess_lb = np.maximum(self.bounds[0] - ev, 0)
excess_ub = np.maximum(ev - self.bounds[1], 0)
return excess_lb + excess_ub
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# Copyright 2016-2020 Eotvos Lorand University, Budapest, Hungary
import argparse
from hlir16.hlir import *
from compiler_log_warnings_errors import *
import compiler_log_warnings_errors
from compiler_load_p4 import load_from_p4
from compiler_exception_handling import *
import compiler_common
import re
import os
import sys
import pkgutil
generate_code_files = True
# Inside the compiler, these variables are considered singleton.
args = []
hlir = None
def replace_insert2(insert):
simple = re.split(r'^\$([a-zA-Z_][a-zA-Z_0-9]*)$', insert)
if len(simple) == 3:
return ("{}", simple[1])
# replace $$[light][text1]{expr}{text2} inserts, where all parts except {expr} are optional
m = re.match(r'(?P<type>\$\$?)(\[(?P<light>[^\]]+)\])?(\[(?P<text1>[^\]]+)\])?{\s*(?P<expr>[^}]*)\s*}({(?P<text2>[^}]+)})?', insert)
light = m.group("light")
txt1 = m.group('text1') or ''
expr = m.group('expr')
txt2 = m.group('text2') or ''
# no highlighting
if m.group("type") == '$':
fmt = f'{escape_brace(txt1)}{{}}{escape_brace(txt2)}'
else:
light_param = f",{light}" if light not in (None, "") else ""
fmt = f'" T4LIT({escape_brace(txt1)}{{}}{escape_brace(txt2)}{light_param}) "'
return (fmt, expr)
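# Illustrative examples of the insert grammar handled above, derived by
# tracing the regexes; the identifiers are made up and these lines are only
# a sketch, not part of the original source:
#   replace_insert2('$name')               -> ('{}', 'name')
#   replace_insert2('${rate}')             -> ('{}', 'rate')
#   replace_insert2('$${hdr.name}')        -> ('" T4LIT({}) "', 'hdr.name')
#   replace_insert2('$$[8][pre]{e}{post}') -> ('" T4LIT(pre{}post,8) "', 'e')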
def replace_insert(insert):
simple = re.split(r'^\$([a-zA-Z_][a-zA-Z_0-9]*)$', insert)
if len(simple) == 3:
yield (simple[1],)
return
# replace $$[light][text1]{expr}{text2} inserts, where all parts except {expr} are optional
m = re.match(r'(?P<type>\$\$?)(\[(?P<light>[^\]]+)\])?(\[(?P<text1>[^\]]+)\])?{\s*(?P<expr>[^}]*)\s*}({(?P<text2>[^}]+)})?', insert)
if not m:
yield insert
return
light = m.group("light")
txt1 = m.group('text1') or ''
expr = m.group('expr')
txt2 = m.group('text2') or ''
# no highlighting
if m.group("type") == '$':
yield escape_brace(txt1)
yield (escape_brace(expr),)
yield escape_brace(txt2)
else:
light_param = f",{light}" if light not in (None, "") else ""
yield '" T4LIT("'
yield escape_brace(txt1)
yield (escape_brace(expr),)
yield escape_brace(txt2)
if light:
yield f",{light}"
yield ') "'
def adjust_indentation(indenter, line_idx, file):
indent_levels = {
"[": ( 0, True),
"{": (+1, True),
"}": (-1, False),
}
old_indent = compiler_common.file_indentation_level
indent_change, return_old_indent = indent_levels[indenter]
compiler_common.file_indentation_level += indent_change
# #{ starts a new indentation level from the next line
# also, #} unindents starting this line
if indenter == '{' and compiler_common.file_indentation_level == 0:
addError("Compiler", f"Too much unindent in {file}:{line_idx}")
return old_indent if return_old_indent else compiler_common.file_indentation_level
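# Rough illustration with a hypothetical template fragment (not taken from the
# original sources): a '#{' line is emitted at the level that was current
# before it, the lines after it are emitted one level deeper, and '#}' closes
# the level again:
#     #{ if (hdr.valid) {
#     #[ handle_header();
#     #} }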
def escape_slash(s):
return re.sub(r'(\\|")', r'\\\1', s)
def escape_brace(s):
return re.sub(r'(\{|\})', r'\1\1', s)
def split_and_translate(content, extra_content="", no_quote_allowed=False):
parts = re.split(r'(\$+(?:(?:\[[^\]]*\])*(?:\{[^\}]*\})+|[a-zA-Z_][a-zA-Z_0-9]*))', content)
return translate_line_main_content2(parts, extra_content, no_quote_allowed)
def translate_line_main_content(parts, extra_content, no_quote_allowed):
replaceds = [repl for part in parts for repl in replace_insert(part)]
raws = [part[0] if type(part) is tuple else part for part in replaceds]
no_apostrophes = all("'" not in raw for raw in raws)
no_quotes = all('"' not in raw for raw in raws)
if no_apostrophes or no_quotes:
quote = "'" if no_apostrophes else '"'
has_inserts = any(type(part) is tuple for part in replaceds)
has_bad_inserts = any(type(part) is tuple and any('(' in p for p in part) for part in replaceds)
if has_bad_inserts:
return translate_line_main_content2(parts, extra_content, no_quote_allowed)
esc = escape_brace if has_inserts else (lambda p: p)
content = "".join((f'{{{part[0]}}}' if part[0] != '' else '') + "".join(esc(p) for p in part[1:]) if type(part) is tuple else esc(part) for part in replaceds)
formatter = 'f' if has_inserts else ''
return False, f'{formatter}{quote}{content}{quote}'
return translate_line_main_content2(parts, extra_content, no_quote_allowed)
def translate_line_main_content2(parts, extra_content, no_quote_allowed):
if len(parts) == 1:
if no_quote_allowed and '\\' not in parts[0] and '"' not in parts[0]:
return False, parts[0]
return True, f'"{escape_slash(parts[0])}"'
match_with_rests = [(replace_insert2(parts[1+2*i]), parts[2+2*i]) for i in range((len(parts)-1)//2)]
all_fmt = "".join(((re.sub(r'\{\}', '', fmt) if expr == "" else fmt) + escape_brace(txt) for (fmt, expr), txt in match_with_rests))
all_fmt = escape_slash(f'{escape_brace(parts[0])}{all_fmt}') + extra_content
if "'" not in all_fmt:
quote = "'"
all_fmt = re.sub(r'\\"', '"', all_fmt)
else:
quote = '"'
all_escapes_txt = ", ".join((escape_brace(expr) or '""' for (fmt, expr), txt in match_with_rests if expr != ""))
if all_escapes_txt == "":
if no_quote_allowed:
return False, f'{all_fmt}'.strip()
return True, f'{quote}{all_fmt}{quote}'
else:
return True, f'{quote}{all_fmt}{quote}.format({all_escapes_txt})'
def translate_line_with_insert(file, genfile, line_idx, line):
"""Gets a line that contains an insert
and transforms it to a Python code section."""
_empty, indent, maybe_pre, indenter, content, _empty2 = re.split(r'^([ \t]*)#(pre|aft)?([\[\{\}])(.*)$', line)
line_indent = adjust_indentation(indenter, line_idx, file)
prepend_append_funname = "prepend" if maybe_pre == "pre" else "append" if maybe_pre == "aft" else ""
prepend_append_txt = f"[{maybe_pre}]" if maybe_pre != "" else ""
no_hint = "nohint" in args['hint']
extra_content = f" // {prepend_append_txt} {file_prefix(file, genfile)}{line_idx}" if not no_hint and maybe_pre else ""
_is_escaped, line = split_and_translate(content, extra_content)
if maybe_pre:
return f'{indent}{prepend_append_funname}_statement({line})'
par_indent = f', indent_level = {line_indent}' if line_indent != 0 else ''
par_lineno = f', lineno = {line_idx}' if line_idx is not None else ''
return f'{indent}generated_code += add_code({line}{par_indent}{par_lineno})'
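# Illustrative sketch (hypothetical input; the output was reconstructed by
# hand and is slightly simplified): a template line such as
#     '    #[ int ${var} = 0;'
# is rewritten to roughly
#     "    generated_code += add_code(' int {} = 0;'.format(var), lineno = 12)"
# assuming line_idx == 12 and a current indentation level of 0.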
def increase(idx):
if idx is None:
return None
return idx + 1
def add_empty_lines(code_lines):
"""Returns an enumerated list of the lines.
When an empty line separates follows an escaped code part,
an empty line is inserted into the generated list with None as line number."""
new_lines = []
is_block_with_sequence = False
last_indent = 0
already_added = False
for idx, line in code_lines:
if "#[" in line:
is_block_with_sequence = True
if not line.strip() and last_indent == 0 and not already_added:
new_lines.append((idx, line))
new_lines.append((None, "#["))
last_indent = 0
already_added = True
else:
if not line.strip():
continue
new_lines.append((increase(idx), line))
last_indent = len(line) - len(line.lstrip())
already_added = False
return new_lines
def add_gen_in_def(code_lines, orig_file):
"""If a function's name starts with 'gen_' in a generated file,
that function produces code.
This is a helper function that initialises and returns the appropriate variable.
Also, if "return" is encountered on a single line,
the requisite return value is inserted."""
new_lines = []
is_inside_gen = False
for idx, line in code_lines:
if is_inside_gen:
if re.match(r'^[ \t]*return[ \t]*$', line):
line = re.sub(r'^([ \t]*)return[ \t]*$', r'\1return generated_code', line)
is_separator_line = re.match(r'^#[ \t]*([^ \t])\1\1*', line)
is_method_line = re.sub(r'[ \t]*#.*', '', line).strip() != "" and line.lstrip() == line
is_unindented_line = re.match(r'^[^ \t]', line)
if is_separator_line or is_method_line or is_unindented_line:
new_lines.append((None, ' return generated_code'))
new_lines.append((None, ''))
is_inside_gen = False
if line.startswith('def gen_'):
new_lines.append((idx, line))
new_lines.append((None, ' generated_code = ""'))
is_inside_gen = True
continue
new_lines.append((idx, line))
if is_inside_gen:
new_lines.append((None, ' return generated_code'))
new_lines.append((None, ''))
return new_lines
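# Sketch of the transformation on a hypothetical generated module: a definition
#     def gen_example():
#         #[ return 0;
# becomes, after this pass and the line translation done elsewhere,
#     def gen_example():
#         generated_code = ""
#         ... (translated body) ...
#         return generated_code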
def file_prefix(file, genfile):
nopath = os.path.basename(file)
noext = re.sub(r'([.](sugar|c|py))?[.][^.]*$', '', nopath)
if "on" in args['hint']:
# "set the default"
args['hint'] = ['simple']
if "nofile" in args['hint']: return ''
if "noext" in args['hint']: return f'{noext}:'
if "simple" in args['hint']: return ':' if genfile == '?' else f'{noext}:'
if "simpler" in args['hint']: return '' if genfile == '?' else 'g' if noext == 'codegen' else noext
if "nopath" in args['hint']: return f'{nopath}:'
return f'{file}:'
def translate_file_contents(file, genfile, code, prefix_lines="", add_lines=True, relpath=None):
"""Returns the code transformed into runnable Python code.
Translated are #[generated_code, #=generator_expression and ${var} constructs."""
no_hint = "nohint" in args['hint']
new_lines = prefix_lines.splitlines()
new_lines += """
# SPDX-License-Identifier: Apache-2.0
# Copyright 2019 Eotvos Lorand University, Budapest, Hungary
# Autogenerated file (from {0}), do not modify directly.
# Generator: T4P4S (https://github.com/P4ELTE/t4p4s/)
import compiler_common
import re
def add_code(line, indent_level = 0, lineno = None, file = "{0}"):
global generated_code
line_ends = {{
"line_comment": "\\n",
"inline_comment": "",
"no_comment": "\\n",
"no_comment_inline": "",
}}
sugar_style = compiler_common.file_sugar_style[-1]
stripped_line = line.strip()
no_sugar_on_line = stripped_line.startswith('//') or stripped_line.startswith('# ') or stripped_line == ""
indent = compiler_common.file_indent_str * indent_level
if 'inline' in compiler_common.file_sugar_style[-1]:
indent = ''
sugared = sugar(no_sugar_on_line, file, lineno, sugar_style)
line_end = line_ends[sugar_style]
return f'{{indent}}{{stripped_line}}{{sugared}}{{line_end}}'
def sugar(no_sugar_on_line, file, lineno, sugar_style):
if {1} or no_sugar_on_line or file is None or lineno is None:
return ""
if sugar_style == 'line_comment':
return f" // {{file}}{{lineno}}"
if sugar_style == 'inline_comment':
return f" /* {{file}}{{lineno}} */"
return ""
generated_code += "// Autogenerated file (from {0} via {2}), do not modify directly.\\n"
generated_code += "// Generator: T4P4S (https://github.com/P4ELTE/t4p4s/)\\n"
generated_code += "\\n"
""".format(file_prefix(file, genfile), no_hint, os.path.relpath(file, relpath) if relpath is not None else file).splitlines()
code_lines = enumerate(code.splitlines())
code_lines = add_gen_in_def(code_lines, file)
if add_lines:
code_lines = add_empty_lines(code_lines)
has_translatable_comment = re.compile(r'^([ \t]*)#(pre|aft)?([\[\{\}])(.*)$')
for idx, code_line in code_lines:
new_line = code_line
if has_translatable_comment.match(code_line):
new_line = translate_line_with_insert(file, genfile, idx, code_line)
elif re.match(r'^[ \t]*#= .*$', code_line):
line_regex = r'^([ \t]*)#=[ \t]*(.*)$'
with compiler_common.SugarStyle('no_comment'):
line_indent, line_content = re.match(line_regex, code_line).groups()
is_escaped, code_part = split_and_translate(line_content, no_quote_allowed=True)
if is_escaped:
code_part = f'eval({code_part})'
new_line = f'{line_indent}generated_code += {code_part}'
if args['desugar_info'] == "comment":
# sugar_filename = os.path.basename(file)
# sugar_filename = re.sub("([.]sugar)?[.]py", "", sugar_filename)
sugar_filename = file
new_line += f" ## {os.path.relpath(sugar_filename, '.')}:{idx}"
stripped = new_line.strip()
# won't mark empty lines and continued lines
if stripped != "" and new_line.strip()[-1] != '\\' and idx is not None and not stripped.startswith('generated_code +='):
# TODO idx is sometimes off by one?
new_line += f" ## {os.path.relpath(file, '.')}:{int(idx) + 1}"
new_lines.append(new_line)
return '\n'.join(new_lines) + "\n"
def generate_code(file, genfile, localvars={}):
"""The file contains Python code with #[ inserts.
The comments (which have to be indented properly)
    contain code to be output;
their contents are collected in the variable generated_code.
Inside the comments, refer to Python variables as ${variable_name}."""
with open(file, "r") as orig_file:
code = orig_file.read()
code = translate_file_contents(file, genfile, code, relpath="src/")
if (depth := compiler_common.file_indentation_level) != 0:
print(f"Warning: indentation is {depth} level{'' if depth == 1 else 's'} too deep in file {file}", file=sys.stderr)
compiler_common.file_indentation_level = 0
if generate_code_files:
write_file(genfile, code)
localvars['generated_code'] = ""
module_name = genfile
localvars['t4p4sdir'] = os.path.relpath(os.path.join(args['generated_dir'], '..', "gen"))
exec(compile(code, module_name, 'exec'), localvars, localvars)
return re.sub(r'\n{3,}', '\n\n', localvars['generated_code'])
def generate_desugared_py():
"""Some Python source files also use the sugared syntax.
The desugared files are generated here."""
import glob
for fromfile in glob.glob("src/utils/*.sugar.py"):
with open(fromfile, "r") as orig_file:
code = orig_file.read()
prefix_lines = "generated_code = \"\"\n"
tofile = re.sub("[.]sugar[.]py$", ".py", fromfile)
compiler_common.current_compilation = { 'from': fromfile, 'to': tofile, 'use_real_random': args['use_real_random'], 'hlir': hlir }
code = translate_file_contents(fromfile, tofile, code, prefix_lines=prefix_lines, add_lines=False, relpath="src/")
compiler_common.current_compilation = None
write_file(tofile, code)
def output_desugared_c(filename, filepath, idx):
outfile = os.path.join(args['generated_dir'], re.sub(r'\.([ch])\.py$', r'.\1', filename))
outpyfile = os.path.join(args['desugared_path'], filename)
genfile = '?'
compiler_common.current_compilation = { 'orig': filename, 'from': genfile, 'to': outfile, 'use_real_random': args['use_real_random'], 'multi': args['multi'], 'multi_idx': idx, 'skip_output': False, 'hlir': hlir }
code = generate_code(filepath, outpyfile, {'hlir': hlir})
is_multicompiled = 'is_multicompiled' in compiler_common.current_compilation
skip_output = compiler_common.current_compilation['skip_output']
compiler_common.current_compilation = None
if not skip_output:
if is_multicompiled:
outfile = os.path.join(args['generated_dir'], 'multi', re.sub(r'\.([ch])\.py$', rf'_{idx}.\1', filename))
write_file(outfile, code)
return is_multicompiled
def generate_desugared_c(filename, filepath):
global hlir
    genfile = os.path.join(args['desugared_path'], re.sub(r'\.([ch])\.py$', r'.\1.gen.py', filename))
    # compute the final output path the same way as output_desugared_c does
    outfile = os.path.join(args['generated_dir'], re.sub(r'\.([ch])\.py$', r'.\1', filename))
    compiler_log_warnings_errors.filename = filename
    compiler_log_warnings_errors.filepath = filepath
    compiler_log_warnings_errors.genfile = genfile
    compiler_log_warnings_errors.outfile = outfile
is_multicompiled = output_desugared_c(filename, filepath, 0)
if is_multicompiled:
for idx in range(1, args['multi']):
output_desugared_c(filename, filepath, idx)
def make_dir(path, description):
if not os.path.isdir(path):
os.makedirs(path)
args['verbose'] and print(f" GEN {path} ({description})")
def make_dirs(cache_dir_name):
"""Makes directories if they do not exist"""
if not os.path.isdir(args['compiler_files_dir']):
print("Compiler files path is missing", file=sys.stderr)
sys.exit(1)
make_dir(args['desugared_path'], 'desugared compiler files')
make_dir(args['generated_dir'], 'generated files')
make_dir(os.path.join(args['generated_dir'], 'multi'), os.path.join('generated files', 'multi'))
if cache_dir_name and not os.path.isdir(cache_dir_name):
os.mkdir(cache_dir_name)
def file_contains_exact_text(filename, text):
"""Returns True iff the file exists and it already contains the given text."""
if not os.path.isfile(filename):
return False
with open(filename, "r") as infile:
intext = infile.read()
return text == intext
return False
def write_file(filename, text):
"""Writes the given text to the given file."""
if filename == '?':
return
if file_contains_exact_text(filename, text):
return
if filename.endswith(".gen.py"):
args['verbose'] and print(" P4", os.path.basename(filename))
with open(filename, "w") as genfile:
genfile.write(text)
def get_core_count():
try:
        import psutil
        return psutil.cpu_count()
except:
try:
import multiprocessing
return multiprocessing.cpu_count()
except:
return 1
def init_args():
"""Parses the command line arguments and loads them
into the global variable args."""
parser = argparse.ArgumentParser(description='T4P4S compiler')
parser.add_argument('p4_file', help='The source file')
parser.add_argument('-x', '--multi', help='Multiplex rate for multicompiled modules', required=True, type=int)
parser.add_argument('-v', '--p4v', help='Use P4-14 (default is P4-16)', required=False, choices=[16, 14], type=int, default=16)
parser.add_argument('-p', '--p4c_path', help='P4C path', required=False)
parser.add_argument('-c', '--compiler_files_dir', help='Source directory of the compiler\'s files', required=False, default=os.path.join("src", "hardware_indep"))
parser.add_argument('-g', '--generated_dir', help='Output directory for hardware independent files', required=True)
parser.add_argument('-desugared_path', help='Output directory for the compiler\'s files', required=False, default=argparse.SUPPRESS)
parser.add_argument('-desugar_info', help='Markings in the generated source code', required=False, choices=["comment", "pragma", "none"], default="comment")
parser.add_argument('-verbose', help='Verbosity', required=False, default=False, action='store_const', const=True)
parser.add_argument('-hint', help='Filename hint style in generated code comments', required=False, default=[], action='append')
parser.add_argument('-recompile', help='Force recompilation: ignore cache files', required=False, default=False, action='store_const', const=True)
parser.add_argument('-beautify', help='Beautification', required=False, default=False, action='store_const', const=True)
parser.add_argument('-use_real_random', help='Use random values in unspecified cases', required=False, default=False, action='store_const', const=True)
parser.add_argument('--p4dbg', help='Debugging', required=False, default=False, action='store_const', const=True)
parser.add_argument('--p4opt', help='Debug option passed to P4-to-JSON compiler', required=False, default=[], action='append')
parser.add_argument('--p4incdir', help='Include directory to P4-to-JSON compiler', required=False, default=[], action='append')
args = vars(parser.parse_args())
if 'desugared_path' not in args:
args['desugared_path'] = os.path.relpath(os.path.join(args['generated_dir'], '..', "gen"))
cache_dir_name = os.path.relpath(os.path.join(args['generated_dir'], '..', "cache"))
if args['hint'] == []:
args['hint'] = ['nohint']
return args, cache_dir_name
def generate_files():
bases = (args['compiler_files_dir'], os.path.join(args['compiler_files_dir'], 'multi'))
exts = [".c.py", ".h.py"]
generate_desugared_py()
for base, filename in ((base, f) for base in bases for f in os.listdir(base) if os.path.isfile(os.path.join(base, f)) for ext in exts if f.endswith(ext)):
generate_desugared_c(filename, os.path.join(base, filename))
def main():
try:
global args
args, cache_dir_name = init_args()
make_dirs(cache_dir_name)
global hlir
hlir = load_from_p4(args, cache_dir_name)
generate_files()
showErrors()
showWarnings()
except T4P4SHandledException:
sys.exit(1)
except:
cuco = compiler_common.current_compilation
if cuco:
stagetxt = f"{cuco['stage']['name']}: " if 'stage' in cuco else ""
print(f"{stagetxt}Error during the compilation of {cuco['from']} to {cuco['to']}")
print_with_backtrace(sys.exc_info(), cuco['from'] if cuco else "(no compiler_common file)", args['p4dbg'])
sys.exit(1)
global errors
if len(errors) > 0:
sys.exit(1)
if __name__ == '__main__':
main()
import random
##----------------------------------------------------------------##
from gii.core import app, signals
from gii.qt import QtEditorModule
from gii.qt.IconCache import getIcon
from gii.qt.controls.GenericTreeWidget import GenericTreeWidget
from gii.qt.controls.PropertyEditor import PropertyEditor
from gii.moai.MOAIRuntime import MOAILuaDelegate
from gii.SceneEditor import SceneEditorModule
from gii.qt.helpers import addWidgetWithLayout, QColorF, unpackQColor
from gii.SearchView import requestSearchView, registerSearchEnumerator
import datetime
##----------------------------------------------------------------##
from PyQt4 import QtCore, QtGui, uic
from PyQt4.QtCore import Qt
##----------------------------------------------------------------##
from mock import _MOCK, isMockInstance
##----------------------------------------------------------------##
_DEPLOY_CONFIG_FILE = 'deploy.json'
##----------------------------------------------------------------##
def _getModulePath( path ):
import os.path
return os.path.dirname( __file__ ) + '/' + path
##----------------------------------------------------------------##
class DeployManager( SceneEditorModule ):
def __init__(self):
super( DeployManager, self ).__init__()
def getName( self ):
return 'deploy_manager'
def getDependency( self ):
return [ 'mock' ]
def onLoad( self ):
self.configPath = self.getProject().getConfigPath( _DEPLOY_CONFIG_FILE )
#UI
self.container = self.requestDocumentWindow( 'DeployManager',
title = 'Deployment Manager',
allowDock = False,
minSize = ( 300, 300 ),
maxSize = ( 300, 300 )
)
#Components
self.window = self.container.addWidgetFromFile( _getModulePath('DeployManager.ui') )
self.delegate = MOAILuaDelegate( self )
self.delegate.load( _getModulePath( 'DeployManager.lua' ) )
#scene tree
layout = QtGui.QVBoxLayout()
self.window.containerSceneTree.setLayout( layout )
layout.setSpacing( 0 )
layout.setMargin( 0 )
self.treeScene = DeploySceneTree(
self.window.containerSceneTree,
editable = True,
sorting = False,
multiple_selection = False
)
self.treeScene.manager = self
layout.addWidget( self.treeScene )
sceneToolbar = QtGui.QToolBar( self.window.containerSceneTree )
layout.addWidget( sceneToolbar )
self.sceneTool = self.addToolBar( 'deploy_scene', sceneToolbar )
self.addTool( 'deploy_scene/add_scene', label = 'add' ,icon = 'add' )
self.addTool( 'deploy_scene/remove_scene', label = 'remove' ,icon = 'remove' )
self.addTool( 'deploy_scene/move_up_scene', label = 'up' ,icon = 'arrow-up' )
self.addTool( 'deploy_scene/move_down_scene', label = 'down' ,icon = 'arrow-down' )
self.addTool( 'deploy_scene/----' )
self.addTool( 'deploy_scene/edit_scene', label = 'change target scene' ,icon = 'pencil' )
self.addTool( 'deploy_scene/----' )
self.addTool( 'deploy_scene/set_entry_scene', label = 'set as entry' ,icon = 'flag' )
#deploy target tree
layout = QtGui.QVBoxLayout()
self.window.containerTargetTree.setLayout( layout )
layout.setSpacing( 0 )
layout.setMargin( 0 )
self.treeTarget = DeployTargetTree(
self.window.containerTargetTree,
editable = True,
multiple_selection = False
)
self.treeTarget.manager = self
layout.addWidget( self.treeTarget )
targetToolbar = QtGui.QToolBar( self.window.containerTargetTree )
layout.addWidget( targetToolbar )
self.targetTool = self.addToolBar( 'deploy_target', targetToolbar )
self.addTool( 'deploy_target/add_target', label = '+' )
self.addTool( 'deploy_target/remove_target', label = '-' )
#target property
self.propertyTarget = addWidgetWithLayout(
PropertyEditor( self.window.containerTargetProp )
)
#menu
self.addMenuItem( 'main/file/----' )
self.addMenuItem( 'main/file/deploy_manager',
dict( label = 'Deploy Manager', shortcut = 'F11' )
)
self.addMenuItem( 'main/file/deploy_build',
dict( label = 'Deploy Build', shortcut = 'Ctrl+F11' )
)
# self.container.show()
self.window.buttonOK.clicked.connect( self.onButtonOK )
#other
registerSearchEnumerator( deployTargetSearchEnumerator )
signals.connect( 'project.pre_deploy', self.preDeploy )
signals.connect( 'project.deploy', self.onDeploy )
signals.connect( 'project.post_deploy', self.postDeploy )
def onStart( self ):
#load config
self.loadConfig()
#fill trees
self.treeTarget.rebuild()
self.treeScene.rebuild()
def onStop( self ):
self.saveConfig()
def loadConfig( self ):
self.delegate.safeCall( 'loadDeployManagerConfig', self.configPath )
def saveConfig( self ):
self.delegate.safeCall( 'saveDeployManagerConfig', self.configPath )
def getDeployTargetTypes( self ):
registry = self.delegate.safeCall( 'getDeployTargetTypeRegistry' )
return [ name for name in registry.keys() ]
def getDeployTargets( self ):
targets = self.delegate.safeCallMethod( 'config', 'getTargets' )
return [ obj for obj in targets.values() ]
def addDeployTarget( self, targetType ):
target = self.delegate.safeCallMethod( 'config', 'addDeployTarget', targetType )
self.treeTarget.addNode( target )
self.treeTarget.editNode( target )
def changeDeployScene( self, targetScene ):
for sceneEntry in self.treeScene.getSelection():
self.delegate.safeCallMethod( 'config', 'changeTargetScene', sceneEntry, targetScene.getPath() )
self.treeScene.refreshNode( sceneEntry )
return
def renameDeployTarget( self, target, name ):
target.name = name #TODO: avoid duplicated name
def addDeployScene( self, sceneNode ):
if not sceneNode: return
entry = self.delegate.safeCallMethod( 'config', 'addDeployScene', sceneNode.getNodePath() )
self.treeScene.addNode( entry )
self.treeScene.editNode( entry )
def renameDeployScene( self, entry, alias ):
entry.alias = alias #TODO: avoid duplicated name
def getDeployScenes( self ):
scenes = self.delegate.safeCallMethod( 'config', 'getScenes' )
return [ obj for obj in scenes.values() ]
def updateGameConfig( self ):
self.delegate.safeCallMethod( 'config', 'updateGameConfig' )
def preDeploy( self, context ):
self.updateGameConfig()
def onDeploy( self, context ):
pass
def postDeploy( self, context ):
pass
def onTool( self, tool ):
name = tool.name
if name == 'add_target':
requestSearchView(
info = 'select deploy target type',
context = 'deploy_target_type',
on_selection = self.addDeployTarget
)
elif name == 'remove_target':
for target in self.treeTarget.getSelection():
self.treeTarget.removeNode( target )
self.delegate.safeCallMethod( 'config', 'removeDeployTarget', target )
elif name == 'add_scene':
requestSearchView(
info = 'select scene to deploy',
context = 'asset',
type = 'scene',
on_selection = self.addDeployScene
)
elif name == 'edit_scene':
requestSearchView(
info = 'select new target scene ',
context = 'asset',
type = 'scene',
on_selection = self.changeDeployScene
)
elif name == 'remove_scene':
for entry in self.treeScene.getSelection():
self.delegate.safeCallMethod( 'config', 'removeDeployScene', entry )
self.treeScene.removeNode( entry )
self.treeScene.refreshAllContent()
elif name == 'set_entry_scene':
for entry in self.treeScene.getSelection():
self.delegate.safeCallMethod( 'config', 'setEntryScene', entry )
break
self.treeScene.refreshAllContent()
elif name == 'move_up_scene':
for target in self.treeScene.getSelection():
self.delegate.safeCallMethod( 'config', 'moveSceneUp', target )
self.treeScene.rebuild()
self.treeScene.selectNode( target )
break
elif name == 'move_down_scene':
for target in self.treeScene.getSelection():
self.delegate.safeCallMethod( 'config', 'moveSceneDown', target )
self.treeScene.rebuild()
self.treeScene.selectNode( target )
break
def onMenu( self, node ):
name = node.name
if name == 'deploy_manager' :
self.onSetFocus()
elif name == 'deploy_build':
app.getProject().deploy()
def onSetFocus( self ):
self.container.show()
self.container.raise_()
def onButtonOK( self ):
self.saveConfig()
self.container.hide()
DeployManager().register()
##----------------------------------------------------------------##
def deployTargetSearchEnumerator( typeId, context, option ):
if context not in [ 'deploy_target_type' ] : return
result = []
mgr = app.getModule( 'deploy_manager' )
for name in mgr.getDeployTargetTypes():
entry = ( name, name, 'Deploy Target', None )
result.append( entry )
return result
##----------------------------------------------------------------##
_BrushEntryScene = QtGui.QBrush( QColorF( 0,0,1 ) )
_BrushDefault = QtGui.QBrush()
class DeploySceneTree( GenericTreeWidget ):
def getHeaderInfo( self ):
return [ ('Name',250), ('Path', 250), ('ID', 30) ]
def getRootNode( self ):
return self.manager
def getNodeParent( self, node ):
if node == self.manager: return None
return self.manager
def getNodeChildren( self, node ):
if node == self.manager:
return self.manager.getDeployScenes()
else:
return []
def updateItemContent( self, item, node, **option ):
if node == self.manager: return
#TODO:icon
item.setText( 0, node.alias )
item.setText( 1, node.path )
item.setText( 2, str(node.id) )
if node.entry:
item.setIcon(0, getIcon( 'scene' ) )
else:
item.setIcon(0, getIcon( 'obj' ) )
def onItemChanged( self, item, col ):
entry = item.node
self.manager.renameDeployScene( entry, item.text(0) )
##----------------------------------------------------------------##
class DeployTargetTree( GenericTreeWidget ):
def getHeaderInfo( self ):
return [ ('Name',150), ('Type', 30), ('State',30), ('Last Build',-1) ]
def getRootNode( self ):
return self.manager
def getNodeParent( self, node ):
if node == self.manager: return None
return self.manager
def getNodeChildren( self, node ):
if node == self.manager:
return self.manager.getDeployTargets()
else:
return []
def updateItemContent( self, item, node, **option ):
if node == self.manager: return
#TODO:icon
item.setText( 0, node.name )
item.setText( 1, node.getType( node ) )
item.setText( 2, node.state )
# item.setText( 3, node.getLastBuild() )
def onItemChanged( self, item, col ):
target = item.node
self.manager.renameDeployTarget( target, item.text(0) )
def onItemSelectionChanged( self ):
selection = self.getSelection()
if selection:
for node in selection:
self.manager.propertyTarget.setTarget( node )
else:
self.manager.propertyTarget.clear()
| {
"content_hash": "818caf8d689b41bf3b37f53e619489dd",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 99,
"avg_line_length": 30.303370786516854,
"alnum_prop": 0.6546162402669633,
"repo_name": "tommo/gii",
"id": "33c8a985e089e1464153c6573e9364f1b3abbe00",
"size": "10788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/Mock/DeployManager/DeployManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferUI
from Qt import QtWidgets
## A simple PlugValueWidget which just displays the name of the plug,
# with the popup action menu for the plug.
#
# Supported plug metadata :
#
# - "labelPlugValueWidget:renameable"
class LabelPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, horizontalAlignment=GafferUI.Label.HorizontalAlignment.Left, verticalAlignment=GafferUI.Label.VerticalAlignment.Center, **kw ) :
GafferUI.PlugValueWidget.__init__( self, QtWidgets.QWidget(), plug, **kw )
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins( 0, 0, 0, 0 )
layout.setSizeConstraint( QtWidgets.QLayout.SetMinAndMaxSize )
self._qtWidget().setLayout( layout )
self.__label = GafferUI.NameLabel(
plug,
horizontalAlignment = horizontalAlignment,
verticalAlignment = verticalAlignment,
)
self.__label._qtWidget().setObjectName( "gafferPlugLabel" )
layout.addWidget( self.__label._qtWidget() )
self.__editableLabel = None # we'll make this lazily as needed
# connecting at group 0 so we're called before the slots
# connected by the NameLabel class.
self.__dragBeginConnection = self.__label.dragBeginSignal().connect( 0, Gaffer.WeakMethod( self.__dragBegin ) )
self.__dragEndConnection = self.__label.dragEndSignal().connect( 0, Gaffer.WeakMethod( self.__dragEnd ) )
self.__plugMetadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
self._addPopupMenu( self.__label )
self.setPlug( plug )
def label( self ) :
return self.__label
def setPlug( self, plug ) :
GafferUI.PlugValueWidget.setPlug( self, plug )
self.__label.setGraphComponent( plug )
if self.__editableLabel is not None :
self.__editableLabel.setGraphComponent( plug )
self.__updateFormatter()
self.__updateDoubleClickConnection()
def setHighlighted( self, highlighted ) :
GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
self.__label.setHighlighted( highlighted )
def getToolTip( self ) :
result = GafferUI.PlugValueWidget.getToolTip( self )
if self.getPlug() is not None :
result += "<ul>"
result += "<li>Left drag to connect</li>"
if hasattr( self.getPlug(), "getValue" ) :
result += "<li>Shift-left or middle drag to transfer value</li>"
result += "<ul>"
return result
def _updateFromPlug( self ) :
plug = self.getPlug()
valueChanged = plug.getInput() is not None
if not valueChanged and isinstance( plug, Gaffer.ValuePlug ) :
with self.getContext() :
if Gaffer.NodeAlgo.hasUserDefault( plug ) :
try:
valueChanged = not Gaffer.NodeAlgo.isSetToUserDefault( plug )
except:
# an error here should not cause the UI to break, especially since the corresponding value widget could be indicating the error itself
valueChanged = True
else :
try:
valueChanged = not plug.isSetToDefault()
except:
# an error here should not cause the UI to break, especially since the corresponding value widget could be indicating the error itself
valueChanged = True
self.__setValueChanged( valueChanged )
# Sets whether or not the label should be rendered in a ValueChanged state.
def __setValueChanged( self, valueChanged ) :
if valueChanged == self.__getValueChanged() :
return
self.__label._qtWidget().setProperty( "gafferValueChanged", GafferUI._Variant.toVariant( valueChanged ) )
self.__label._repolish()
def __getValueChanged( self ) :
if "gafferValueChanged" not in self.__label._qtWidget().dynamicPropertyNames() :
return False
return GafferUI._Variant.fromVariant( self.__label._qtWidget().property( "gafferValueChanged" ) )
def __dragBegin( self, widget, event ) :
# Initiate a drag containing the value of the plug
# for a shift-left drag or a middle drag. Initiate a
# drag containing the plug for a straight left-drag.
shift = event.modifiers & event.Modifiers.Shift
left = event.buttons == event.Buttons.Left
middle = event.buttons == event.Buttons.Middle
if ( shift and left ) or middle :
if not hasattr( self.getPlug(), "getValue" ) :
return None
GafferUI.Pointer.setCurrent( "values" )
with self.getContext() :
return self.getPlug().getValue()
elif left :
GafferUI.Pointer.setCurrent( "plug" )
return self.getPlug()
def __dragEnd( self, widget, event ) :
GafferUI.Pointer.setCurrent( None )
def __updateFormatter( self ) :
plug = self.getPlug()
label = Gaffer.Metadata.value( plug, "label" ) if plug is not None else None
if label is not None :
self.__label.setFormatter( lambda graphComponents : label )
else :
self.__label.setFormatter( self.__label.defaultFormatter )
def __updateDoubleClickConnection( self ) :
self.__labelDoubleClickConnection = None
if self.getPlug() is None or not Gaffer.Metadata.value( self.getPlug(), "labelPlugValueWidget:renameable" ) :
return
self.__labelDoubleClickConnection = self.__label.buttonDoubleClickSignal().connect( Gaffer.WeakMethod( self.__labelDoubleClicked ) )
def __labelDoubleClicked( self, label, event ) :
assert( label is self.__label )
if Gaffer.MetadataAlgo.readOnly( self.getPlug() ) :
return
if self.__editableLabel is None :
self.__editableLabel = GafferUI.NameWidget( self.getPlug() )
self.__editableLabel._qtWidget().setMinimumSize( self.label()._qtWidget().minimumSize() )
self.__editableLabel._qtWidget().setMaximumSize( self.label()._qtWidget().maximumSize() )
# Connect at group 0 so we're called before the NameWidget's own slots.
self.__labelEditingFinishedConnection = self.__editableLabel.editingFinishedSignal().connect( 0, Gaffer.WeakMethod( self.__labelEditingFinished ) )
self._qtWidget().layout().insertWidget( 0, self.__editableLabel._qtWidget() )
self.__label.setVisible( False )
self.__editableLabel.setVisible( True )
self.__editableLabel.setSelection( 0, len( self.__editableLabel.getText() ) )
self.__editableLabel.grabFocus()
def __labelEditingFinished( self, nameWidget ) :
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
# Do what the NameWidget would have done for us anyway, so we
# can group it with the metadata deregistration in the undo queue.
self.getPlug().setName( nameWidget.getText() )
# Remove any metadata label which would mask the name - if a user
# has gone to the trouble of setting a sensible name, then it should
# take precedence.
Gaffer.Metadata.deregisterValue( self.getPlug(), "label" )
self.__label.setVisible( True )
self.__editableLabel.setVisible( False )
# Return True so that the NameWidget's handler isn't run, since we
# did all the work ourselves.
return True
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.getPlug() is None :
return
if key=="label" and Gaffer.MetadataAlgo.affectedByChange( self.getPlug(), nodeTypeId, plugPath, plug ) :
self.__updateFormatter()
| {
"content_hash": "f52b12e6406d713843a2685fd6975bf2",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 155,
"avg_line_length": 34.756218905472636,
"alnum_prop": 0.7182937303177784,
"repo_name": "ivanimanishi/gaffer",
"id": "0b2c0284c9b95ab90ce4d27569a342acdb6740a8",
"size": "8794",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferUI/LabelPlugValueWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39753"
},
{
"name": "C++",
"bytes": "6086015"
},
{
"name": "CMake",
"bytes": "83446"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "6120483"
},
{
"name": "Shell",
"bytes": "13049"
},
{
"name": "Slash",
"bytes": "2870"
}
],
"symlink_target": ""
} |
__author__ = 'aaronweaver'
from datetime import datetime
import json
from dojo.models import Finding
class BanditParser(object):
def __init__(self, filename, test):
tree = filename.read()
try:
data = json.loads(str(tree, 'utf-8'))
except:
data = json.loads(tree)
dupes = dict()
find_date = None
if "generated_at" in data:
find_date = datetime.strptime(data["generated_at"], '%Y-%m-%dT%H:%M:%SZ')
for item in data["results"]:
categories = ''
language = ''
mitigation = ''
impact = ''
references = ''
findingdetail = ''
title = ''
group = ''
status = ''
title = "Test Name: " + item["test_name"] + " Test ID: " + item["test_id"]
# ##### Finding details information ######
findingdetail += "Filename: " + item["filename"] + "\n"
findingdetail += "Line number: " + str(item["line_number"]) + "\n"
findingdetail += "Issue Confidence: " + item["issue_confidence"] + "\n\n"
findingdetail += "Code:\n"
findingdetail += item["code"] + "\n"
sev = item["issue_severity"]
mitigation = item["issue_text"]
references = item["test_id"]
dupe_key = title + item["filename"] + str(item["line_number"])
if dupe_key in dupes:
find = dupes[dupe_key]
else:
dupes[dupe_key] = True
find = Finding(title=title,
test=test,
active=False,
verified=False,
description=findingdetail,
severity=sev.title(),
numerical_severity=Finding.get_numerical_severity(sev),
mitigation=mitigation,
impact=impact,
references=references,
file_path=item["filename"],
line=item["line_number"],
url='N/A',
date=find_date,
static_finding=True)
dupes[dupe_key] = find
findingdetail = ''
self.items = list(dupes.values())
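# For reference, a minimal illustrative Bandit JSON report that this parser
# accepts (all field values below are made up):
#
# {
#   "generated_at": "2019-01-01T00:00:00Z",
#   "results": [
#     {
#       "test_name": "assert_used",
#       "test_id": "B101",
#       "filename": "app/views.py",
#       "line_number": 42,
#       "issue_confidence": "HIGH",
#       "issue_severity": "LOW",
#       "issue_text": "Use of assert detected.",
#       "code": "42 assert user.is_admin\n"
#     }
#   ]
# }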
| {
"content_hash": "4167d83c8c6f3a73aafd52e7475bfb80",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 86,
"avg_line_length": 35.3768115942029,
"alnum_prop": 0.4354772634166325,
"repo_name": "OWASP/django-DefectDojo",
"id": "b635a4f3994f34c0d0a30b39a1350eb093574c9c",
"size": "2441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/tools/bandit/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18512"
},
{
"name": "HTML",
"bytes": "848751"
},
{
"name": "JavaScript",
"bytes": "6717"
},
{
"name": "Python",
"bytes": "869791"
},
{
"name": "Ruby",
"bytes": "998"
},
{
"name": "Shell",
"bytes": "30386"
},
{
"name": "Smarty",
"bytes": "3485"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
"""create database
Revision ID: 166ff2dcc48d
Revises:
Create Date: 2014-10-24 11:43:23.886123
"""
try:
from hashlib import sha1
sha1 # suppress pyflakes warning
except ImportError: # pragma: nocover
from sha import new as sha1
from alembic import op, context
from sqlalchemy import ForeignKey, Column, Table, MetaData
from sqlalchemy.types import Integer, Boolean, Unicode, String, Float, \
UserDefinedType, DateTime
# revision identifiers, used by Alembic.
revision = "166ff2dcc48d"
down_revision = None
class TsVector(UserDefinedType):
""" A custom type for PostgreSQL's tsvector type. """
def get_col_spec(self):
return "TSVECTOR"
def upgrade():
schema = context.get_context().config.get_main_option("schema")
parentschema = context.get_context().config.get_main_option("parentschema")
srid = context.get_context().config.get_main_option("srid")
engine = op.get_bind().engine
if op.get_context().dialect.has_table(
engine, "functionality", schema=schema
): # pragma: nocover
return
op.create_table(
"functionality",
Column("id", Integer, primary_key=True),
Column("name", Unicode, nullable=False),
Column("value", Unicode, nullable=False),
Column("description", Unicode),
schema=schema,
)
op.create_table(
"treeitem",
Column("type", String(10), nullable=False),
Column("id", Integer, primary_key=True),
Column("name", Unicode),
Column("order", Integer, nullable=False),
Column("metadataURL", Unicode),
schema=schema,
)
op.create_table(
"restrictionarea",
Column("id", Integer, primary_key=True),
Column("name", Unicode),
Column("description", Unicode),
Column("readwrite", Boolean, default=False),
schema=schema,
)
op.execute(
"SELECT AddGeometryColumn('%(schema)s', 'restrictionarea', "
"'area', %(srid)s, 'POLYGON', 2)" % {
"schema": schema, "srid": srid
}
)
op.create_table(
"shorturl",
Column("id", Integer, primary_key=True),
Column("url", Unicode(1000)),
Column("ref", String(20), index=True, unique=True, nullable=False),
Column("creator_email", Unicode(200)),
Column("creation", DateTime),
Column("last_hit", DateTime),
Column("nb_hits", Integer),
schema=schema + "_static",
)
op.create_table(
"role",
Column("id", Integer, primary_key=True),
Column("name", Unicode, unique=True, nullable=False),
Column("description", Unicode),
schema=schema,
)
op.execute(
"SELECT AddGeometryColumn('%(schema)s', 'role', "
"'extent', %(srid)s, 'POLYGON', 2)" % {
"schema": schema, "srid": srid
}
)
role = Table(
"role", MetaData(),
Column("name", Unicode, unique=True, nullable=False),
schema=schema,
)
op.bulk_insert(role, [
{"name": "role_admin"}
])
op.create_table(
"layer",
Column(
"id", Integer,
ForeignKey(schema + ".treeitem.id"), primary_key=True
),
Column("public", Boolean, default=True),
Column("inMobileViewer", Boolean, default=True),
Column("inDesktopViewer", Boolean, default=True),
Column("isChecked", Boolean, default=True),
Column("icon", Unicode),
Column("layerType", Unicode(12)),
Column("url", Unicode),
Column("imageType", Unicode(10)),
Column("style", Unicode),
Column("dimensions", Unicode),
Column("matrixSet", Unicode),
Column("wmsUrl", Unicode),
Column("wmsLayers", Unicode),
Column("queryLayers", Unicode),
Column("kml", Unicode),
Column("isSingleTile", Boolean),
Column("legend", Boolean, default=True),
Column("legendImage", Unicode),
Column("legendRule", Unicode),
Column("isLegendExpanded", Boolean, default=False),
Column("minResolution", Float),
Column("maxResolution", Float),
Column("disclaimer", Unicode),
Column("identifierAttributeField", Unicode),
Column("geoTable", Unicode),
Column("excludeProperties", Unicode),
Column("timeMode", Unicode(8)),
schema=schema,
)
op.create_table(
"role_restrictionarea",
Column(
"role_id", Integer,
ForeignKey(schema + ".role.id"), primary_key=True
),
Column(
"restrictionarea_id", Integer,
ForeignKey(schema + ".restrictionarea.id"),
primary_key=True
),
schema=schema,
)
op.create_table(
"tsearch",
Column("id", Integer, primary_key=True),
Column("label", Unicode),
Column("layer_name", Unicode),
Column("role_id", Integer, ForeignKey(schema + ".role.id"), nullable=True),
Column("public", Boolean, server_default="true"),
Column("ts", TsVector),
Column("params", Unicode, nullable=True),
schema=schema,
)
op.execute(
"SELECT AddGeometryColumn('%(schema)s', 'tsearch', 'the_geom', "
"%(srid)s, 'GEOMETRY', 2)" % {
"schema": schema, "srid": srid
}
)
op.create_index(
"tsearch_ts_idx", "tsearch", ["ts"],
schema=schema, postgresql_using="gin"
)
op.create_table(
"treegroup",
Column(
"id", Integer,
ForeignKey(schema + ".treeitem.id"), primary_key=True
),
schema=schema,
)
op.create_table(
"user",
Column("type", String(10), nullable=False),
Column("id", Integer, primary_key=True),
Column("username", Unicode, unique=True, nullable=False),
Column("password", Unicode, nullable=False),
Column("email", Unicode, nullable=False),
Column("is_password_changed", Boolean, default=False),
Column("role_id", Integer, ForeignKey(schema + ".role.id"), nullable=False),
schema=schema,
)
if parentschema is not None and parentschema != "": # pragma: nocover
op.add_column(
"user",
Column("parent_role_id", Integer, ForeignKey(parentschema + ".role.id")),
schema=schema
)
op.execute(
"INSERT INTO %(schema)s.user (type, username, email, password, role_id) "
"(SELECT 'user', 'admin', '[email protected]', '%(pass)s', r.id "
"FROM %(schema)s.role AS r "
"WHERE r.name = 'role_admin')" % {
"schema": schema,
"pass": sha1("admin").hexdigest()
}
)
op.create_table(
"role_functionality",
Column(
"role_id", Integer,
ForeignKey(schema + ".role.id"), primary_key=True
),
Column(
"functionality_id", Integer,
ForeignKey(schema + ".functionality.id"), primary_key=True
),
schema=schema,
)
op.create_table(
"user_functionality",
Column(
"user_id", Integer,
ForeignKey(schema + ".user.id"), primary_key=True
),
Column(
"functionality_id", Integer,
ForeignKey(schema + ".functionality.id"), primary_key=True
),
schema=schema,
)
op.create_table(
"layergroup",
Column(
"id", Integer,
ForeignKey(schema + ".treegroup.id"), primary_key=True
),
Column("isExpanded", Boolean),
Column("isInternalWMS", Boolean),
# children have radio buttons instead of check boxes
Column("isBaseLayer", Boolean),
schema=schema,
)
op.create_table(
"layer_restrictionarea",
Column(
"layer_id", Integer,
ForeignKey(schema + ".layer.id"), primary_key=True
),
Column(
"restrictionarea_id", Integer,
ForeignKey(schema + ".restrictionarea.id"), primary_key=True
),
schema=schema,
)
op.create_table(
"layergroup_treeitem",
Column(
"treegroup_id", Integer,
ForeignKey(schema + ".treegroup.id"), primary_key=True
),
Column(
"treeitem_id", Integer,
ForeignKey(schema + ".treeitem.id"), primary_key=True
),
schema=schema,
)
op.create_table(
"theme",
Column(
"id", Integer,
ForeignKey(schema + ".treegroup.id"), primary_key=True
),
Column("icon", Unicode),
Column("inMobileViewer", Boolean, default=False),
Column("inDesktopViewer", Boolean, default=True),
schema=schema,
)
op.create_table(
"theme_functionality",
Column(
"theme_id", Integer,
ForeignKey(schema + ".theme.id"), primary_key=True
),
Column(
"functionality_id", Integer,
ForeignKey(schema + ".functionality.id"), primary_key=True
),
schema=schema,
)
def downgrade():
schema = context.get_context().config.get_main_option("schema")
op.drop_table("theme_functionality", schema=schema)
op.drop_table("theme", schema=schema)
op.drop_table("layergroup_treeitem", schema=schema)
op.drop_table("layer_restrictionarea", schema=schema)
op.drop_table("layergroup", schema=schema)
op.drop_table("user_functionality", schema=schema)
op.drop_table("role_functionality", schema=schema)
op.drop_table("user", schema=schema)
op.drop_table("treegroup", schema=schema)
op.drop_table("tsearch", schema=schema)
op.drop_table("role_restrictionarea", schema=schema)
op.drop_table("layer", schema=schema)
op.drop_table("role", schema=schema)
op.drop_table("shorturl", schema=schema + "_static")
op.drop_table("restrictionarea", schema=schema)
op.drop_table("treeitem", schema=schema)
op.drop_table("functionality", schema=schema)
| {
"content_hash": "0a86d82d825b13c0b932c4e8ba85260c",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 85,
"avg_line_length": 32.076190476190476,
"alnum_prop": 0.571159936658749,
"repo_name": "geoportallux/geoportailv3-gisgr",
"id": "4c51053c36083282444c3a069285d12b380ba746",
"size": "11692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "CONST_alembic/main/versions/166ff2dcc48d_create_database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65556"
},
{
"name": "HTML",
"bytes": "170344"
},
{
"name": "JavaScript",
"bytes": "751190"
},
{
"name": "Makefile",
"bytes": "8693"
},
{
"name": "Mako",
"bytes": "19465"
},
{
"name": "Python",
"bytes": "363192"
},
{
"name": "Roff",
"bytes": "161"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
} |
"""This module contains different useful functions for manipulating models.
Functions defined here:
get_fields(class) -- return the class's field
get_name(class) -- return the model's name
get_plural_name(class) -- return the plural class name
get_pkey_names -- return a list of primary key field names
get_pkey_values -- return a tuple of primary key field values
update_attr -- update an object attribute
update -- update a whole object
"""
from model.types import BaseType
def get_fields(model):
"""Return a list of the defined fields in this model."""
fields = [getattr(model, name) for name in dir(model)]
fields = [field for field in fields if isinstance(field, BaseType)]
fields = sorted(fields, key=lambda field: field.nid)
return fields
def get_name(model):
"""Return the model name."""
name = model.__name__
name = name.split(".")[-1]
return name.lower()
def get_plural_name(model):
"""Return the plural model's name.
The plural name is:
The value of the 'plural_name' class attribute if exists
The singular name extended with the 's / es' rule otherwise
"""
if hasattr(model, "plural_name"):
return model.plural_name
else:
singular_name = get_name(model)
if singular_name.endswith("y"):
singular_name = singular_name[:-1] + "ies"
elif singular_name.endswith("s"):
singular_name += "es"
else:
singular_name += "s"
return singular_name
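# A minimal illustration of the pluralization rule above (hypothetical
# model classes, shown only as a sketch):
#   class Category: pass  -> get_plural_name(Category) == "categories"
#   class Bus: pass       -> get_plural_name(Bus) == "buses"
#   class Post: pass      -> get_plural_name(Post) == "posts"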
def get_pkey_names(model):
"""Return a list of field names (those defined as primary key)."""
fields = get_fields(model)
p_fields = [field.field_name for field in fields if field.pkey]
return p_fields
def get_pkey_values(object):
"""Return a tuple of datas (those defined as primary key).
NOTE: the 'get_pkeys_name' function expects a model as argument
(a class). This function, however, expects an object created on a
Model class.
"""
fields = get_fields(type(object))
p_fields = [field.field_name for field in fields if field.pkey]
p_fields = [getattr(object, field) for field in p_fields]
return tuple(p_fields)
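# Sketch of the distinction described in the two docstrings above, with a
# hypothetical User model whose only primary key field is "id":
#   get_pkey_names(User) -> ["id"]
#   get_pkey_values(some_user) -> (1,)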
def update_attr(to_update, attribute, value):
"""Update the object passed as first argument.
NOTE: this function is very close to 'setattr', but it writes the new
attribute directly on the object via object.__setattr__, bypassing the
class's own '__setattr__' override, which is useful for a model if you
don't want to update it in the data connector.
"""
object.__setattr__(to_update, attribute, value)
def update(to_update, dict_of_values):
"""Update the attributes of an object using update_attr."""
for name, value in dict_of_values.items():
update_attr(to_update, name, value)
| {
"content_hash": "f125913f0271e63df72478e5693caf65",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 34.265060240963855,
"alnum_prop": 0.6550632911392406,
"repo_name": "v-legoff/pa-poc1",
"id": "ab08dade9814440c2675ed9fb2ba24b66b8a279d",
"size": "4387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "86362"
}
],
"symlink_target": ""
} |
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import functions
def subjects ( config ):
teamsList = []
url = urls.teams.replace("{{SCHOOL_ID}}", str(config["school_id"])).replace("{{BRANCH_ID}}", str(config["branch_id"]))
response = proxy.session.get(url)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "m_Content_contenttbl"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
rows = soup.find("table", attrs={"id" : "m_Content_contenttbl"}).findAll("a")
idProg = re.compile(r"\/lectio\/(?P<school_id>.*)\/FindSkema.aspx\?type=(?P<type_name>.*)&fag=(?P<subject_id>.*)&(?P<the_rest>.*)")
for initial, name in functions.grouped(rows, 2):
groups = idProg.match(initial["href"])
teamsList.append({
"school_id" : config["school_id"],
"branch_id" : config["branch_id"],
"initial" : unicode(initial.text),
"name" : unicode(name.text),
"subject_id" : groups.group("subject_id") if "subject_id" in groups.groupdict() else "",
"type" : "team" if groups.group("type_name") and "type_name" in groups.groupdict() else ""
})
return {
"status" : "ok",
"subjects" : teamsList,
"term" : {
"value" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"years_string" : soup.find("select", attrs={"id" : "m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
}
} | {
"content_hash": "925d4afca81134ddb1b48b93b422b153",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 132,
"avg_line_length": 31.872340425531913,
"alnum_prop": 0.6261682242990654,
"repo_name": "boh1996/LectioAPI",
"id": "5adc68f6a1e8798d2706cc65a003b348a8ac50d1",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/subjects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "396682"
}
],
"symlink_target": ""
} |
import os
import re
import homely._vcs
from homely._errors import ConnectionError, RepoError, RepoHasNoCommitsError
from homely._utils import _expandpath, run
from homely.system import execute
class Repo(homely._vcs.Repo):
type = homely._vcs.HANDLER_GIT_v1
pulldesc = 'git pull'
@classmethod
def _from_parts(class_, repo_path, user, domain, name):
if name.endswith('.git'):
name = name[0:-4]
canonical = 'https://%s/%s/%s.git' % (domain, user, name)
return class_(
repo_path,
isremote=True,
iscanonical=repo_path == canonical,
canonical=canonical,
suggestedlocal=name,
)
@classmethod
def frompath(class_, repo_path):
if os.path.isdir(repo_path):
if not os.path.isdir(os.path.join(repo_path, '.git')):
return
return class_(_expandpath(repo_path),
isremote=False,
iscanonical=False,
suggestedlocal=None
)
if (repo_path.startswith('ssh://') or
repo_path.startswith('https://') or
repo_path.startswith('git@')):
# handle git@ url
m = re.match(r'^git@([^/]+)[/:]([^/]+)/([^/]+)', repo_path)
if m:
domain, user, name = m.groups()
return class_._from_parts(repo_path, user=user, domain=domain, name=name)
# TODO: gitlab allows '/' in the org name so we actually want all but the last '/part'
# to be the 'user'
m = re.match(r'^https://([^/]+)/([^/]+)/([^/]+)', repo_path)
if m:
domain, user, name = m.groups()
return class_._from_parts(repo_path, user=user, domain=domain, name=name)
return class_(repo_path,
isremote=True,
iscanonical=False,
suggestedlocal=None)
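# Illustrative inputs for frompath(), assuming the regexes above (all repo
# names and paths are made up):
#   Repo.frompath("git@github.com:someuser/dotfiles.git")
#       -> remote repo, canonical "https://github.com/someuser/dotfiles.git",
#          suggested local name "dotfiles"
#   Repo.frompath("https://github.com/someuser/dotfiles")
#       -> remote repo with the same canonical URL
#   Repo.frompath("/home/me/src/dotfiles")  # an existing dir containing .git/
#       -> local, non-canonical repo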
def pullchanges(self):
assert not self.isremote
cmd = ['git', 'pull']
code, _, err = execute(cmd,
cwd=self.repo_path,
stderr=True,
expectexit=(0, 1))
if code == 0:
return
assert code == 1
needle = b'fatal: Could not read from remote repository.'
if needle in err:
raise ConnectionError()
raise SystemError("Unexpected output from 'git pull': {}".format(err))
def clonetopath(self, dest):
origin = self.repo_path
execute(['git', 'clone', origin, dest])
def getrepoid(self):
assert not self.isremote
cmd = ['git', 'rev-list', '--max-parents=0', 'HEAD']
returncode, stdout = run(cmd,
cwd=self.repo_path,
stdout=True,
stderr="STDOUT")[:2]
if returncode == 0:
return self._getfirsthash(stdout)
if returncode != 128:
raise Exception("Unexpected returncode {} from git rev-list"
.format(returncode))
if b"ambiguous argument 'HEAD'" not in stdout:
raise Exception("Unexpected exitcode {}".format(returncode))
# there's no HEAD revision, so we'll do the command again with
# --all instead
cmd = ['git', 'rev-list', '--max-parents=0', '--all']
# use run() instead of execute() so that we don't print script output
returncode, stdout = run(cmd,
cwd=self.repo_path,
stdout=True,
stderr="STDOUT")[:2]
if returncode == 0:
if stdout == b'':
raise RepoHasNoCommitsError()
return self._getfirsthash(stdout)
if returncode != 129:
raise Exception("Unexpected returncode {} from git rev-list"
.format(returncode))
if b"usage: git rev-list" in stdout:
raise RepoHasNoCommitsError()
raise SystemError("Unexpected exitcode {}".format(returncode))
def _getfirsthash(self, stdout):
stripped = stdout.rstrip().decode('utf-8')
if '\n' in stripped:
raise RepoError("Git repo has multiple initial commits")
return stripped
@staticmethod
def shortid(repoid):
return repoid[0:8]
def isdirty(self):
cmd = ['git', 'status', '--porcelain']
out = execute(cmd, cwd=self.repo_path, stdout=True)[1]
for line in out.split(b'\n'):
if len(line) and not line.startswith(b'?? '):
return True
return False
| {
"content_hash": "3ead8dce23986df777b6272e00fee246",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 98,
"avg_line_length": 35.82835820895522,
"alnum_prop": 0.5084357425536347,
"repo_name": "phodge/homely",
"id": "f1499238aff51746f65e85c0a0abba61e11960ce",
"size": "4801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homely/_vcs/git.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "364"
},
{
"name": "Python",
"bytes": "207210"
},
{
"name": "Shell",
"bytes": "570"
},
{
"name": "Vim script",
"bytes": "272"
}
],
"symlink_target": ""
} |
from django.test import SimpleTestCase
from unittest import mock
from zeusci.zeus.exceptions import FetcherError
from zeusci.zeus.fetchers import Fetcher
from zeusci.zeus.fetchers import GitFetcher
class TestFetcher(SimpleTestCase):
def setUp(self):
self.fetcher = Fetcher()
@mock.patch('zeusci.zeus.fetchers.runcmd')
def test_run_cmd(self, runcmd):
finished_command = mock.Mock()
runcmd.return_value = finished_command
self.assertEqual(self.fetcher.run_cmd(['mycmd']), finished_command)
runcmd.assert_called_once_with(['mycmd'])
@mock.patch('zeusci.zeus.fetchers.runcmd')
def test_run_cmd_accepts_only_list_as_cmd(self, runcmd):
finished_command = mock.Mock()
runcmd.return_value = finished_command
with self.assertRaises(FetcherError):
self.fetcher.run_cmd('my cmd')
def test_get_fetch_cmd(self):
self.fetcher.fetch_cmd = 'foo {url} | bar {dst}'
cmd = self.fetcher.get_fetch_cmd(url='URL', dst='DST')
self.assertEqual(cmd, 'foo URL | bar DST')
def test_fetch(self):
self.fetcher.get_fetch_cmd = mock.Mock(return_value=['cmd'])
finished_command = mock.Mock()
self.fetcher.run_cmd = mock.Mock(return_value=finished_command)
cmd = self.fetcher.fetch(url='URL', dst='DST')
self.assertEqual(cmd, finished_command)
self.fetcher.get_fetch_cmd.assert_called_once_with('URL', 'DST')
self.fetcher.run_cmd.assert_called_once_with(['cmd'])
class TestGitFetcher(SimpleTestCase):
def setUp(self):
self.fetcher = GitFetcher()
def test_get_git_bin(self):
self.assertEqual(self.fetcher.get_git_bin(), 'git')
def test_get_fetch_cmd(self):
self.fetcher.get_git_bin = mock.Mock(return_value='git')
cmd = self.fetcher.get_fetch_cmd('http://xmp.com/foo', '/tmp/foo')
self.assertEqual(cmd, [
'git',
'clone',
'--depth=1',
'http://xmp.com/foo',
'/tmp/foo',
])
def test_get_fetch_cmd_respects_depth(self):
self.fetcher.get_git_bin = mock.Mock(return_value='git')
cmd = self.fetcher.get_fetch_cmd('http://xmp.com/foo', '/tmp/foo',
depth=3)
self.assertEqual(cmd, [
'git',
'clone',
'--depth=3',
'http://xmp.com/foo',
'/tmp/foo',
])
def test_get_fetch_cmd_respects_branch(self):
self.fetcher.get_git_bin = mock.Mock(return_value='git')
cmd = self.fetcher.get_fetch_cmd('http://xmp.com/foo', '/tmp/foo',
branch='devel')
self.assertEqual(cmd, [
'git',
'clone',
'--depth=1',
'--branch=devel',
'http://xmp.com/foo',
'/tmp/foo',
])
| {
"content_hash": "0b86c6c561a924e89ff33fe5dfd6afef",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 33.104651162790695,
"alnum_prop": 0.587284861257464,
"repo_name": "lukaszb/zeusci",
"id": "deb40e5e3f9b1f23163b52685d943579037ba61f",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "zeusci/zeus/tests/test_fetchers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11819"
},
{
"name": "JavaScript",
"bytes": "13360"
},
{
"name": "Python",
"bytes": "112872"
},
{
"name": "Ruby",
"bytes": "1262"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns
from rest_framework import routers
from users.api_views import UserViewSet
from blog.api_views import PostViewSet
router = routers.SimpleRouter()
router.register(r'users', UserViewSet)
router.register(r'posts', PostViewSet)
urlpatterns = patterns('',)
urlpatterns = router.urls
| {
"content_hash": "ece9125f101cfb9cb04308115d5d0cf5",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 39,
"avg_line_length": 20.1875,
"alnum_prop": 0.7925696594427245,
"repo_name": "djangocali/blog-api",
"id": "54e2d2035567161af71b695786f7203e5a058213",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog-api/apps/api/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "23206"
},
{
"name": "JavaScript",
"bytes": "2387"
},
{
"name": "Python",
"bytes": "33730"
}
],
"symlink_target": ""
} |
import boto3
import json
import sure # noqa # pylint: disable=unused-import
from moto import mock_ses, mock_sns, mock_sqs
from moto.ses.models import SESFeedback
from moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID
@mock_ses
def test_enable_disable_ses_sns_communication():
conn = boto3.client("ses", region_name="us-east-1")
conn.set_identity_notification_topic(
Identity="test.com", NotificationType="Bounce", SnsTopic="the-arn"
)
conn.set_identity_notification_topic(Identity="test.com", NotificationType="Bounce")
def __setup_feedback_env__(
ses_conn, sns_conn, sqs_conn, domain, topic, queue, region, expected_msg
):
"""Setup the AWS environment to test the SES SNS Feedback"""
# Environment setup
# Create SQS queue
sqs_conn.create_queue(QueueName=queue)
# Create SNS topic
create_topic_response = sns_conn.create_topic(Name=topic)
topic_arn = create_topic_response["TopicArn"]
# Subscribe the SNS topic to the SQS queue
sns_conn.subscribe(
TopicArn=topic_arn,
Protocol="sqs",
Endpoint=f"arn:aws:sqs:{region}:{ACCOUNT_ID}:{queue}",
)
# Verify SES domain
ses_conn.verify_domain_identity(Domain=domain)
# Specify email address to allow for raw e-mails to be processed
ses_conn.verify_email_identity(EmailAddress="[email protected]")
# Setup SES notification topic
if expected_msg is not None:
ses_conn.set_identity_notification_topic(
Identity=domain, NotificationType=expected_msg, SnsTopic=topic_arn
)
def __test_sns_feedback__(addr, expected_msg, raw_email=False):
region_name = "us-east-1"
ses_conn = boto3.client("ses", region_name=region_name)
sns_conn = boto3.client("sns", region_name=region_name)
sqs_conn = boto3.resource("sqs", region_name=region_name)
domain = "example.com"
topic = "bounce-arn-feedback"
queue = "feedback-test-queue"
__setup_feedback_env__(
ses_conn, sns_conn, sqs_conn, domain, topic, queue, region_name, expected_msg
)
# Send the message
kwargs = dict(
Source="test@" + domain,
Destination={
"ToAddresses": [addr + "@" + domain],
"CcAddresses": ["test_cc@" + domain],
"BccAddresses": ["test_bcc@" + domain],
},
Message={
"Subject": {"Data": "test subject"},
"Body": {"Text": {"Data": "test body"}},
},
)
if raw_email:
kwargs.pop("Message")
kwargs.pop("Destination")
kwargs.update(
{
"Destinations": [addr + "@" + domain],
"RawMessage": {"Data": bytearray("raw_email", "utf-8")},
}
)
ses_conn.send_raw_email(**kwargs)
else:
ses_conn.send_email(**kwargs)
# Wait for messages in the queues
queue = sqs_conn.get_queue_by_name(QueueName=queue)
messages = queue.receive_messages(MaxNumberOfMessages=1)
if expected_msg is not None:
msg = messages[0].body
msg = json.loads(msg)
assert msg["Message"] == SESFeedback.generate_message(ACCOUNT_ID, expected_msg)
else:
assert len(messages) == 0
@mock_sqs
@mock_sns
@mock_ses
def test_no_sns_feedback():
__test_sns_feedback__("test", None)
@mock_sqs
@mock_sns
@mock_ses
def test_sns_feedback_bounce():
__test_sns_feedback__(SESFeedback.BOUNCE_ADDR, SESFeedback.BOUNCE)
@mock_sqs
@mock_sns
@mock_ses
def test_sns_feedback_complaint():
__test_sns_feedback__(SESFeedback.COMPLAINT_ADDR, SESFeedback.COMPLAINT)
@mock_sqs
@mock_sns
@mock_ses
def test_sns_feedback_delivery():
__test_sns_feedback__(SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY)
@mock_sqs
@mock_sns
@mock_ses
def test_sns_feedback_delivery_raw_email():
__test_sns_feedback__(
SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY, raw_email=True
)
@mock_ses
def test_get_identity_notification_attributes_default_values():
ses = boto3.client("ses", region_name="us-east-1")
ses.verify_domain_identity(Domain="example.com")
ses.verify_email_identity(EmailAddress="[email protected]")
resp = ses.get_identity_notification_attributes(
Identities=["[email protected]", "[email protected]"]
)["NotificationAttributes"]
resp.should.have.length_of(2)
resp.should.have.key("[email protected]")
resp.should.have.key("[email protected]")
resp["[email protected]"].should.have.key("ForwardingEnabled").equal(True)
resp["[email protected]"].should.have.key(
"HeadersInBounceNotificationsEnabled"
).equal(False)
resp["[email protected]"].should.have.key(
"HeadersInComplaintNotificationsEnabled"
).equal(False)
resp["[email protected]"].should.have.key(
"HeadersInDeliveryNotificationsEnabled"
).equal(False)
resp["[email protected]"].shouldnt.have.key("BounceTopic")
resp["[email protected]"].shouldnt.have.key("ComplaintTopic")
resp["[email protected]"].shouldnt.have.key("DeliveryTopic")
@mock_ses
def test_set_identity_feedback_forwarding_enabled():
ses = boto3.client("ses", region_name="us-east-1")
ses.verify_domain_identity(Domain="example.com")
ses.verify_email_identity(EmailAddress="[email protected]")
resp = ses.get_identity_notification_attributes(Identities=["[email protected]"])[
"NotificationAttributes"
]
resp["[email protected]"].should.have.key("ForwardingEnabled").equal(True)
ses.set_identity_feedback_forwarding_enabled(
Identity="[email protected]", ForwardingEnabled=False
)
resp = ses.get_identity_notification_attributes(Identities=["[email protected]"])[
"NotificationAttributes"
]
resp["[email protected]"].should.have.key("ForwardingEnabled").equal(False)
ses.set_identity_feedback_forwarding_enabled(
Identity="[email protected]", ForwardingEnabled=True
)
resp = ses.get_identity_notification_attributes(Identities=["[email protected]"])[
"NotificationAttributes"
]
resp["[email protected]"].should.have.key("ForwardingEnabled").equal(True)
| {
"content_hash": "a8206e8c1d3f807c6c41622860818a60",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 88,
"avg_line_length": 32.657754010695186,
"alnum_prop": 0.6648108727689537,
"repo_name": "spulec/moto",
"id": "3040021b52824c5d8393899d5374e0c61be76dcf",
"size": "6107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ses/test_ses_sns_boto3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import os
import json
import tweepy
import ConfigParser
import time
import datetime
import threading
import sys
import urllib2
import urllib
import requests
tweet_hash = {}
tweet_cash = {}
ct_updater = 0
savefile=''
savepath=''
date=''
machine=''
maxsize=0
partcount=1
def config():
try:
global machine
global path
global savepath
global maxsize
config = ConfigParser.RawConfigParser()
config.read('config.cfg')
consumer_key = str(config.get("Twitter","consumer_key"))
consumer_secret = str(config.get("Twitter","consumer_secret"))
access_token = str(config.get("Twitter","access_token"))
access_token_secret = str(config.get("Twitter","access_token_secret"))
machine = str(config.get("Config","machine"))
savepath = str(config.get("Config","savepath"))
maxsize = str(config.get("Config","maxsize"))
maxsize = int(maxsize) * 1073741824 #convert GiB to bytes
hashtrack = str(config.get("Config","track"))
#check if savefile directory exists and creates it if not
try:
os.makedirs(savepath)
except OSError:
if not os.path.isdir(savepath):
raise
return consumer_key, consumer_secret, access_token, access_token_secret, hashtrack
except Exception as e:
print(e)
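# A minimal, illustrative config.cfg matching the keys read above (all values
# below are placeholders, not real credentials):
#
# [Twitter]
# consumer_key = YOUR_CONSUMER_KEY
# consumer_secret = YOUR_CONSUMER_SECRET
# access_token = YOUR_ACCESS_TOKEN
# access_token_secret = YOUR_ACCESS_TOKEN_SECRET
#
# [Config]
# machine = collector01
# savepath = tweets/
# maxsize = 1
# track = python
#
# Note that maxsize is multiplied by 1073741824 in config(), i.e. it is read as GiB.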
def f():
#this function gets executed every 60 seconds
#sets the filename that you save to (size limit included)
try:
global date
global savefile
global savepath
global maxsize
global partcount
date = datetime.date.today()
#print(urllib2.getproxies())
if os.path.isfile(savefile):
size = os.path.getsize(savefile)
if int(size)>int(maxsize):
partcount+=1
savefile = str('%s%s-%s_part%d.txt'%(savepath, date, machine, partcount))
else:
savefile = str('%s%s-%s_part%d.txt'%(savepath, date, machine, partcount))
##Testing communication with Flask webserver. Not implemented
## url = 'http://127.0.0.1:5000/base'
## data = urllib.urlencode([('query', tweet_cash)])
## #print(data)
## req = urllib2.Request(url, headers={'Content-Type': 'application/json'})
## #fd = urllib2.urlopen(req, data)
## fd = urllib2.urlopen(req, data=json.dumps({'text': 'lalala'}))
## print("server response:")
## print(fd.read())
threading.Timer(60, f).start()
except Exception as e:
print(e)
return True
class StdOutListener(StreamListener):
""" A listener handles tweets are the received from the stream.
This is a basic listener that just prints received tweets to stdout.
"""
def on_data(self, data):
try:
global date
global ct_updater
global tweet_cash
global savefile
# print(data)
# Save JSON data to txt. Could implement buffer to reduce IO operations
with open(savefile,"a+") as text:
text.write(data)
text.close()
##Code to count the number of cashtags and hashtags
##Stores in dictionaries tweet_hash and tweet_cash
##Code is working but not used
##
## line = json.loads(data)
## if "entities" in line.keys(): # Check whether entities tags present
## hashtags = line["entities"]["hashtags"] # - if present then extract the hashtags
## for ht in hashtags: # For every hashtag get the hashtag text value
## if ht != None:
## #print(ht["text"])
## #check for cashtag here?
## if ht["text"].encode("utf-8") in tweet_hash.keys(): # Check whether hashtag already in dictionary
## tweet_hash[ht["text"].encode("utf-8")] += 1 # - If it is then increment its frequency by 1
## else:
## tweet_hash[ht["text"].encode("utf-8")] = 1 # - Else initialise the hashtag with frequency as 1
## cashtags = line["entities"]["symbols"]
## for ct in cashtags:
## if ct != None:
## print(ct["text"])
## #print(line['text'])
## if ct["text"].encode("utf-8") in tweet_cash.keys(): # Check whether hashtag already in dictionary
## tweet_cash[ct["text"].encode("utf-8")] += 1 # - If it is then increment its frequency by 1
## else:
## tweet_cash[ct["text"].encode("utf-8")] = 1 # - Else initialise the hashtag with frequency as 1
## if ct_updater<20:
## ct_updater += 1
## else:
## for key, value in tweet_cash.items():
## print("cashtag: %s -- times: %d" % (key, value))
## ct_updater=0
##
return True
except Exception as e:
print(e)
def on_error(self, status):
print('Encountered error with status code:', status, file=sys.stderr)
return True
def on_timeout(self):
print('Timeout...', file=sys.stderr)
time.sleep(10)
return True
if __name__ == '__main__':
try:
consumer_key, consumer_secret, access_token, access_token_secret, hashtrack = config()
f()
l = StdOutListener()
#print(consumer_key, consumer_secret, access_token, access_token_secret)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
stream = Stream(auth, l)
stream.filter(track=hashtrack)
#stream.sample()
except Exception as e:
print(e)
| {
"content_hash": "47d3ea4946bc74a8a44367b6e0041ada",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 124,
"avg_line_length": 34.58988764044944,
"alnum_prop": 0.5570894916355368,
"repo_name": "JeffX89/Twitter-stream-gatherer",
"id": "fb9b895dbfcc02eae15387d87cd4739b30be7a8d",
"size": "6157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streaming.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6157"
}
],
"symlink_target": ""
} |
import simplegui
import math
import random
# globals for user interface
WIDTH = 800
HEIGHT = 600
score = 0
lives = 3
time = 0
started = False
rock_set = set()
missile_set = set()
a_explosion = None
explosion_center = 64
class ImageInfo:
def __init__(self, center, size, radius = 0, lifespan = None, animated = False):
self.center = center
self.size = size
self.radius = radius
if lifespan:
self.lifespan = lifespan
else:
self.lifespan = float('inf')
self.animated = animated
def get_center(self):
return self.center
def get_size(self):
return self.size
def get_radius(self):
return self.radius
def get_lifespan(self):
return self.lifespan
def get_animated(self):
return self.animated
def add_lifespan(self, delta):
self.lifespan += delta
# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim
# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png
# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png
debris_info = ImageInfo([320, 240], [640, 480])
debris_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png")
# nebula images - nebula_brown.png, nebula_blue.png
nebula_info = ImageInfo([400, 300], [800, 600])
nebula_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png")
# splash image
splash_info = ImageInfo([200, 150], [400, 300])
splash_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png")
# ship image
ship_info = ImageInfo([45, 45], [90, 90], 35)
ship_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png")
# missile image - shot1.png, shot2.png, shot3.png
missile_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png")
# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png
asteroid_info = ImageInfo([45, 45], [90, 90], 40)
asteroid_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png")
# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png
explosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)
explosion_image = simplegui.load_image("http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png")
# sound assets purchased from sounddogs.com, please do not redistribute
# .ogg versions of sounds are also available, just replace .mp3 by .ogg
soundtrack = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3")
# soundtrack = simplegui.load_sound("http://lx.cdn.baidupcs.com/file/f11c3ef4836f483c0615623a2e91e40e?bkt=p2-nj-483&xcode=42c6f91464a2acdd94a9947a26e95aebd2cf741278840c4e6d7e9d37fc40c58e&fid=2953674213-250528-468443053188654&time=1415897910&sign=FDTAXERLB-DCb740ccc5511e5e8fedcff06b081203-SDUy7ZyCsMIuAbXk3RcxezHr%2Bfw%3D&to=sc&fm=Nan,B,U,nc&sta_dx=3&sta_cs=414&sta_ft=mp3&sta_ct=0&newver=1&newfm=1&flow_ver=3&sl=81723486&expires=8h&rt=sh&r=259011803&mlogid=1963161765&vuk=2953674213&vbdid=789803841&fin=Teaser_musicmp3.mp3&fn=Teaser_musicmp3.mp3")
missile_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3")
missile_sound.set_volume(.5)
ship_thrust_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3")
explosion_sound = simplegui.load_sound("http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3")
# helper functions to handle transformations
def angle_to_vector(ang):
return [math.cos(ang), math.sin(ang)]
def dist(p, q):
return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# Ship class
class Ship:
def __init__(self, pos, vel, angle, image, info):
self.pos = [pos[0], pos[1]]
self.vel = [vel[0], vel[1]]
self.thrust = False
self.angle = angle
self.angle_vel = 0
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
def get_pos(self):
return self.pos
def draw(self,canvas):
if self.thrust:
canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0], self.image_center[1]] , self.image_size,
self.pos, self.image_size, self.angle)
else:
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
def update(self):
# update angle
self.angle += self.angle_vel
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
# update velocity
if self.thrust:
acc = angle_to_vector(self.angle)
self.vel[0] += acc[0] * .1
self.vel[1] += acc[1] * .1
self.vel[0] *= .99
self.vel[1] *= .99
def set_thrust(self, on):
self.thrust = on
if on:
ship_thrust_sound.rewind()
ship_thrust_sound.play()
else:
ship_thrust_sound.pause()
def increment_angle_vel(self):
self.angle_vel += .05
def decrement_angle_vel(self):
self.angle_vel -= .05
def shoot(self):
global missile_set
forward = angle_to_vector(self.angle)
missile_pos = [self.pos[0] + self.radius * forward[0], self.pos[1] + self.radius * forward[1]]
missile_vel = [self.vel[0] + 6 * forward[0], self.vel[1] + 6 * forward[1]]
missile_info = ImageInfo([5,5], [10, 10], 3, time+60)
missile_set.add(Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound))
# Sprite class
class Sprite:
def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):
self.pos = [pos[0],pos[1]]
self.vel = [vel[0],vel[1]]
self.angle = ang
self.angle_vel = ang_vel
self.image = image
self.image_center = info.get_center()
self.image_size = info.get_size()
self.radius = info.get_radius()
self.lifespan = info.get_lifespan()
self.animated = info.get_animated()
self.age = 0
if sound:
sound.rewind()
sound.play()
def get_lifespan(self):
return self.lifespan
def get_pos(self):
return self.pos
    def set_age(self, age):
        self.age = age
def get_age(self):
return self.age
def draw(self, canvas):
canvas.draw_image(self.image, self.image_center, self.image_size,
self.pos, self.image_size, self.angle)
def update(self):
# update angle
self.angle += self.angle_vel
if self.age>0 and self.age<25:
self.image_center[0] = 128*self.age + explosion_center
self.age += 1
# update position
self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH
self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT
# key handlers to control ship
def keydown(key):
if key == simplegui.KEY_MAP['left']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(True)
elif key == simplegui.KEY_MAP['space']:
my_ship.shoot()
def keyup(key):
if key == simplegui.KEY_MAP['left']:
my_ship.increment_angle_vel()
elif key == simplegui.KEY_MAP['right']:
my_ship.decrement_angle_vel()
elif key == simplegui.KEY_MAP['up']:
my_ship.set_thrust(False)
# mouseclick handler that resets the UI and controls whether the splash image is drawn
def click(pos):
global started
center = [WIDTH / 2, HEIGHT / 2]
size = splash_info.get_size()
inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)
inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)
if (not started) and inwidth and inheight:
reset(2)
started = True
def draw(canvas):
global time, started, rock_set, missile_set, score, lives, a_explosion
    # animate background
time += 1
wtime = (time / 4) % WIDTH
center = debris_info.get_center()
size = debris_info.get_size()
canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])
canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))
# draw UI
canvas.draw_text("Lives", [50, 50], 22, "White")
canvas.draw_text("Score", [680, 50], 22, "White")
canvas.draw_text(str(lives), [50, 80], 22, "White")
canvas.draw_text(str(score), [680, 80], 22, "White")
# draw ship and sprites
my_ship.draw(canvas)
    if a_explosion is not None:
a_explosion.draw(canvas)
for each_rock in rock_set:
each_rock.draw(canvas)
for each_missile in missile_set:
each_missile.draw(canvas)
# update ship and sprites
my_ship.update()
    if a_explosion is not None:
a_explosion.update()
for each_rock in rock_set:
each_rock.update()
    for each_missile in set(missile_set):  # iterate over a copy so removal below is safe
if time<=each_missile.get_lifespan():
each_missile.update()
else:
missile_set.remove(each_missile)
# judge the collision info
    for each_rock in set(rock_set):  # iterate over copies so the removes below are safe
        for each_missile in set(missile_set):
if dist(each_missile.get_pos(), each_rock.get_pos())<=40:
missile_set.remove(each_missile)
rock_set.remove(each_rock)
explosion_sound.rewind()
explosion_sound.play()
# explosion animation
a_explosion = Sprite(each_rock.get_pos(), [0, 0], 0, 0, explosion_image, explosion_info)
a_explosion.set_age(1)
score += 1
break
    for each_rock in set(rock_set):  # iterate over a copy so removal below is safe
if dist(my_ship.get_pos(), each_rock.get_pos())<= 70:
lives -= 1
rock_set.remove(each_rock)
explosion_sound.rewind()
explosion_sound.play()
# explosion animation
a_explosion = Sprite(each_rock.get_pos(), [0, 0], 0, 0, explosion_image, explosion_info)
a_explosion.set_age(1)
if lives<1:
reset(1)
started = False
# draw splash screen if not started
if not started:
canvas.draw_image(splash_image, splash_info.get_center(),
splash_info.get_size(), [WIDTH / 2, HEIGHT / 2],
splash_info.get_size())
# timer handler that spawns a rock
def rock_spawner():
global rock_set
if started and len(rock_set)<12:
rock_pos = [random.randrange(0, WIDTH), random.randrange(0, HEIGHT)]
rock_vel = [random.random() * .6 - .3, random.random() * .6 - .3]
rock_avel = random.random() * .2 - .1
rock_set.add(Sprite(rock_pos, rock_vel, 0, rock_avel, asteroid_image, asteroid_info))
else:
pass
def reset(step):
    global rock_set, missile_set, score, lives, time
    time = 0
if step==2:
score = 0
lives = 3
soundtrack.rewind()
soundtrack.play()
else:
        for each in set(rock_set):
            rock_set.remove(each)
        for each in set(missile_set):
            missile_set.remove(each)
# initialize stuff
frame = simplegui.create_frame("Asteroids", WIDTH, HEIGHT)
# initialize ship and two sprites
my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)
# register handlers
frame.set_keyup_handler(keyup)
frame.set_keydown_handler(keydown)
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(1000.0, rock_spawner)
# get things rolling
timer.start()
frame.start()
| {
"content_hash": "3583d24f5ec16580368a660d232db5b4",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 548,
"avg_line_length": 35.79545454545455,
"alnum_prop": 0.6197619047619047,
"repo_name": "winlandiano/An-Introduction-to-Interactive-Programming-in-Python",
"id": "b6d14c01bb84cb75c72d66664a045de8d5454c0c",
"size": "12663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "week8-Space_ship.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38050"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import os
import re
import calendar
import datetime
import netCDF4 as nc
import numpy as np
from lib_util import create_output_file
"""
What this does:
Calculate monthly means over a number of years from model output.
How to use:
See ./cice_monthly_mean.py or mom_monthly_mean.py
"""
def calc_monthly_mean(input_files, vars, output_filename):
"""
FIXME: parallelise this.
"""
output_file = nc.Dataset(output_filename, 'r+')
for var_name in vars:
var = output_file.variables[var_name]
        # Search through all inputs, accumulating values for the months
        # that each file contains.
num_data_points = [0]*12
for input_file in input_files:
print('*', end='')
sys.stdout.flush()
with nc.Dataset(input_file) as f:
# Find the start time.
time_var = f.variables['time']
assert('days since' in time_var.units)
                m = re.search(r'\d{4}-\d{2}-\d{2}', time_var.units)
assert(m is not None)
start_date = datetime.datetime.strptime(m.group(0), '%Y-%m-%d')
# Time axis is number of days since start.
for (i, ndays) in enumerate(time_var):
d = start_date + datetime.timedelta(days=int(ndays))
month = d.month - 1
if num_data_points[month] == 0:
var[month, :] = f.variables[var_name][i, :]
else:
var[month, :] += f.variables[var_name][i, :]
num_data_points[month] += 1
# We want the same number of samples for each month.
# FIXME: this is disabled for production runs.
# assert(len(set(num_data_points)) == 1)
if len(set(num_data_points)) != 1:
print("""WARNING from calc_monthly_mean.py: not all months have the
same number of samples.""", file=sys.stderr)
# Make the average.
for i, num in enumerate(num_data_points):
# Means that there is no input data for this month.
assert(num != 0)
var[i, :] /= num
output_file.close()
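# Usage sketch (an illustration only -- the file names, variable names and the
# create_output_file() call below are assumptions; see ./cice_monthly_mean.py or
# mom_monthly_mean.py for the real drivers):
#
#   inputs = ['ocean_month_1998.nc', 'ocean_month_1999.nc']
#   create_output_file(inputs[0], ['sst'], 'sst_monthly_clim.nc')
#   calc_monthly_mean(inputs, ['sst'], 'sst_monthly_clim.nc')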
def calc_overall_mean(input_files, vars, output_file=None):
"""
Calculate the overall mean of vars along the time axis.
One or more input files can be given. Each one will have
all vars but different time points.
<vars>: the variables names to average.
<input_files>: a list of the input file names.
<output_file>: the output file name.
A suitable output file can be made with create_output_file().
FIXME: this should update the field that describes the averaging time scale
(if there is one).
This is similar to executing ncra <input> <output>.
"""
output = nc.Dataset(output_file, 'r+')
# Create sums and time points for all vars.
sum_dict = {}
time_dict = {}
for var_name in vars:
# We use an array of np.float64 to do accumulation.
sum_dict[var_name] = np.zeros_like(output.variables[var_name][:],
dtype='float64')
time_dict[var_name] = 0
assert(output.variables[var_name].dimensions[0] == 'time')
for input_file in input_files:
input = nc.Dataset(input_file)
for var_name in vars:
in_var = input.variables[var_name]
# Assume that the first dimension is time.
assert(in_var.dimensions[0] == 'time')
# There is a difference between this:
for i in range(in_var.shape[0]):
sum_dict[var_name][0,:] += in_var[i,:]
# And this:
# sum_dict[var_name][0, :] += np.sum(in_var, axis=0).
# The latter is probably more correct (less rounding) however
# the former is closer to what NCO does which makes testing
# easier.
time_dict[var_name] += in_var.shape[0]
input.close()
# Copy over to destination, may be np.float32.
for var_name in vars:
output.variables[var_name][0, :] = sum_dict[var_name][:] / time_dict[var_name]
output.close()
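# Usage sketch (an illustration; the names and the create_output_file() call are
# assumptions). The call below is roughly equivalent to `ncra in_0001.nc in_0002.nc out.nc`:
#
#   create_output_file('in_0001.nc', ['temp'], 'temp_mean.nc')
#   calc_overall_mean(['in_0001.nc', 'in_0002.nc'], ['temp'], output_file='temp_mean.nc')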
def update_file_history(file, history):
"""
Update the history attribute of the file.
"""
with nc.Dataset(file, 'r+') as f:
f.history = f.history + '\n' + history
| {
"content_hash": "fddd2bcd07cf31a3855b4d66bef8dfcd",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 86,
"avg_line_length": 30.923076923076923,
"alnum_prop": 0.5696517412935324,
"repo_name": "CWSL/access-cm-tools",
"id": "10f1e408e7f6870db8a2d5b723d7fabf386d1f25",
"size": "4445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyse/lib_mean.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2513"
},
{
"name": "Python",
"bytes": "239564"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('radio', '0017_auto_20161014_2225'),
]
operations = [
migrations.AlterField(
model_name='agency',
name='short',
field=models.CharField(max_length=5, unique=True),
),
]
| {
"content_hash": "7ea47c76138f09481ca83c0c6a096d22",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 21.5,
"alnum_prop": 0.5917312661498708,
"repo_name": "ScanOC/trunk-player",
"id": "ad15e25868f704458e66e5a6618050c698737d9b",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radio/migrations/0018_auto_20161023_1622.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5493"
},
{
"name": "Dockerfile",
"bytes": "768"
},
{
"name": "HTML",
"bytes": "47516"
},
{
"name": "JavaScript",
"bytes": "22401"
},
{
"name": "Python",
"bytes": "167619"
},
{
"name": "Shell",
"bytes": "5505"
}
],
"symlink_target": ""
} |
from sys import exit
from flask import Flask
from feature.config import ConfigManager
from feature.model import db, ProductTypes, Client
class DatabaseInit:
def __init__(self, file='config.json'):
self.app, env_config = Flask(__name__), ConfigManager(file)
env_config.apply_config(self.app)
env_config.init_db(self.app)
@staticmethod
def load_data_file(file):
try:
return [f for f in open(file).read().split('\n') if len(f) > 0]
except Exception as ex:
print('we encountered an issue reading {}'.format(file), ex)
@staticmethod
def add_data_to_sess(obj, obj_keys, obj_vals, sess, val_split=None):
"""flexible wrapper for adding data to db session (does not perform commits \
obj: Model object we want obj_vals (actual data) to get initialized with
obj_keys: should be read from datafile whee obj_vals comes from
sess: db (warning do not pass db.session
val_split: if obj_keys is not a list, then leave blank"""
try:
if val_split is None: # setting model 'obj' with one key only for all records
for val in obj_vals:
dct = {obj_keys: val}
rec = obj(**dct)
sess.session.add(rec)
else: # for every record, define dct object where obj keys are set with respective val
for val in obj_vals:
dct = {}
tokens = val.split(val_split)
# only initialize model 'obj' with the data we have
token_len, itr = len(tokens), 0
while itr < token_len:
dct[obj_keys[itr]] = tokens[itr]
itr += 1
rec = obj(**dct)
sess.session.add(rec)
except Exception as ex:
print('issue encountered adding data to db session', ex)
def rebuild(self):
with self.app.app_context():
# load sample data
product_data = self.load_data_file('data/product.csv')
client_data = self.load_data_file('data/client.csv')
print('\n\n>> loaded sample data..')
# create databases
db.drop_all() # make sure we're recreating everything
print('>> dropped tables for clean setup')
db.create_all()
print('>> created database tables')
# should be the same as properties in ProductTypes model
self.add_data_to_sess(ProductTypes, 'product_code', product_data, db)
print('>> setup product data')
self.add_data_to_sess(Client, ['name', 'email', 'phone_number', 'product_id'], client_data, db, ',')
print('>> setup client data')
# commit data to database
db.session.commit()
print('>> committing session')
            print('>> database setup complete!')
            exit()
DatabaseInit().rebuild()
| {
"content_hash": "13aa08bd435e93138379246e61106f50",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 112,
"avg_line_length": 44.82608695652174,
"alnum_prop": 0.5444552214678305,
"repo_name": "parejadan/feature-center",
"id": "fe428ff9691e46f7295152ffce6d418bb65248a5",
"size": "3093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "initdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "8946"
},
{
"name": "JavaScript",
"bytes": "6742"
},
{
"name": "Python",
"bytes": "22265"
}
],
"symlink_target": ""
} |
import unittest
from hyapi.auth import HYAuthHandler
class HYAuthHandlerTests(unittest.TestCase):
def test_get_code_url(self):
auth = HYAuthHandler('a', 'b', 'c', 'd')
self.assertEqual('https://api.hanyang.ac.kr/oauth/authorize?scope=c&redirect_uri=d&response_type=code&client_id=a', auth.get_code_url()) # noqa
| {
"content_hash": "31990993a62ca9d9d00dc6e6b1bed38b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 152,
"avg_line_length": 33.7,
"alnum_prop": 0.7002967359050445,
"repo_name": "kimtree/hyapi",
"id": "5abd2ed31e0fd366c11a460529a854891e9f1bbc",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7688"
}
],
"symlink_target": ""
} |
import re
from datetime import datetime, timedelta
from time import timezone
from urllib2 import urlopen, quote
from django.db import models
from django.utils.html import urlize
from django.utils.simplejson import loads
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from mezzanine.twitter.managers import TweetManager
from mezzanine.utils.timezone import make_aware
from mezzanine.twitter import (QUERY_TYPE_CHOICES, QUERY_TYPE_USER,
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH)
re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __unicode__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
urls = {
QUERY_TYPE_USER: ("http://api.twitter.com/1/statuses/"
"user_timeline/%s.json?include_rts=true" %
self.value.lstrip("@")),
QUERY_TYPE_LIST: ("http://api.twitter.com/1/%s/statuses.json"
"?include_rts=true" %
self.value.lstrip("@").replace("/", "/lists/")),
QUERY_TYPE_SEARCH: "http://search.twitter.com/search.json?q=%s" %
quote(self.value.encode("utf-8")),
}
try:
url = urls[self.type]
except KeyError:
return
try:
tweets = loads(urlopen(url).read())
except:
return
if self.type == "search":
tweets = tweets["results"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json["user"]
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json["from_user"]
tweet.full_name = tweet_json["from_user"]
tweet.profile_image_url = tweet_json["profile_image_url"]
date_format = "%a, %d %b %Y %H:%M:%S +0000"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = ''.join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
d -= timedelta(seconds=timezone)
tweet.created_at = make_aware(d)
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True)
query = models.ForeignKey("Query", related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __unicode__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
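# Usage sketch (not part of the original module; shown only to illustrate how a
# Query is typically refreshed, e.g. from a management command or cron job):
#
#   q, _ = Query.objects.get_or_create(type=QUERY_TYPE_SEARCH, value="#django")
#   q.run()                     # fetch new tweets from the Twitter API
#   latest = q.tweets.all()[:5]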
| {
"content_hash": "783ea3a6c0649307af84a72878a06b79",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 78,
"avg_line_length": 40.84920634920635,
"alnum_prop": 0.5712065280746066,
"repo_name": "gbosh/mezzanine",
"id": "ce7be6718d41d072c4f0d2c6dd5fb1999f65db45",
"size": "5148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/twitter/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "63609"
},
{
"name": "HTML",
"bytes": "75923"
},
{
"name": "JavaScript",
"bytes": "166378"
},
{
"name": "Nginx",
"bytes": "1168"
},
{
"name": "Python",
"bytes": "850616"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
import urlparse
from glance import client as glance_client
from horizon.api.base import APIDictWrapper, url_for
LOG = logging.getLogger(__name__)
class Image(APIDictWrapper):
"""
Wrapper around glance image dictionary to make it object-like and provide
access to image properties.
"""
_attrs = ['checksum', 'container_format', 'created_at', 'deleted',
'deleted_at', 'disk_format', 'id', 'is_public', 'location',
'name', 'properties', 'size', 'status', 'updated_at', 'owner']
def __getattr__(self, attrname):
if attrname == "properties":
if not hasattr(self, "_properties"):
properties_dict = super(Image, self).__getattr__(attrname)
self._properties = ImageProperties(properties_dict)
return self._properties
else:
return super(Image, self).__getattr__(attrname)
class ImageProperties(APIDictWrapper):
"""
Wrapper around glance image properties dictionary to make it object-like.
"""
_attrs = ['architecture', 'image_location', 'image_state', 'kernel_id',
'project_id', 'ramdisk_id', 'image_type']
def glanceclient(request):
o = urlparse.urlparse(url_for(request, 'image'))
LOG.debug('glanceclient connection created for host "%s:%d"' %
(o.hostname, o.port))
return glance_client.Client(o.hostname,
o.port,
auth_tok=request.user.token)
def image_create(request, image_meta, image_file):
return Image(glanceclient(request).add_image(image_meta, image_file))
def image_delete(request, image_id):
return glanceclient(request).delete_image(image_id)
def image_get(request, image_id):
"""
Returns the actual image file from Glance for image with
supplied identifier
"""
return glanceclient(request).get_image(image_id)[1]
def image_get_meta(request, image_id):
"""
Returns an Image object populated with metadata for image
with supplied identifier.
"""
return Image(glanceclient(request).get_image_meta(image_id))
def image_list_detailed(request):
return [Image(i) for i in glanceclient(request).get_images_detailed()]
def image_update(request, image_id, image_meta=None):
    image_meta = image_meta or {}
return Image(glanceclient(request).update_image(image_id,
image_meta=image_meta))
def snapshot_list_detailed(request):
filters = {}
filters['property-image_type'] = 'snapshot'
filters['is_public'] = 'none'
return [Image(i) for i in glanceclient(request)
.get_images_detailed(filters=filters)]
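# Usage sketch (an assumption: these helpers are called from a Horizon view with an
# authenticated request object; the view below is hypothetical):
#
#   def index(request):
#       images = image_list_detailed(request)
#       snapshots = snapshot_list_detailed(request)
#       meta = image_get_meta(request, images[0].id) if images else None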
| {
"content_hash": "c5a1867404dfff244c8e70740956d860",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 31.471910112359552,
"alnum_prop": 0.632631203141735,
"repo_name": "developerworks/horizon",
"id": "694705da6e292fe1016db1bca3353f34d1f0a95b",
"size": "3610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon/api/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "154893"
},
{
"name": "Python",
"bytes": "697221"
},
{
"name": "Shell",
"bytes": "11065"
}
],
"symlink_target": ""
} |
import lzma
import os
class XZ(object):
"""
Implements decompression of lzma compressed files
"""
LZMA_STREAM_BUFFER_SIZE = 8192
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.lzma_stream.close()
def __init__(self, lzma_stream, buffer_size=LZMA_STREAM_BUFFER_SIZE):
self.buffer_size = int(buffer_size)
self.lzma = lzma.LZMADecompressor()
self.lzma_stream = lzma_stream
self.buffered_bytes = b''
def read(self, size):
if self.lzma.eof and not self.buffered_bytes:
return None
chunks = self.buffered_bytes
bytes_uncompressed = len(chunks)
while not self.lzma.eof and bytes_uncompressed < size:
chunks += self.lzma.decompress(
self.lzma.unused_data + self.lzma_stream.read(self.buffer_size)
)
bytes_uncompressed = len(chunks)
self.buffered_bytes = chunks[size:]
return chunks[:size]
    @classmethod
    def close(cls):
        cls.lzma_stream.close()
    @classmethod
    def open(cls, file_name, buffer_size=LZMA_STREAM_BUFFER_SIZE):
        cls.lzma_stream = open(file_name, 'rb')
        return XZ(cls.lzma_stream, buffer_size)
    @classmethod
    def uncompressed_size(cls, file_name):
        with lzma.open(file_name) as lzma_stream:
            lzma_stream.seek(0, os.SEEK_END)
            return lzma_stream.tell()
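# Usage sketch (an assumption: 'disk.raw.xz' stands for any locally available
# xz/lzma compressed file; handle() is a hypothetical consumer):
#
#   with XZ.open('disk.raw.xz') as stream:
#       chunk = stream.read(XZ.LZMA_STREAM_BUFFER_SIZE)
#       while chunk:
#           handle(chunk)
#           chunk = stream.read(XZ.LZMA_STREAM_BUFFER_SIZE)
#   print(XZ.uncompressed_size('disk.raw.xz'))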
| {
"content_hash": "eb0f413326103df5d4130b1adeae31ee",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 28.346153846153847,
"alnum_prop": 0.6058344640434192,
"repo_name": "SUSE/azurectl",
"id": "aeee2c2b6fe896d3a85fe363d0a52a8078a0e40b",
"size": "2078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azurectl/utils/xz.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1840"
},
{
"name": "Perl",
"bytes": "3582"
},
{
"name": "Python",
"bytes": "500660"
},
{
"name": "Shell",
"bytes": "8022"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import inspect
from numba import jit, vectorize, guvectorize
from functools import wraps
from six import StringIO
import ast
import toolz
class GetReturnNode(ast.NodeVisitor):
"""
A Visitor to get the return tuple names from a calc-style
function
"""
def visit_Return(self, node):
if isinstance(node.value, ast.Tuple):
return [e.id for e in node.value.elts]
else:
return [node.value.id]
def dataframe_guvectorize(dtype_args, dtype_sig):
"""
Extracts numpy arrays from caller arguments and passes them
to guvectorized numba functions
"""
def make_wrapper(func):
vecd_f = guvectorize(dtype_args, dtype_sig)(func)
@wraps(func)
def wrapper(*args, **kwargs):
# np_arrays = [getattr(args[0], i).values for i in theargs]
arrays = [arg.values for arg in args]
ans = vecd_f(*arrays)
return ans
return wrapper
return make_wrapper
def dataframe_vectorize(dtype_args):
"""
Extracts numpy arrays from caller arguments and passes them
to vectorized numba functions
"""
def make_wrapper(func):
vecd_f = vectorize(dtype_args)(func)
@wraps(func)
def wrapper(*args, **kwargs):
arrays = [arg.values for arg in args]
ans = vecd_f(*arrays)
return ans
return wrapper
return make_wrapper
def dataframe_wrap_guvectorize(dtype_args, dtype_sig):
"""
Extracts particular numpy arrays from caller argments and passes
them to guvectorize. Goes one step further than dataframe_guvectorize
by looking for the column names in the dataframe and just extracting those
"""
def make_wrapper(func):
theargs = inspect.getargspec(func).args
vecd_f = guvectorize(dtype_args, dtype_sig)(func)
def wrapper(*args, **kwargs):
np_arrays = [getattr(args[0], i).values for i in theargs]
ans = vecd_f(*np_arrays)
return ans
return wrapper
return make_wrapper
def create_apply_function_string(sigout, sigin, parameters):
"""
Create a string for a function of the form:
        def ap_func(x_0, x_1, x_2, ...):
for i in range(len(x_0)):
x_0[i], ... = jitted_f(x_j[i],....)
return x_0[i], ...
where the specific args to jitted_f and the number of
        values to return is determined by sigout and sigin
Parameters
----------
sigout: iterable of the out arguments
sigin: iterable of the in arguments
parameters: iterable of which of the args (from in_args) are parameter
variables (as opposed to column records). This influences
how we construct the '_apply' function
Returns
-------
a String representing the function
"""
s = StringIO()
total_len = len(sigout) + len(sigin)
out_args = ["x_" + str(i) for i in range(0, len(sigout))]
in_args = ["x_" + str(i) for i in range(len(sigout), total_len)]
s.write("def ap_func({0}):\n".format(",".join(out_args + in_args)))
s.write(" for i in range(len(x_0)):\n")
out_index = [x + "[i]" for x in out_args]
in_index = []
for arg, _var in zip(in_args, sigin):
in_index.append(arg + "[i]" if _var not in parameters else arg)
s.write(" " + ",".join(out_index) + " = ")
s.write("jitted_f(" + ",".join(in_index) + ")\n")
s.write(" return " + ",".join(out_args) + "\n")
return s.getvalue()
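# Example (illustration derived from the logic above): with sigout=['c'],
# sigin=['a', 'b'] and parameters=['b'], create_apply_function_string returns,
# modulo whitespace:
#
#   def ap_func(x_0,x_1,x_2):
#       for i in range(len(x_0)):
#           x_0[i] = jitted_f(x_1[i],x_2)
#       return x_0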
def create_toplevel_function_string(args_out, args_in, pm_or_pf,
kwargs_for_func={}):
"""
Create a string for a function of the form:
def hl_func(x_0, x_1, x_2, ...):
outputs = (...) = calc_func(...)
header = [...]
return DataFrame(data, columns=header)
        where the specific args to calc_func and the number of
        values to return is determined by args_out and args_in
Parameters
----------
args_out: iterable of the out arguments
args_in: iterable of the in arguments
pm_or_pf: iterable of strings for object that holds each arg
kwargs_for_func: dictionary of keyword args for the function
Returns
-------
a String representing the function
"""
s = StringIO()
s.write("def hl_func(pm, pf")
if kwargs_for_func:
kwargs = ",".join(str(k) + "=" + str(v) for k, v in
kwargs_for_func.items())
s.write(", " + kwargs + " ")
s.write("):\n")
s.write(" from pandas import DataFrame\n")
s.write(" import numpy as np\n")
s.write(" outputs = \\\n")
outs = []
for arg in kwargs_for_func:
args_in.remove(arg)
for p, attr in zip(pm_or_pf, args_out + args_in):
outs.append(p + "." + attr + ", ")
outs = [m_or_f + "." + arg for m_or_f, arg in zip(pm_or_pf, args_out)]
s.write(" (" + ", ".join(outs) + ") = \\\n")
s.write(" " + "applied_f(")
for p, attr in zip(pm_or_pf, args_out + args_in):
s.write(p + "." + attr + ", ")
for arg in kwargs_for_func:
s.write(arg + ", ")
s.write(")\n")
s.write(" header = [")
col_headers = ["'" + out + "'" for out in args_out]
s.write(", ".join(col_headers))
s.write("]\n")
if len(args_out) == 1:
s.write(" return DataFrame(data=outputs,"
"columns=header)")
else:
s.write(" return DataFrame(data=np.column_stack("
"outputs),columns=header)")
return s.getvalue()
def make_apply_function(func, out_args, in_args, parameters, do_jit=True,
**kwargs):
"""
Takes a '_calc' function and creates the necessary Python code for an
_apply style function. Will also jit the function if desired
Parameters
----------
func: the 'calc' style function
out_args: list of out arguments for the apply function
in_args: list of in arguments for the apply function
parameters: iterable of which of the args (from in_args) are parameter
variables (as opposed to column records). This influences
how we construct the '_apply' function
do_jit: Bool, if True, jit the resulting apply function
Returns
-------
'_apply' style function
"""
jitted_f = jit(**kwargs)(func)
apfunc = create_apply_function_string(out_args, in_args, parameters)
func_code = compile(apfunc, "<string>", "exec")
fakeglobals = {}
eval(func_code, {"jitted_f": jitted_f}, fakeglobals)
if do_jit:
return jit(**kwargs)(fakeglobals['ap_func'])
else:
return fakeglobals['ap_func']
def apply_jit(dtype_sig_out, dtype_sig_in, parameters=None, **kwargs):
"""
    make a decorator that takes in a _calc-style function and handles
    the apply step
"""
if not parameters:
parameters = []
def make_wrapper(func):
theargs = inspect.getargspec(func).args
jitted_f = jit(**kwargs)(func)
jitted_apply = make_apply_function(func, dtype_sig_out,
dtype_sig_in, parameters,
**kwargs)
def wrapper(*args, **kwargs):
in_arrays = []
out_arrays = []
for farg in theargs:
if hasattr(args[0], farg):
in_arrays.append(getattr(args[0], farg))
else:
in_arrays.append(getattr(args[1], farg))
for farg in dtype_sig_out:
if hasattr(args[0], farg):
out_arrays.append(getattr(args[0], farg))
else:
out_arrays.append(getattr(args[1], farg))
final_array = out_arrays + in_arrays
ans = jitted_apply(*final_array)
return ans
return wrapper
return make_wrapper
def iterate_jit(parameters=None, **kwargs):
"""
    make a decorator that takes in a _calc-style function and creates a
    function that handles the "high-level" function and the "_apply"
    style function
"""
if not parameters:
parameters = []
def make_wrapper(func):
# Step 1. Wrap this function in apply_jit
# from apply_jit
# Get the input arguments from the function
in_args = inspect.getargspec(func).args
try:
jit_args = inspect.getargspec(jit).args + ['nopython']
except TypeError:
print ("This should only be seen in RTD, if not install numba!")
return func
kwargs_for_func = toolz.keyfilter(in_args.__contains__, kwargs)
kwargs_for_jit = toolz.keyfilter(jit_args.__contains__, kwargs)
src = inspect.getsourcelines(func)[0]
# Discover the return arguments by walking
# the AST of the function
all_returned_vals = []
gnr = GetReturnNode()
all_out_args = None
for node in ast.walk(ast.parse(''.join(src))):
all_out_args = gnr.visit(node)
if all_out_args:
break
if not all_out_args:
raise ValueError("Can't find return statement in function!")
# Now create the apply jitted function
applied_jitted_f = make_apply_function(func,
list(reversed(all_out_args)),
in_args,
parameters=parameters,
do_jit=True,
**kwargs_for_jit)
def wrapper(*args, **kwargs):
in_arrays = []
out_arrays = []
pm_or_pf = []
for farg in all_out_args + in_args:
if hasattr(args[0], farg):
in_arrays.append(getattr(args[0], farg))
pm_or_pf.append("pm")
elif hasattr(args[1], farg):
in_arrays.append(getattr(args[1], farg))
pm_or_pf.append("pf")
                elif farg not in kwargs_for_func:
raise ValueError("Unknown arg: " + farg)
# Create the high level function
high_level_func = create_toplevel_function_string(all_out_args,
list(in_args),
pm_or_pf,
kwargs_for_func)
func_code = compile(high_level_func, "<string>", "exec")
fakeglobals = {}
eval(func_code, {"applied_f": applied_jitted_f}, fakeglobals)
high_level_fn = fakeglobals['hl_func']
ans = high_level_fn(*args, **kwargs)
return ans
return wrapper
return make_wrapper
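# Usage sketch (an assumption for illustration: pm holds scalar parameter attributes
# such as 'rate', while pf holds record arrays such as 'income' plus a preallocated
# output array 'tax'; all names here are hypothetical):
#
#   @iterate_jit(parameters=['rate'], nopython=True)
#   def Tax(income, rate):
#       tax = income * rate
#       return tax
#
#   # df = Tax(pm, pf)   # returns a DataFrame with a single 'tax' column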
| {
"content_hash": "e324e00282f74e6571045a1074910df1",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 78,
"avg_line_length": 31.997093023255815,
"alnum_prop": 0.5426546742981739,
"repo_name": "xiyuw123/Tax-Calculator",
"id": "f2f8fcda7931c29f5afe1e80c6f0e2dbb754554f",
"size": "11007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taxcalc/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "195748"
},
{
"name": "SAS",
"bytes": "180446"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
import threading
import SocketServer
import time
from Tkinter import *
import socket
import sys
import PIL
from io import BytesIO
from cStringIO import StringIO
import base64
import hashlib
import ImageTk
lock = threading.Lock()
waitkey = threading.Event()
sendClick = '0'
sendRightClick = '0'
sendKey = '0'
class pyspyRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
while True:
while True:
stream = self.request.recv(4)
prefix = int(''.join(reversed(stream)).encode('hex'), 16)
print "Got prefix "+ str(prefix)
break
maindata = ""
while(len(maindata)!=prefix):
                data = self.request.recv(prefix - len(maindata))
maindata = maindata+data
print "Got data "+ str(len(maindata))
test1 = StringIO(maindata)
jpegdata = StringIO(base64.b64encode(maindata))
readyframe = PIL.Image.open(test1)
newIMG = ImageTk.PhotoImage(readyframe)
streamvar.set(newIMG)
# self.SCREENTRANSFER = False
# self.MOUSETRANSFER = False
# self.KEYTRANSFER = False
# cur_thread = threading.currentThread()
# selectChannel = self.request.recv(14)
# print cur_thread.getName()+" ->> "+ selectChannel
# if selectChannel == 'SCREENTRANSFER':
# print cur_thread.getName() + ' SCREEN'
# self.SCREENTRANSFER = True
# elif selectChannel == 'MOUSETRANSFER':
# print cur_thread.getName() + ' MOUSE'
# self.MOUSETRANSFER = True
# elif selectChannel == 'KEYTRANSFER':
# print cur_thread.getName() + ' KEYBOARD'
# self.KEYTRANSFER = True
# if(self.SCREENTRANSFER == True):
# totaldata = ''
# endofdata = '@'
# while True:
# stream = self.request.recv(1024)
# if(endofdata in stream):
# totaldata+=stream[:stream.find(endofdata)]
# jpegdata = StringIO(base64.b64decode(totaldata))
# readyframe = PIL.Image.open(jpegdata)
# newIMG = ImageTk.PhotoImage(readyframe)
# streamvar.set(newIMG)
# totaldata = ''
# else:
# totaldata+=stream
# if(self.MOUSETRANSFER == True):
# global sendClick
# global sendRightClick
# while True:
# time.sleep(0.1)
# if (sendClick != '0'):
# self.request.send(sendClick)
# sendClick = '0'
# elif (sendRightClick != '0'):
# self.request.send(sendRightClick)
# sendRightClick = '0'
# if(self.KEYTRANSFER == True):
# global sendKey
# while True:
# waitkey.wait()
# if(sendKey != '0'):
# print 'Sending Key: '+sendKey
# self.request.send(sendKey)
# with lock:
# sendKey = '0'
return
class pyspyNetworkServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
def clicked(event):
global sendClick
sendClick = str(event.x)+','+str(event.y)+',1@'
def rightclicked(event):
global sendRightClick
sendRightClick = str(event.x)+','+str(event.y)+',2@'
def callback(*args):
readyframe = streamvar.get()
if readyframe != '':
label.configure(image=readyframe)
label.image = readyframe
def keypress(event):
global sendKey
with lock:
sendKey = repr(event.char)+'!+!'.ljust(10, '#')
waitkey.set()
waitkey.clear()
address = ('192.168.56.1', 3320) # let the kernel give us a port
server = pyspyNetworkServer(address, pyspyRequestHandler)
t = threading.Thread(target=server.serve_forever)
t.setDaemon(True) # don't hang on exit
t.start()
print 'Server loop running in thread:', t.getName()
app = Tk()
app.title("Example")
#app.bind('<Motion>', motion)
app.bind("<Button-1>", clicked)
app.bind("<Button-3>", rightclicked)
app.bind("<Key>", keypress)
streamvar = StringVar()
streamvar.trace("w", callback)
# List of photoimages for each image
photo = PhotoImage(data=streamvar.get())
label = Label(image=photo)
label.pack()
app.mainloop()
| {
"content_hash": "388e98cd3db9c34ece0900958d379bb6",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 31.45774647887324,
"alnum_prop": 0.5556301768524737,
"repo_name": "speknet/pyspy",
"id": "f96d677bd6ccf36684242116f40b69299f630f09",
"size": "4467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspy-server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4865"
},
{
"name": "Python",
"bytes": "8092"
}
],
"symlink_target": ""
} |
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'tensorflow>=1.12.1',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
requires=[]
)
| {
"content_hash": "b16bd2e73890c926eacebc30e64a7403",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 18.733333333333334,
"alnum_prop": 0.6868327402135231,
"repo_name": "GoogleCloudPlatform/cloudml-samples",
"id": "5dac0bf86976c8b198f1e1868a73a46ac6cb94a3",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow/standard/legacy/flowers/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7846"
},
{
"name": "Jupyter Notebook",
"bytes": "1081052"
},
{
"name": "OpenEdge ABL",
"bytes": "1846"
},
{
"name": "Python",
"bytes": "1174159"
},
{
"name": "Shell",
"bytes": "50370"
}
],
"symlink_target": ""
} |
import logging,lasagne
import numpy as np
import theano.tensor as T
logging.basicConfig()
logger= logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# adapted from https://github.com/nouiz/lisa_emotiw/blob/master/emotiw/wardefar/crf_theano.py
def theano_logsumexp(x, axis=None):
"""
Compute log(sum(exp(x), axis=axis) in a numerically stable
fashion.
Parameters
----------
x : tensor_like
A Theano tensor (any dimension will do).
axis : int or symbolic integer scalar, or None
Axis over which to perform the summation. `None`, the
default, performs over all axes.
Returns
-------
result : ndarray or scalar
The result of the log(sum(exp(...))) operation.
"""
xmax = T.max(x,axis=axis, keepdims=True)
xmax_ = T.max(x,axis=axis)
return xmax_ + T.log(T.exp(x - xmax).sum(axis=axis))
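if __name__ == "__main__":
    # Minimal sanity check (a sketch, assuming a working theano install): the
    # stabilised log-sum-exp should agree with the naive numpy formula on small
    # inputs and stay finite even for large ones.
    import theano
    x = T.matrix('x')
    f = theano.function([x], theano_logsumexp(x, axis=1))
    a = np.array([[1., 2.], [3., 4.]], dtype=theano.config.floatX)
    logger.debug('theano logsumexp: %s', f(a))
    logger.debug('numpy  logsumexp: %s', np.log(np.exp(a).sum(axis=1)))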
| {
"content_hash": "fdafe31a0791f6a30d1027819f697cf3",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 93,
"avg_line_length": 31.357142857142858,
"alnum_prop": 0.6548974943052391,
"repo_name": "abhyudaynj/LSTM-CRF-models",
"id": "48466f8d2fa346a06539a32c10a9c689cbbe0010",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bionlp/utils/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116988"
}
],
"symlink_target": ""
} |
import os
from twisted.trial import unittest
from pyrake.contrib.djangoitem import DjangoItem, Field
from pyrake import optional_features
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_djangoitem.settings'
if 'django' in optional_features:
from .models import Person, IdentifiedPerson
class BasePersonItem(DjangoItem):
django_model = Person
class NewFieldPersonItem(BasePersonItem):
other = Field()
class OverrideFieldPersonItem(BasePersonItem):
age = Field()
class IdentifiedPersonItem(DjangoItem):
django_model = IdentifiedPerson
class DjangoItemTest(unittest.TestCase):
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def setUp(self):
if 'django' not in optional_features:
raise unittest.SkipTest("Django is not available")
def test_base(self):
i = BasePersonItem()
self.assertSortedEqual(i.fields.keys(), ['age', 'name'])
def test_new_fields(self):
i = NewFieldPersonItem()
self.assertSortedEqual(i.fields.keys(), ['age', 'other', 'name'])
def test_override_field(self):
i = OverrideFieldPersonItem()
self.assertSortedEqual(i.fields.keys(), ['age', 'name'])
def test_custom_primary_key_field(self):
"""
Test that if a custom primary key exists, it is
in the field list.
"""
i = IdentifiedPersonItem()
self.assertSortedEqual(i.fields.keys(), ['age', 'identifier', 'name'])
def test_save(self):
i = BasePersonItem()
self.assertSortedEqual(i.fields.keys(), ['age', 'name'])
i['name'] = 'John'
i['age'] = '22'
person = i.save(commit=False)
self.assertEqual(person.name, 'John')
self.assertEqual(person.age, '22')
def test_override_save(self):
i = OverrideFieldPersonItem()
i['name'] = 'John'
# it is not obvious that "age" should be saved also, since it was
# redefined in child class
i['age'] = '22'
person = i.save(commit=False)
self.assertEqual(person.name, 'John')
self.assertEqual(person.age, '22')
def test_validation(self):
long_name = 'z' * 300
i = BasePersonItem(name=long_name)
self.assertFalse(i.is_valid())
self.assertEqual(set(i.errors), set(['age', 'name']))
i = BasePersonItem(name='John')
self.assertTrue(i.is_valid(exclude=['age']))
self.assertEqual({}, i.errors)
# once the item is validated, it does not validate again
i['name'] = long_name
self.assertTrue(i.is_valid())
def test_override_validation(self):
i = OverrideFieldPersonItem()
i['name'] = 'John'
self.assertFalse(i.is_valid())
        i = OverrideFieldPersonItem()
i['name'] = 'John'
i['age'] = '22'
self.assertTrue(i.is_valid())
def test_default_field_values(self):
i = BasePersonItem()
person = i.save(commit=False)
self.assertEqual(person.name, 'Robot')
| {
"content_hash": "13d30e51d952d85004f1d5bd4eac92e6",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 30.368932038834952,
"alnum_prop": 0.6160485933503836,
"repo_name": "elkingtowa/pyrake",
"id": "bce2ba8b30e5ca887d7e6c169111c1f70b4399b2",
"size": "3128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_djangoitem/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9681"
},
{
"name": "Perl",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "1950905"
},
{
"name": "Shell",
"bytes": "3209"
}
],
"symlink_target": ""
} |
from yambopy import *
import numpy as np
import matplotlib.pyplot as plt
# pack files of convergence GW calculations
pack_files_in_folder('gw_conv')
# Start Analyser
ya = YamboAnalyser('gw_conv')
# Plot of all the k-points converging one parameter
ya.plot_gw_all_kpoints_convergence(tag='EXX')
ya.plot_gw_all_kpoints_convergence(tag='Bnds')
ya.plot_gw_all_kpoints_convergence(tag='NGsBlk')
ya.plot_gw_all_kpoints_convergence(tag='GbndRnge')
| {
"content_hash": "449a3d7c6782b28eaebfb14e1976064e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.7651006711409396,
"repo_name": "alexmoratalla/yambopy",
"id": "22807b02a22e8e822205ffcff71628cc6ec88e22",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorial/bn/plot-gw-conv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "887890"
},
{
"name": "Shell",
"bytes": "1296"
}
],
"symlink_target": ""
} |
import os
import sys
import caffe
import numpy as np
from caffe import layers as L
from caffe import params as P
from sklearn.metrics import roc_auc_score
import random
def ribo_cnn_net1(train_size, val_size, test_size, val_file, test_file, solver_file):
#random.seed(1024)
os.chdir('..')
sys.path.insert(0, './python')
caffe.set_device(0)
caffe.set_mode_gpu()
solver = caffe.SGDSolver(solver_file)
niter = 100000
test_interval = 500
# losses will also be stored in the log
train_loss = np.zeros(niter)
test_auc = []
# the main solver loop
for it in range(niter):
solver.step(1) # SGD by Caffe
# store the train loss
train_loss[it] = solver.net.blobs['loss'].data
# run a full test every so often
        # Caffe can also do this for us and write to a log, but we show here how to do it in Python
if it % test_interval == 0:
print 'Iteration', it, 'testing...'
prob = [] # predicted probability
label = [] # true label
# calculate validation auc
for test_it in range(val_size/256+1):
solver.test_nets[0].forward()
output = solver.test_nets[0].blobs['ip2'].data
output_label = solver.test_nets[0].blobs['label'].data
prob.extend(list(np.divide(np.exp(output[:,1]), (np.exp(output[:,0])+np.exp(output[:,1])))))
label.extend(list(output_label))
test_auc.append(roc_auc_score(label, prob))
# get the best model
maxv = max(test_auc)
maxp = test_auc.index(maxv)
maxi = maxp * test_interval
best_model = '/home/szhang/Riboseq/r64/Pop14/model_file/ribo_iter_' + str(maxi) + '.caffemodel'
net_t = caffe.Net(test_file, best_model, caffe.TEST)
# calculate auc score of test data
prob = []
label = []
for test_it in range(test_size/1902):
net_t.forward()
output = net_t.blobs['ip2'].data
output_label = net_t.blobs['label'].data
prob.extend(list(np.divide(np.exp(output[:,1]), (np.exp(output[:,0])+np.exp(output[:,1])))))
label.extend(list(output_label))
# return best validation and test auc scores
#return maxv, roc_auc_score(label, prob)
os.rename(best_model, '/home/szhang/Riboseq/r64/Pop14/model_file/best_model.caffemodel')
return label, prob
def ribo_cnn_net2(train_size, val_size, test_size, val_file, test_file, solver_file, mnum):
#random.seed(1024)
os.chdir('..')
sys.path.insert(0, './python')
caffe.set_device(0)
caffe.set_mode_gpu()
solver = caffe.SGDSolver(solver_file)
niter = 100000
test_interval = 500
# losses will also be stored in the log
train_loss = np.zeros(niter)
test_auc = []
# the main solver loop
for it in range(niter):
solver.step(1) # SGD by Caffe
# store the train loss
train_loss[it] = solver.net.blobs['loss'].data
# run a full test every so often
        # Caffe can also do this for us and write to a log, but we show here how to do it in Python
if it % test_interval == 0:
print 'Iteration', it, 'testing...'
prob = [] # predicted probability
label = [] # true label
# calculate validation auc
for test_it in range(val_size/256+1):
solver.test_nets[0].forward()
output = solver.test_nets[0].blobs['ip2'].data
output_label = solver.test_nets[0].blobs['label'].data
prob.extend(list(np.divide(np.exp(output[:,1]), (np.exp(output[:,0])+np.exp(output[:,1])))))
label.extend(list(output_label))
test_auc.append(roc_auc_score(label, prob))
# get the best model
maxv = max(test_auc)
maxp = test_auc.index(maxv)
maxi = maxp * test_interval
best_model = '/home/szhang/Riboseq/r64/Pop14/model_file/ribo_iter_' + str(maxi) + '.caffemodel'
net_t = caffe.Net(test_file, best_model, caffe.TEST)
# calculate auc score of test data
prob = []
label = []
for test_it in range(test_size/1902):
net_t.forward()
output = net_t.blobs['ip2'].data
output_label = net_t.blobs['label'].data
prob.extend(list(np.divide(np.exp(output[:,1]), (np.exp(output[:,0])+np.exp(output[:,1])))))
label.extend(list(output_label))
# return best validation and test auc scores
#return maxv, roc_auc_score(label, prob)
os.rename(best_model, '/home/szhang/Riboseq/r64/Pop14/model_file/best_model_'+str(mnum)+'.caffemodel')
return label, prob
def cv_test_cnn(test_size, trained_model, test_file):
#random.seed(1024)
os.chdir('..')
sys.path.insert(0, './python')
caffe.set_device(0)
caffe.set_mode_gpu()
best_model = trained_model
net_t = caffe.Net(test_file, best_model, caffe.TEST)
# calculate auc score of test data
prob = []
label = []
for test_it in range(1):
net_t.forward()
#output1 = net_t.blobs['conv1'].data
#output2 = net_t.blobs['conv2'].data
#output3 = net_t.blobs['conv3'].data
#output_label = net_t.blobs['label'].data
output = net_t.blobs['ip2'].data
prob.extend(list(np.divide(np.exp(output[:,1]), (np.exp(output[:,0])+np.exp(output[:,1])))))
#label.extend(list(output_label))
#return output1, output2, output3
return np.asarray(prob)
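# Usage sketch (the paths and sizes below are placeholder assumptions; the test
# prototxt is expected to forward the whole test set in one pass, since cv_test_cnn
# calls net_t.forward() only once):
#
#   probs = cv_test_cnn(test_size=1902,
#                       trained_model='model_file/best_model.caffemodel',
#                       test_file='proto/ribo_test.prototxt')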
| {
"content_hash": "372463275531c1c4f30776ce56cc32bb",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 108,
"avg_line_length": 37.414965986394556,
"alnum_prop": 0.5896363636363636,
"repo_name": "zhangsaithu/rose_demo",
"id": "d2365400eef2d277ad60cc1c2ccc048e1812b5ea",
"size": "5500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/ribo_convnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21043"
}
],
"symlink_target": ""
} |
import os
import re
import json
import base64
import logging
import datetime
import time
import copy
import decimal
import cgi
import numpy
import pymongo
from lib import config, util, util_metrocoin
D = decimal.Decimal
def get_market_price(price_data, vol_data):
assert len(price_data) == len(vol_data)
assert len(price_data) <= config.MARKET_PRICE_DERIVE_NUM_POINTS
market_price = numpy.average(price_data, weights=vol_data)
return market_price
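# Example: with price_data=[1.0, 1.2] and vol_data=[10, 30], the derived market price
# is the volume-weighted average (1.0*10 + 1.2*30) / 40 == 1.15.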
def get_market_price_summary(asset1, asset2, with_last_trades=0, start_dt=None, end_dt=None):
"""Gets a synthesized trading "market price" for a specified asset pair (if available), as well as additional info.
If no price is available, False is returned.
"""
mongo_db = config.mongo_db
if not end_dt:
end_dt = datetime.datetime.utcnow()
if not start_dt:
start_dt = end_dt - datetime.timedelta(days=10) #default to 10 days in the past
#look for the last max 6 trades within the past 10 day window
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
base_asset_info = mongo_db.tracked_assets.find_one({'asset': base_asset})
quote_asset_info = mongo_db.tracked_assets.find_one({'asset': quote_asset})
if not isinstance(with_last_trades, int) or with_last_trades < 0 or with_last_trades > 30:
raise Exception("Invalid with_last_trades")
if not base_asset_info or not quote_asset_info:
raise Exception("Invalid asset(s)")
last_trades = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset,
'block_time': { "$gte": start_dt, "$lte": end_dt }
},
{'_id': 0, 'block_index': 1, 'block_time': 1, 'unit_price': 1, 'base_quantity_normalized': 1, 'quote_quantity_normalized': 1}
).sort("block_time", pymongo.DESCENDING).limit(max(config.MARKET_PRICE_DERIVE_NUM_POINTS, with_last_trades))
if not last_trades.count():
return None #no suitable trade data to form a market price (return None, NOT False here)
last_trades = list(last_trades)
last_trades.reverse() #from newest to oldest
market_price = get_market_price(
[last_trades[i]['unit_price'] for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))],
[(last_trades[i]['base_quantity_normalized'] + last_trades[i]['quote_quantity_normalized']) for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))])
result = {
'market_price': float(D(market_price)),
'base_asset': base_asset,
'quote_asset': quote_asset,
}
if with_last_trades:
#[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
result['last_trades'] = [[
t['block_time'],
t['unit_price'],
t['base_quantity_normalized'],
t['quote_quantity_normalized'],
t['block_index']
] for t in last_trades]
else:
result['last_trades'] = []
return result
def calc_inverse(quantity):
return float( (D(1) / D(quantity) ))
def calc_price_change(open, close):
return float((D(100) * (D(close) - D(open)) / D(open)))
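# Examples: calc_inverse(0.25) == 4.0, and calc_price_change(100, 110) == 10.0 (a 10% rise).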
def get_price_primatives(start_dt=None, end_dt=None):
mps_xmn_met = get_market_price_summary(config.XMN, config.MET, start_dt=start_dt, end_dt=end_dt)
xmn_met_price = mps_xmn_met['market_price'] if mps_xmn_met else None # == XMN/MET
met_xmn_price = calc_inverse(mps_xmn_met['market_price']) if mps_xmn_met else None #MET/XMN
return mps_xmn_met, xmn_met_price, met_xmn_price
def get_asset_info(asset, at_dt=None):
mongo_db = config.mongo_db
asset_info = mongo_db.tracked_assets.find_one({'asset': asset})
if asset not in (config.XMN, config.MET) and at_dt and asset_info['_at_block_time'] > at_dt:
#get the asset info at or before the given at_dt datetime
for e in reversed(asset_info['_history']): #newest to oldest
if e['_at_block_time'] <= at_dt:
asset_info = e
break
else: #asset was created AFTER at_dt
asset_info = None
if asset_info is None: return None
assert asset_info['_at_block_time'] <= at_dt
#modify some of the properties of the returned asset_info for MET and XMN
if asset == config.MET:
if at_dt:
start_block_index, end_block_index = util.get_block_indexes_for_dates(end_dt=at_dt)
asset_info['total_issued'] = util_metrocoin.get_met_supply(normalize=False, at_block_index=end_block_index)
asset_info['total_issued_normalized'] = util_metrocoin.normalize_quantity(asset_info['total_issued'])
else:
asset_info['total_issued'] = util_metrocoin.get_met_supply(normalize=False)
asset_info['total_issued_normalized'] = util_metrocoin.normalize_quantity(asset_info['total_issued'])
elif asset == config.XMN:
#BUG: this does not take end_dt (if specified) into account. however, the deviation won't be too big
# as XMN doesn't deflate quickly at all, and shouldn't matter that much since there weren't any/much trades
# before the end of the burn period (which is what is involved with how we use at_dt with currently)
asset_info['total_issued'] = util.call_jsonrpc_api("get_xmn_supply", abort_on_error=True)['result']
asset_info['total_issued_normalized'] = util_metrocoin.normalize_quantity(asset_info['total_issued'])
if not asset_info:
raise Exception("Invalid asset: %s" % asset)
return asset_info
def get_xmn_met_price_info(asset, mps_xmn_met, xmn_met_price, met_xmn_price, with_last_trades=0, start_dt=None, end_dt=None):
if asset not in [config.MET, config.XMN]:
#get price data for both the asset with XMN, as well as MET
price_summary_in_xmn = get_market_price_summary(asset, config.XMN,
with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
price_summary_in_met = get_market_price_summary(asset, config.MET,
with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
#aggregated (averaged) price (expressed as XMN) for the asset on both the XMN and MET markets
if price_summary_in_xmn: # no trade data
price_in_xmn = price_summary_in_xmn['market_price']
if xmn_met_price:
aggregated_price_in_xmn = float(((D(price_summary_in_xmn['market_price']) + D(xmn_met_price)) / D(2)))
else: aggregated_price_in_xmn = None
else:
price_in_xmn = None
aggregated_price_in_xmn = None
if price_summary_in_met: #there is trade data against MET
price_in_met = price_summary_in_met['market_price']
if met_xmn_price:
aggregated_price_in_met = float(((D(price_summary_in_met['market_price']) + D(met_xmn_price)) / D(2)))
else: aggregated_price_in_met = None
else:
aggregated_price_in_met = None
price_in_met = None
else:
#here we take the normal XMN/MET pair, and invert it to MET/XMN, to get XMN's data in terms of a MET base
# (this is the only area we do this, as MET/XMN is NOT standard pair ordering)
price_summary_in_xmn = mps_xmn_met #might be None
price_summary_in_met = copy.deepcopy(mps_xmn_met) if mps_xmn_met else None #must invert this -- might be None
if price_summary_in_met:
price_summary_in_met['market_price'] = calc_inverse(price_summary_in_met['market_price'])
price_summary_in_met['base_asset'] = config.MET
price_summary_in_met['quote_asset'] = config.XMN
for i in xrange(len(price_summary_in_met['last_trades'])):
#[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
price_summary_in_met['last_trades'][i][1] = calc_inverse(price_summary_in_met['last_trades'][i][1])
price_summary_in_met['last_trades'][i][2], price_summary_in_met['last_trades'][i][3] = \
price_summary_in_met['last_trades'][i][3], price_summary_in_met['last_trades'][i][2] #swap
if asset == config.XMN:
price_in_xmn = 1.0
price_in_met = price_summary_in_met['market_price'] if price_summary_in_met else None
aggregated_price_in_xmn = 1.0
aggregated_price_in_met = met_xmn_price #might be None
else:
assert asset == config.MET
price_in_xmn = price_summary_in_xmn['market_price'] if price_summary_in_xmn else None
price_in_met = 1.0
aggregated_price_in_xmn = xmn_met_price #might be None
aggregated_price_in_met = 1.0
return (price_summary_in_xmn, price_summary_in_met, price_in_xmn, price_in_met, aggregated_price_in_xmn, aggregated_price_in_met)
def calc_market_cap(asset_info, price_in_xmn, price_in_met):
market_cap_in_xmn = float( (D(asset_info['total_issued_normalized']) / D(price_in_xmn))) if price_in_xmn else None
market_cap_in_met = float( (D(asset_info['total_issued_normalized']) / D(price_in_met))) if price_in_met else None
return market_cap_in_xmn, market_cap_in_met
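#Hedged illustration (added; not part of the original module): calc_market_cap()
# divides the normalized supply by the price primitive, and returns None for any
# side whose price is missing. Toy numbers only.
def _example_market_cap():
    toy_asset_info = {'total_issued_normalized': 1000000.0}
    cap_in_xmn, cap_in_met = calc_market_cap(toy_asset_info, 0.5, None)
    assert cap_in_xmn == 2000000.0 and cap_in_met is None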
def compile_summary_market_info(asset, mps_xmn_met, xmn_met_price, met_xmn_price):
"""Returns information related to capitalization, volume, etc. for the supplied asset
NOTE: in_met == base asset is MET, in_xmn == base asset is XMN
@param asset: The asset to compile summary market info for
"""
asset_info = get_asset_info(asset)
(price_summary_in_xmn, price_summary_in_met, price_in_xmn, price_in_met, aggregated_price_in_xmn, aggregated_price_in_met
) = get_xmn_met_price_info(asset, mps_xmn_met, xmn_met_price, met_xmn_price, with_last_trades=30)
market_cap_in_xmn, market_cap_in_met = calc_market_cap(asset_info, price_in_xmn, price_in_met)
return {
'price_in_{}'.format(config.XMN.lower()): price_in_xmn, #current price of asset vs XMN (e.g. how many units of asset for 1 unit XMN)
'price_in_{}'.format(config.MET.lower()): price_in_met, #current price of asset vs MET (e.g. how many units of asset for 1 unit MET)
'price_as_{}'.format(config.XMN.lower()): calc_inverse(price_in_xmn) if price_in_xmn else None, #current price of asset AS XMN
'price_as_{}'.format(config.MET.lower()): calc_inverse(price_in_met) if price_in_met else None, #current price of asset AS MET
'aggregated_price_in_{}'.format(config.XMN.lower()): aggregated_price_in_xmn,
'aggregated_price_in_{}'.format(config.MET.lower()): aggregated_price_in_met,
'aggregated_price_as_{}'.format(config.XMN.lower()): calc_inverse(aggregated_price_in_xmn) if aggregated_price_in_xmn else None,
'aggregated_price_as_{}'.format(config.MET.lower()): calc_inverse(aggregated_price_in_met) if aggregated_price_in_met else None,
'total_supply': asset_info['total_issued_normalized'],
'market_cap_in_{}'.format(config.XMN.lower()): market_cap_in_xmn,
'market_cap_in_{}'.format(config.MET.lower()): market_cap_in_met,
}
def compile_24h_market_info(asset):
asset_data = {}
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
mongo_db = config.mongo_db
#perform aggregation to get 24h statistics
#TOTAL volume and count across all trades for the asset (on ALL markets, not just XMN and MET pairings)
_24h_vols = {'vol': 0, 'count': 0}
_24h_vols_as_base = mongo_db.trades.aggregate([
{"$match": {
"base_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_vols_as_base = {} if not _24h_vols_as_base['ok'] \
or not len(_24h_vols_as_base['result']) else _24h_vols_as_base['result'][0]
_24h_vols_as_quote = mongo_db.trades.aggregate([
{"$match": {
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"quote_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"vol": {"$sum": "$quote_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_vols_as_quote = {} if not _24h_vols_as_quote['ok'] \
or not len(_24h_vols_as_quote['result']) else _24h_vols_as_quote['result'][0]
_24h_vols['vol'] = _24h_vols_as_base.get('vol', 0) + _24h_vols_as_quote.get('vol', 0)
_24h_vols['count'] = _24h_vols_as_base.get('count', 0) + _24h_vols_as_quote.get('count', 0)
#XMN market volume with stats
if asset != config.XMN:
_24h_ohlc_in_xmn = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.XMN,
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_ohlc_in_xmn = {} if not _24h_ohlc_in_xmn['ok'] \
or not len(_24h_ohlc_in_xmn['result']) else _24h_ohlc_in_xmn['result'][0]
if _24h_ohlc_in_xmn: del _24h_ohlc_in_xmn['_id']
else:
_24h_ohlc_in_xmn = {}
#MET market volume with stats
if asset != config.MET:
_24h_ohlc_in_met = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.MET,
"quote_asset": asset,
"block_time": {"$gte": start_dt_1d } }},
{"$project": {
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": 1,
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}}
])
_24h_ohlc_in_met = {} if not _24h_ohlc_in_met['ok'] \
or not len(_24h_ohlc_in_met['result']) else _24h_ohlc_in_met['result'][0]
if _24h_ohlc_in_met: del _24h_ohlc_in_met['_id']
else:
_24h_ohlc_in_met = {}
return {
'24h_summary': _24h_vols,
#^ total quantity traded of that asset in all markets in last 24h
'24h_ohlc_in_{}'.format(config.XMN.lower()): _24h_ohlc_in_xmn,
#^ quantity of asset traded with XMN in last 24h
'24h_ohlc_in_{}'.format(config.MET.lower()): _24h_ohlc_in_met,
#^ quantity of asset traded with MET in last 24h
'24h_vol_price_change_in_{}'.format(config.XMN.lower()): calc_price_change(_24h_ohlc_in_xmn['open'], _24h_ohlc_in_xmn['close'])
if _24h_ohlc_in_xmn else None,
#^ aggregated price change from 24h ago to now, expressed as a signed float (e.g. .54 is +54%, -1.12 is -112%)
'24h_vol_price_change_in_{}'.format(config.MET.lower()): calc_price_change(_24h_ohlc_in_met['open'], _24h_ohlc_in_met['close'])
if _24h_ohlc_in_met else None,
}
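#Hedged note (added for illustration): the aggregate() calls above assume the
# pre-PyMongo-3 calling convention, where Collection.aggregate() returns a dict of
# the form {'ok': 1.0, 'result': [...]} rather than a cursor. A toy reply is
# unpacked here the same way the code above does it.
def _example_unpack_aggregate_reply():
    reply = {'ok': 1.0, 'result': [{'_id': 1, 'vol': 12.5, 'count': 3}]}
    stats = {} if not reply['ok'] or not len(reply['result']) else reply['result'][0]
    assert stats['vol'] == 12.5 and stats['count'] == 3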
def compile_7d_market_info(asset):
mongo_db = config.mongo_db
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
#get XMN and MET market summarized trades over a 7d period (quantize to hour long slots)
_7d_history_in_xmn = None # xmn/asset market (or xmn/met for xmn or met)
_7d_history_in_met = None # met/asset market (or met/xmn for xmn or met)
if asset not in [config.MET, config.XMN]:
for a in [config.XMN, config.MET]:
_7d_history = mongo_db.trades.aggregate([
{"$match": {
"base_asset": a,
"quote_asset": asset,
"block_time": {"$gte": start_dt_7d }
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"price": {"$avg": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
}},
])
_7d_history = [] if not _7d_history['ok'] else _7d_history['result']
if a == config.XMN: _7d_history_in_xmn = _7d_history
else: _7d_history_in_met = _7d_history
else: #get the XMN/MET market and invert for MET/XMN (_7d_history_in_met)
_7d_history = mongo_db.trades.aggregate([
{"$match": {
"base_asset": config.XMN,
"quote_asset": config.MET,
"block_time": {"$gte": start_dt_7d }
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"price": {"$avg": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
}},
])
_7d_history = [] if not _7d_history['ok'] else _7d_history['result']
_7d_history_in_xmn = _7d_history
_7d_history_in_met = copy.deepcopy(_7d_history_in_xmn)
for i in xrange(len(_7d_history_in_met)):
_7d_history_in_met[i]['price'] = calc_inverse(_7d_history_in_met[i]['price'])
_7d_history_in_met[i]['vol'] = calc_inverse(_7d_history_in_met[i]['vol'])
for l in [_7d_history_in_xmn, _7d_history_in_met]:
for e in l: #convert our _id field out to be an epoch ts (in ms), and delete _id
e['when'] = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000
del e['_id']
return {
'7d_history_in_{}'.format(config.XMN.lower()): [[e['when'], e['price']] for e in _7d_history_in_xmn],
'7d_history_in_{}'.format(config.MET.lower()): [[e['when'], e['price']] for e in _7d_history_in_met],
}
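#Hedged illustration (added; relies on the module-level time/datetime imports that
# the code above already uses): how an hourly bucket _id is converted into an epoch
# timestamp in milliseconds. Note that time.mktime() interprets the naive datetime
# in local time.
def _example_bucket_to_epoch_ms():
    bucket_id = {'year': 2014, 'month': 1, 'day': 2, 'hour': 3}
    when = time.mktime(datetime.datetime(
        bucket_id['year'], bucket_id['month'], bucket_id['day'], bucket_id['hour']).timetuple()) * 1000
    return when #e.g. 1388631600000.0 when the local timezone is UTC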
def compile_asset_pair_market_info():
"""Compiles the pair-level statistics that show on the View Prices page of counterwallet, for instance"""
#loop through all open orders, and compile a listing of pairs, with a count of open orders for each pair
mongo_db = config.mongo_db
end_dt = datetime.datetime.utcnow()
start_dt = end_dt - datetime.timedelta(days=1)
start_block_index, end_block_index = util.get_block_indexes_for_dates(start_dt=start_dt, end_dt=end_dt)
open_orders = util.call_jsonrpc_api("get_orders",
{ 'filters': [
{'field': 'give_remaining', 'op': '>', 'value': 0},
{'field': 'get_remaining', 'op': '>', 'value': 0},
{'field': 'fee_required_remaining', 'op': '>=', 'value': 0},
{'field': 'fee_provided_remaining', 'op': '>=', 'value': 0},
],
'status': 'open',
'show_expired': False,
}, abort_on_error=True)['result']
pair_data = {}
asset_info = {}
def get_price(base_quantity_normalized, quote_quantity_normalized):
return float(D(quote_quantity_normalized) / D(base_quantity_normalized))
#COMPOSE order depth, lowest ask, and highest bid column data
for o in open_orders:
(base_asset, quote_asset) = util.assets_to_asset_pair(o['give_asset'], o['get_asset'])
pair = '%s/%s' % (base_asset, quote_asset)
base_asset_info = asset_info.get(base_asset, mongo_db.tracked_assets.find_one({ 'asset': base_asset }))
if base_asset not in asset_info: asset_info[base_asset] = base_asset_info
quote_asset_info = asset_info.get(quote_asset, mongo_db.tracked_assets.find_one({ 'asset': quote_asset }))
if quote_asset not in asset_info: asset_info[quote_asset] = quote_asset_info
pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None,
'completed_trades_count': 0, 'vol_base': 0, 'vol_quote': 0})
#^ lowest ask = best open order selling base, highest bid = best open order buying base
#^ we also initialize completed_trades_count, vol_base, vol_quote because every pair initialized here may
# not have corresponding data out of the trades_data_by_pair aggregation below
pair_data[pair]['open_orders_count'] += 1
base_quantity_normalized = util_metrocoin.normalize_quantity(o['give_quantity'] if base_asset == o['give_asset'] else o['get_quantity'], base_asset_info['divisible'])
quote_quantity_normalized = util_metrocoin.normalize_quantity(o['give_quantity'] if quote_asset == o['give_asset'] else o['get_quantity'], quote_asset_info['divisible'])
order_price = get_price(base_quantity_normalized, quote_quantity_normalized)
if base_asset == o['give_asset']: #selling base
if pair_data[pair]['lowest_ask'] is None or order_price < pair_data[pair]['lowest_ask']:
pair_data[pair]['lowest_ask'] = order_price
elif base_asset == o['get_asset']: #buying base
if pair_data[pair]['highest_bid'] is None or order_price > pair_data[pair]['highest_bid']:
pair_data[pair]['highest_bid'] = order_price
#COMPOSE volume data (in XMN and MET), and % change data
#loop through all trade volume over the past 24h, and match that to the open orders
trades_data_by_pair = mongo_db.trades.aggregate([
{"$match": {
"block_time": {"$gte": start_dt, "$lte": end_dt } }
},
{"$project": {
"base_asset": 1,
"quote_asset": 1,
"base_quantity_normalized": 1, #to derive base volume
"quote_quantity_normalized": 1 #to derive quote volume
}},
{"$group": {
"_id": {"base_asset": "$base_asset", "quote_asset": "$quote_asset"},
"vol_base": {"$sum": "$base_quantity_normalized"},
"vol_quote": {"$sum": "$quote_quantity_normalized"},
"count": {"$sum": 1},
}}
])
trades_data_by_pair = [] if not trades_data_by_pair['ok'] else trades_data_by_pair['result']
for e in trades_data_by_pair:
pair = '%s/%s' % (e['_id']['base_asset'], e['_id']['quote_asset'])
pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None})
#^ initialize an empty pair in the event there are no open orders for that pair, but there ARE completed trades for it
pair_data[pair]['completed_trades_count'] = e['count']
pair_data[pair]['vol_base'] = e['vol_base']
pair_data[pair]['vol_quote'] = e['vol_quote']
#compose price data, relative to MET and XMN
mps_xmn_met, xmn_met_price, met_xmn_price = get_price_primatives()
for pair, e in pair_data.iteritems():
base_asset, quote_asset = pair.split('/')
_24h_vol_in_met = None
_24h_vol_in_xmn = None
#derive asset price data, expressed in MET and XMN, for the given volumes
if base_asset == config.XMN:
_24h_vol_in_xmn = e['vol_base']
_24h_vol_in_met = util_metrocoin.round_out(e['vol_base'] * xmn_met_price) if xmn_met_price else 0
elif base_asset == config.MET:
_24h_vol_in_xmn = util_metrocoin.round_out(e['vol_base'] * met_xmn_price) if met_xmn_price else 0
_24h_vol_in_met = e['vol_base']
else: #base is not XMN or MET
price_summary_in_xmn, price_summary_in_met, price_in_xmn, price_in_met, aggregated_price_in_xmn, aggregated_price_in_met = \
get_xmn_met_price_info(base_asset, mps_xmn_met, xmn_met_price, met_xmn_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
if price_in_xmn:
_24h_vol_in_xmn = util_metrocoin.round_out(e['vol_base'] * price_in_xmn)
if price_in_met:
_24h_vol_in_met = util_metrocoin.round_out(e['vol_base'] * price_in_met)
if _24h_vol_in_xmn is None or _24h_vol_in_met is None:
#the base asset didn't have price data against MET or XMN, or both...try against the quote asset instead
price_summary_in_xmn, price_summary_in_met, price_in_xmn, price_in_met, aggregated_price_in_xmn, aggregated_price_in_met = \
get_xmn_met_price_info(quote_asset, mps_xmn_met, xmn_met_price, met_xmn_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
if _24h_vol_in_xmn is None and price_in_xmn:
_24h_vol_in_xmn = util_metrocoin.round_out(e['vol_quote'] * price_in_xmn)
if _24h_vol_in_met is None and price_in_met:
_24h_vol_in_met = util_metrocoin.round_out(e['vol_quote'] * price_in_met)
pair_data[pair]['24h_vol_in_{}'.format(config.XMN.lower())] = _24h_vol_in_xmn #might still be None
pair_data[pair]['24h_vol_in_{}'.format(config.MET.lower())] = _24h_vol_in_met #might still be None
#get % change stats -- start by getting the first trade directly before the 24h period starts
prev_trade = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset,
"block_time": {'$lt': start_dt}}).sort('block_time', pymongo.DESCENDING).limit(1)
latest_trade = mongo_db.trades.find({
"base_asset": base_asset,
"quote_asset": quote_asset}).sort('block_time', pymongo.DESCENDING).limit(1)
if not prev_trade.count(): #no previous trade before this 24hr period
pair_data[pair]['24h_pct_change'] = None
else:
prev_trade = prev_trade[0]
latest_trade = latest_trade[0]
prev_trade_price = get_price(prev_trade['base_quantity_normalized'], prev_trade['quote_quantity_normalized'])
latest_trade_price = get_price(latest_trade['base_quantity_normalized'], latest_trade['quote_quantity_normalized'])
pair_data[pair]['24h_pct_change'] = ((latest_trade_price - prev_trade_price) / prev_trade_price) * 100
pair_data[pair]['last_updated'] = end_dt
#print "PRODUCED", pair, pair_data[pair]
mongo_db.asset_pair_market_info.update( {'base_asset': base_asset, 'quote_asset': quote_asset}, {"$set": pair_data[pair]}, upsert=True)
#remove any old pairs that were not just updated
mongo_db.asset_pair_market_info.remove({'last_updated': {'$lt': end_dt}})
logging.info("Recomposed 24h trade statistics for %i asset pairs: %s" % (len(pair_data), ', '.join(pair_data.keys())))
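#Hedged sketch (added for illustration): the loop above upserts one document per
# pair keyed on (base_asset, quote_asset) and stamps it with last_updated == end_dt,
# so the trailing remove() can drop any pair that was not refreshed in this run.
# 'collection' stands in for mongo_db.asset_pair_market_info.
def _example_upsert_and_prune(collection, base_asset, quote_asset, doc, refreshed_at):
    doc['last_updated'] = refreshed_at
    collection.update({'base_asset': base_asset, 'quote_asset': quote_asset}, {"$set": doc}, upsert=True)
    collection.remove({'last_updated': {'$lt': refreshed_at}})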
def compile_asset_market_info():
"""Run through all assets and compose and store market ranking information."""
mongo_db = config.mongo_db
if not config.CAUGHT_UP:
logging.warn("Not updating asset market info as CAUGHT_UP is false.")
return False
#grab the last block # we processed assets data off of
last_block_assets_compiled = mongo_db.app_config.find_one()['last_block_assets_compiled']
last_block_time_assets_compiled = util.get_block_time(last_block_assets_compiled)
#logging.debug("Compiling info for assets traded since block %i" % last_block_assets_compiled)
current_block_index = config.CURRENT_BLOCK_INDEX #store now as it may change as we are compiling asset data :)
current_block_time = util.get_block_time(current_block_index)
if current_block_index == last_block_assets_compiled:
#all caught up -- call again in 10 minutes
return True
mps_xmn_met, xmn_met_price, met_xmn_price = get_price_primatives()
all_traded_assets = list(set(list([config.MET, config.XMN]) + list(mongo_db.trades.find({}, {'quote_asset': 1, '_id': 0}).distinct('quote_asset'))))
#######################
#get a list of all assets with a trade within the last 24h (not necessarily just against XMN and MET)
# ^ this is important because compiled market info has a 24h vol parameter that designates total volume for the asset across ALL pairings
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('base_asset'))
))
for asset in assets:
market_info_24h = compile_24h_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_24h})
#for all others (i.e. no trade in the last 24 hours), zero out the 24h trade data
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'24h_summary': {'vol': 0, 'count': 0},
'24h_ohlc_in_{}'.format(config.XMN.lower()): {},
'24h_ohlc_in_{}'.format(config.MET.lower()): {},
'24h_vol_price_change_in_{}'.format(config.XMN.lower()): None,
'24h_vol_price_change_in_{}'.format(config.MET.lower()): None,
}}, multi=True)
logging.info("Block: %s -- Calculated 24h stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#get a list of all assets with a trade within the last 7d up against XMN and MET
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}, 'base_asset': {'$in': [config.XMN, config.MET]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}}).distinct('base_asset'))
))
for asset in assets:
market_info_7d = compile_7d_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_7d})
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'7d_history_in_{}'.format(config.XMN.lower()): [],
'7d_history_in_{}'.format(config.MET.lower()): [],
}}, multi=True)
logging.info("Block: %s -- Calculated 7d stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#update summary market data for assets traded since last_block_assets_compiled
#get assets that were traded since the last check with either MET or XMN, and update their market summary data
assets = list(set(
list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}, 'base_asset': {'$in': [config.XMN, config.MET]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).distinct('base_asset'))
))
#update our storage of the latest market info in mongo
for asset in assets:
logging.info("Block: %s -- Updating asset market info for %s ..." % (current_block_index, asset))
summary_info = compile_summary_market_info(asset, mps_xmn_met, xmn_met_price, met_xmn_price)
mongo_db.asset_market_info.update( {'asset': asset}, {"$set": summary_info}, upsert=True)
#######################
#next, compile market cap historicals (and get the market price data that we can use to update assets with new trades)
#NOTE: this algorithm still needs to be fleshed out some...I'm not convinced it's laid out/optimized the way it should be
#start by getting all trades from when we last compiled this data
trades = mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).sort('block_index', pymongo.ASCENDING)
trades_by_block = [] #tracks assets compiled per block, as we only want to analyze any given asset once per block
trades_by_block_mapping = {}
#organize trades by block
for t in trades:
if t['block_index'] in trades_by_block_mapping:
assert trades_by_block_mapping[t['block_index']]['block_index'] == t['block_index']
assert trades_by_block_mapping[t['block_index']]['block_time'] == t['block_time']
trades_by_block_mapping[t['block_index']]['trades'].append(t)
else:
e = {'block_index': t['block_index'], 'block_time': t['block_time'], 'trades': [t,]}
trades_by_block.append(e)
trades_by_block_mapping[t['block_index']] = e
for t_block in trades_by_block:
#reverse the tradelist per block, and ensure that we only process an asset that hasn't already been processed for this block
# (as there could be multiple trades in a single block for any specific asset). we reverse the list because
# we'd rather process a later trade for a given asset, as the market price for that will take into account
# the earlier trades on that same block for that asset, and we don't want/need multiple cap points per block
assets_in_block = {}
mps_xmn_met, xmn_met_price, met_xmn_price = get_price_primatives(end_dt=t_block['block_time'])
for t in reversed(t_block['trades']):
assets = []
if t['base_asset'] not in assets_in_block:
assets.append(t['base_asset'])
assets_in_block[t['base_asset']] = True
if t['quote_asset'] not in assets_in_block:
assets.append(t['quote_asset'])
assets_in_block[t['quote_asset']] = True
if not len(assets): continue
for asset in assets:
#recalculate the market cap for the asset this trade is for
asset_info = get_asset_info(asset, at_dt=t['block_time'])
(price_summary_in_xmn, price_summary_in_met, price_in_xmn, price_in_met, aggregated_price_in_xmn, aggregated_price_in_met
) = get_xmn_met_price_info(asset, mps_xmn_met, xmn_met_price, met_xmn_price, with_last_trades=0, end_dt=t['block_time'])
market_cap_in_xmn, market_cap_in_met = calc_market_cap(asset_info, price_in_xmn, price_in_met)
#^ this will get price data from the block time of this trade back the standard number of days and trades
# to determine our standard market price, relative (anchored) to the time of this trade
for market_cap_as in (config.XMN, config.MET):
market_cap = market_cap_in_xmn if market_cap_as == config.XMN else market_cap_in_met
#if there is a previously stored market cap for this asset, add a new history point only if the two caps differ
prev_market_cap_history = mongo_db.asset_marketcap_history.find({'market_cap_as': market_cap_as, 'asset': asset,
'block_index': {'$lt': t['block_index']}}).sort('block_index', pymongo.DESCENDING).limit(1)
prev_market_cap_history = list(prev_market_cap_history)[0] if prev_market_cap_history.count() == 1 else None
if market_cap and (not prev_market_cap_history or prev_market_cap_history['market_cap'] != market_cap):
mongo_db.asset_marketcap_history.insert({
'block_index': t['block_index'],
'block_time': t['block_time'],
'asset': asset,
'market_cap': market_cap,
'market_cap_as': market_cap_as,
})
logging.info("Block %i -- Calculated market cap history point for %s as %s (mID: %s)" % (t['block_index'], asset, market_cap_as, t['message_index']))
mongo_db.app_config.update({}, {'$set': {'last_block_assets_compiled': current_block_index}})
return True
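#Hedged illustration (added; not part of the original module): the grouping idiom
# used by the market-cap pass above -- one entry per block, kept in block order,
# with all of that block's trades attached.
def _example_group_trades_by_block(trades):
    grouped = []
    mapping = {}
    for t in trades:
        if t['block_index'] in mapping:
            mapping[t['block_index']]['trades'].append(t)
        else:
            entry = {'block_index': t['block_index'], 'trades': [t]}
            grouped.append(entry)
            mapping[t['block_index']] = entry
    return grouped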
| {
"content_hash": "be1b38afcdf5ccd5b8281f695e6a012f",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 179,
"avg_line_length": 56.43854324734446,
"alnum_prop": 0.589922834942059,
"repo_name": "metronotes-beta/metroblockd",
"id": "9bc6119ebac713c3a1830836552bf7a1eb586efc",
"size": "37193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/components/assets_trading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "305503"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import datetime
from django.conf import settings
from django.conf.urls import (
include,
patterns,
url,
)
from django.conf.urls.static import static
from django.contrib import admin
from .views import (
HomeView,
StoryDetailView,
StoryMonthArchiveView,
)
admin.autodiscover()
urlpatterns = patterns(
'',
url(regex=r'^$',
view=HomeView.as_view(),
name='project.home'
),
url(regex=r'^story/(?P<pk>\d+)/$',
view=StoryDetailView.as_view(),
name='project.story.detail'
),
url(regex=r'^story/(?P<year>\d{4})/(?P<month>[-\w]+)/$',
view=StoryMonthArchiveView.as_view(),
name='project.story.archive'
),
url(regex=r'^',
view=include('login.urls')
),
url(regex=r'^admin/',
view=include(admin.site.urls)
),
url(regex=r'^captcha/',
view=include('captcha.urls')
),
url(regex=r'^pump/',
view=include('pump.urls')
),
url(regex=r'^article/',
view=include('templatepages.urls'),
kwargs=dict(
extra_context=dict(today=datetime.today(),)
)),
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# ^ helper function to return a URL pattern for serving files in debug mode.
# https://docs.djangoproject.com/en/1.5/howto/static-files/#serving-files-uploaded-by-a-user
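# Hedged example (added for illustration): with the named patterns above, links can
# be built with reverse(); the literal results assume this module is the root URLconf.
def _example_story_links():
    from django.core.urlresolvers import reverse
    return (
        reverse('project.story.detail', kwargs={'pk': 42}), # '/story/42/'
        reverse('project.story.archive', kwargs={'year': 2014, 'month': 'jan'}), # '/story/2014/jan/'
    )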
| {
"content_hash": "288379485d9a6e391d7f4a20a9f80e96",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 92,
"avg_line_length": 24.864406779661017,
"alnum_prop": 0.6087252897068848,
"repo_name": "pkimber/hatherleigh_net",
"id": "14e5edc83a74dc9fabac3e726a693b766b6f42b4",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "76"
},
{
"name": "Python",
"bytes": "64796"
}
],
"symlink_target": ""
} |
from unlock.state.state import UnlockState
class FastPadState(UnlockState):
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
SELECT_TIME = 2
def __init__(self):
# Initialize the state
super(FastPadState, self).__init__()
self.previous_mode = "CURSOR"
self.mode = "CURSOR"
self.currButton = None
self.button = None
self.selTime = 0
self.noop = False
def process_command(self, command):
"""
Updates the state from a decoded command; called periodically.
Input:
command -- object carrying the decoded input, with attributes:
command.decision -- (int) Decision, if any; one of UP, DOWN,
LEFT, or RIGHT
command.selection -- (int) 1 if a selection was made
command.delta -- (float) Number of seconds since the
previous call
"""
self.noop = False
if command.decision == FastPadState.LEFT:
self.mode = "CURSOR"
self.button = self.currButton.left
elif command.decision == FastPadState.RIGHT:
self.mode = "CURSOR"
self.button = self.currButton.right
elif command.decision == FastPadState.UP:
self.mode = "CURSOR"
self.button = self.currButton.up
elif command.decision == FastPadState.DOWN:
self.mode = "CURSOR"
self.button = self.currButton.down
elif command.selection:
self.mode = "SELECT"
self.button = self.currButton
# We've changed our selection, so reset the timer
self.selTime = 0
else:
# If we're in selection mode, track the time
if self.mode == "SELECT":
# Add the time
self.selTime += command.delta
# Should we select this item?
if self.selTime >= FastPadState.SELECT_TIME:
self.selTime = 0
self.mode = "CURSOR"
self.button = self.currButton
else:
self.noop = True
# If we're not in selection mode, reset the timer
else:
self.selTime = 0
self.noop = True
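# Hedged usage sketch (added for illustration; the real command objects come from
# elsewhere in unlock): process_command() only needs an object that exposes
# .decision, .selection and .delta.
class _FakeCommand(object):
    def __init__(self, decision=None, selection=0, delta=0.0):
        self.decision = decision
        self.selection = selection
        self.delta = delta

def _example_fastpad_selection():
    state = FastPadState()
    state.currButton = object() # stand-in for a real button widget
    state.process_command(_FakeCommand(selection=1)) # enters SELECT mode
    state.process_command(_FakeCommand(delta=FastPadState.SELECT_TIME)) # dwell long enough
    return state.mode # back to "CURSOR" once the selection completes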
| {
"content_hash": "0a7a3a28a766449b6af93d351a85064f",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 64,
"avg_line_length": 31.649350649350648,
"alnum_prop": 0.48707427164546574,
"repo_name": "NeuralProsthesisLab/unlock",
"id": "6327ab2d3cdeca773dcde0123211638565b07181",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unlock/state/fastpad_state.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1386"
},
{
"name": "C++",
"bytes": "994297"
},
{
"name": "CSS",
"bytes": "8977"
},
{
"name": "Go",
"bytes": "62639"
},
{
"name": "HTML",
"bytes": "33643"
},
{
"name": "JavaScript",
"bytes": "711666"
},
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Matlab",
"bytes": "81353"
},
{
"name": "Python",
"bytes": "493447"
},
{
"name": "Shell",
"bytes": "3842"
},
{
"name": "TeX",
"bytes": "29718"
}
],
"symlink_target": ""
} |
import pytest
from app.models.organisation import Organisation
from tests import organisation_json
@pytest.mark.parametrize(
"purchase_order_number,expected_result", [[None, None], ["PO1234", [None, None, None, "PO1234"]]]
)
def test_organisation_billing_details(purchase_order_number, expected_result):
organisation = Organisation(organisation_json(purchase_order_number=purchase_order_number))
assert organisation.billing_details == expected_result
| {
"content_hash": "7da65687e8a037c18c1aa8503fea3af5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 101,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.776824034334764,
"repo_name": "alphagov/notifications-admin",
"id": "053d9b3104dde1b0e4bb4d61e3a15f47d4f9c753",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/app/models/test_organisation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "691367"
},
{
"name": "JavaScript",
"bytes": "435783"
},
{
"name": "Jinja",
"bytes": "1979"
},
{
"name": "Makefile",
"bytes": "6501"
},
{
"name": "Procfile",
"bytes": "117"
},
{
"name": "Python",
"bytes": "2762624"
},
{
"name": "SCSS",
"bytes": "117758"
},
{
"name": "Shell",
"bytes": "1890"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from greengraph import Greengraph
from matplotlib import pyplot as plt
def process():
parser = ArgumentParser(description = "Plot the 'green-ness' of satellite images between two places")
parser.add_argument('--start', help='Choose a start location')
parser.add_argument('--end', help='Choose an end location')
parser.add_argument('--steps', type=int, help='Choose number of steps')
parser.add_argument('--out', help='Choose name of output file')
arguments = parser.parse_args()
mygraph = Greengraph(arguments.start, arguments.end)
data = mygraph.green_between(arguments.steps)
plt.plot(data)
plt.savefig(arguments.out)
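# Hedged sketch (added for illustration): the same pipeline driven from Python
# instead of argparse; the place names and output filename are made up.
def _example_programmatic_use():
    graph = Greengraph('London', 'Cambridge')
    data = graph.green_between(10)
    plt.plot(data)
    plt.savefig('london_cambridge.png')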
if __name__ == "__main__":
process() | {
"content_hash": "40bea03bd88ef06867229d32cf649190",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 105,
"avg_line_length": 38.78947368421053,
"alnum_prop": 0.7014925373134329,
"repo_name": "CDTjamie/Greengraph",
"id": "854095abd6b1af20dea388c96ccd5e2c0b940a35",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "greengraph/command_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7544"
}
],
"symlink_target": ""
} |
from tornado.concurrent import return_future
from thumbor.storages import BaseStorage
from ..aws.storage import AwsStorage
class Storage(AwsStorage, BaseStorage):
"""
S3 Storage
"""
def __init__(self, context):
"""
Constructor
:param Context context: Thumbor's context
"""
BaseStorage.__init__(self, context)
AwsStorage.__init__(self, context, 'TC_AWS_STORAGE')
@return_future
def put(self, path, bytes, callback=None):
"""
Stores image
:param string path: Path to store data at
:param bytes bytes: Data to store
:param callable callback:
:rtype: string
"""
def once_written(response):
if response is None or self._get_error(response) is not None:
callback(None)
else:
callback(path)
self.set(bytes, self._normalize_path(path), callback=once_written)
@return_future
def get(self, path, callback):
"""
Gets data at path
:param string path: Path for data
:param callable callback: Callback function for once the retrieval is done
"""
def parse_body(key):
if key is None or self._get_error(key) is not None:
callback(None)
else:
callback(key['Body'].read())
super(Storage, self).get(path, callback=parse_body)
def resolve_original_photo_path(self, filename):
"""
Determines original path for file
:param string filename: File to look at
:return: Resolved path (here it is the same)
:rtype: string
"""
return filename
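# Hedged usage sketch (added for illustration): because put() and get() are wrapped
# in tornado's return_future, callers typically consume them as coroutines. 'context'
# is assumed to be a configured thumbor Context, which this module does not build.
from tornado import gen

@gen.coroutine
def _example_roundtrip(context):
    storage = Storage(context)
    path = yield storage.put('unsafe/some-image.jpg', b'jpeg-bytes')
    data = yield storage.get(path)
    raise gen.Return(data)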
| {
"content_hash": "d8f40bf7f4e78b55f1760ce8c1c6c4f3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 82,
"avg_line_length": 28.4,
"alnum_prop": 0.5786384976525821,
"repo_name": "abaldwin1/tc_aws",
"id": "5ce31f93415c5abb75ed08771d455a620769b194",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tc_aws/storages/s3_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "207"
},
{
"name": "Python",
"bytes": "50206"
}
],
"symlink_target": ""
} |
import flask
from flask import request, make_response, current_app
from errorcodes import ErrorCodes
import json
from functools import wraps
def rest_api(f):
"""
A decorator for rest API
:param f:
:return:
"""
@wraps(f)
def decorator(*args, **kwargs):
json_object = None
if request.data:
try:
json_object = json.loads(request.data)
except ValueError as v:
current_app.logger.info("Invalid input = {}, error = {}".format(request.data, v))
return make_rest_response(make_error(ErrorCodes.InvalidInput.value, "Input is invalid"), 400)
if json_object:
result = f(*args, **dict(kwargs, json_body=json_object))
else:
result = f(*args, **kwargs)
if isinstance(result, flask.Response):
return result
else:
return flask.Response(json.dumps(result), content_type='application/json; charset=utf-8')
return decorator
def make_rest_response(error, status_code):
response = make_response(json.dumps(error), status_code)
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response
def make_error(code, message):
return {
"code": code,
"message": message
}
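# Hedged usage sketch (added for illustration): wiring the decorator above into a
# Flask view. The route and payload are made up.
def _example_app():
    app = flask.Flask(__name__)

    @app.route('/echo', methods=['POST'])
    @rest_api
    def echo(json_body=None):
        # rest_api passes the parsed JSON body (if any) as 'json_body' and
        # serializes the returned dict as the response.
        return {"received": json_body}

    return app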
| {
"content_hash": "1c72e5d4f54dc55fc16b0a015165afdf",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 109,
"avg_line_length": 27.375,
"alnum_prop": 0.6080669710806698,
"repo_name": "pureelk/pureelk",
"id": "73bfb5a327909f46039ad5066a57b6c22f8ab5d3",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "container/web/rest_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2280"
},
{
"name": "HTML",
"bytes": "12345"
},
{
"name": "JavaScript",
"bytes": "13086"
},
{
"name": "Python",
"bytes": "36178"
},
{
"name": "Shell",
"bytes": "9277"
}
],
"symlink_target": ""
} |
from setuptools import setup
from pympris import __version__, __description__, requires, README
setup(name='pympris',
version=__version__,
description=__description__,
author='Mikhail Mamrouski',
author_email='[email protected]',
url="https://github.com/wistful/pympris",
license="MIT License",
packages=['pympris'],
long_description=README,
install_requires=requires,
test_suite='tests.convert_test',
platforms=["Unix,"],
keywords="mpris, dbus",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: X11 Applications",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| {
"content_hash": "09bcfcdc6268e83ae04c8f09e9a74f19",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 34.370370370370374,
"alnum_prop": 0.5980603448275862,
"repo_name": "wistful/pympris",
"id": "700edef0631355806705d9e3f7989dbd474a6707",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44038"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from jsonfield import JSONField
#Local Imports
from utils.models import TimeStampedModel,BaseQuerySet
class ForwardMessage(TimeStampedModel):
"""
A ForwardMessage is a message *instance* for incoming messages forwarded
to a different instance of mWACH
"""
STATUS_CHOICES = (
('success','Success'),
('failed','Failed'),
('none','No Forward In Transport')
)
#Set Custom Manager
objects = BaseQuerySet.as_manager()
class Meta:
ordering = ('-created',)
app_label = 'transports'
identity = models.CharField(max_length=25)
text = models.TextField(help_text='Text of the SMS message')
transport = models.CharField(max_length=25,help_text='Transport name')
fwrd_status = models.CharField(max_length=25,choices=STATUS_CHOICES,help_text='Forward Status')
url = models.CharField(max_length=250,help_text='Forward URL')
#Africa's Talking Data Only for outgoing messages
external_id = models.CharField(max_length=50,blank=True)
external_data = JSONField(blank=True)
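#Hedged usage sketch (added for illustration): recording a forwarded inbound SMS.
# All field values below are made up; 'success' is one of the STATUS_CHOICES above.
def _example_log_forward():
    return ForwardMessage.objects.create(
        identity='+254700000000',
        text='hello from a forwarded SMS',
        transport='africas_talking',
        fwrd_status='success',
        url='https://example.com/api/v1/sms',
        external_id='',
        external_data={},
    )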
| {
"content_hash": "6861cb04a86d75e3e0198e2d80905c4a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 99,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.7077702702702703,
"repo_name": "tperrier/mwachx",
"id": "b8f71080d819e40bb7eef6af8d73d0480d3c322a",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transports/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14250"
},
{
"name": "HTML",
"bytes": "56213"
},
{
"name": "JavaScript",
"bytes": "43448"
},
{
"name": "Python",
"bytes": "343924"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
} |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from website.views import dashboard, new_settings
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),
url(r'^logout/$', auth_views.logout, {'next_page': '/login'}, name='logout'),
url(r'^smitadmin/', admin.site.urls),
url(r'^settings/', new_settings, name="new_settings"),
url(r'^$', dashboard, name="dashboard"),
]
| {
"content_hash": "7d14b496cc7672bfb09177ec2fccc16d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 40.857142857142854,
"alnum_prop": 0.6896853146853147,
"repo_name": "ksamuel/smit",
"id": "16cdc8f3f1ffad95da94732e0424c27fccb8e947",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2574"
},
{
"name": "HTML",
"bytes": "15037"
},
{
"name": "JavaScript",
"bytes": "1415096"
},
{
"name": "Python",
"bytes": "66993"
}
],
"symlink_target": ""
} |
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HDSStream, HLSStream, RTMPStream
_url_re = re.compile(r"""http(?:s)?://(?:\w+\.)?rtlxl.nl/#!/(?:.*)/(?P<uuid>.*?)\Z""", re.IGNORECASE)
class rtlxl(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
uuid = match.group("uuid")
html = http.get('http://www.rtl.nl/system/s4m/vfd/version=2/uuid={}/d=pc/fmt=adaptive/'.format(uuid)).text
playlist_url = "http://manifest.us.rtl.nl" + re.compile(r'videopath":"(?P<playlist_url>.*?)",', re.IGNORECASE).search(html).group("playlist_url")
return HLSStream.parse_variant_playlist(self.session, playlist_url)
__plugin__ = rtlxl
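# Hedged usage sketch (added for illustration): plugins are normally not called
# directly; a Streamlink session routes a matching rtlxl.nl URL to this plugin and
# returns the variant HLS streams parsed above.
def _example_resolve(url):
    from streamlink import Streamlink
    session = Streamlink()
    return session.streams(url) # e.g. {'best': <HLSStream>, ...} for a valid show URL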
| {
"content_hash": "1c59861c7f82802d81572d2602eb1827",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 153,
"avg_line_length": 36.083333333333336,
"alnum_prop": 0.6316397228637414,
"repo_name": "mmetak/streamlink",
"id": "11462502a53144ca74d30f48f7302ce5af294893",
"size": "866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/rtlxl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "932019"
},
{
"name": "Shell",
"bytes": "16668"
}
],
"symlink_target": ""
} |
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape
from keras.utils.vis_utils import model_to_dot
from IPython.display import SVG, display
model = Sequential()
model.add(Reshape((28, 28, 1), input_shape=(784,) ))
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation="relu"))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation="relu"))
model.add(MaxPool2D())
model.add(Reshape((-1,)))
model.add(Dense(units=1024, activation="relu"))
model.add(Dense(units=10, activation="softmax"))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
display(SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')))
model.fit(train_X, train_Y, validation_data=(validation_X, validation_Y), batch_size=128, epochs=15)
rtn = model.evaluate(test_X, test_Y)
print("\ntest accuracy=", rtn[1]) | {
"content_hash": "6246bf342c29f984fdc3c139ef3d8a1a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 100,
"avg_line_length": 50.1764705882353,
"alnum_prop": 0.7045720984759671,
"repo_name": "tjwei/HackNTU_Data_2017",
"id": "80a0c4412420a1c9b00d08217563b54ba38a72e0",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Week06/q_keras_cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "53448703"
},
{
"name": "Python",
"bytes": "20881"
}
],
"symlink_target": ""
} |
import logging
import time
import unicodedata
from urllib2 import URLError
from apps.cowry.adapters import AbstractPaymentAdapter
from apps.cowry.models import PaymentStatuses, PaymentLogLevels
from django.conf import settings
from django.utils.http import urlencode
from suds.client import Client
from suds.plugin import MessagePlugin, DocumentPlugin
from .exceptions import DocDataPaymentException
from .models import DocDataPaymentOrder, DocDataPayment, DocDataWebDirectDirectDebit, DocDataPaymentLogEntry
logger = logging.getLogger(__name__)
cowry_docdata_logger = logging.getLogger('cowry.docdata')
# Workaround for SSL problem on Debian Wheezy connecting to DocData live payment address.
#
# if getattr(settings, "COWRY_LIVE_PAYMENTS", False):
# import ssl
# from ssl import SSLSocket
#
# def wrap_socket(sock, keyfile=None, certfile=None,
# server_side=False, cert_reqs=ssl.CERT_NONE,
# ssl_version=ssl.PROTOCOL_SSLv3, ca_certs=None,
# do_handshake_on_connect=True,
# suppress_ragged_eofs=True, ciphers=None):
#
# return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
# server_side=server_side, cert_reqs=cert_reqs,
# ssl_version=ssl_version, ca_certs=ca_certs,
# do_handshake_on_connect=do_handshake_on_connect,
# suppress_ragged_eofs=suppress_ragged_eofs,
# ciphers=ciphers)
#
# ssl.wrap_socket = wrap_socket
# These defaults can be overridden with the COWRY_PAYMENT_METHODS setting.
default_payment_methods = {
'dd-ideal': {
'id': 'IDEAL',
'profile': 'ideal',
'name': 'iDeal',
'submethods': {
'0081': 'Fortis',
'0021': 'Rabobank',
'0721': 'ING Bank',
'0751': 'SNS Bank',
'0031': 'ABN Amro Bank',
'0761': 'ASN Bank',
'0771': 'SNS Regio Bank',
'0511': 'Triodos Bank',
'0091': 'Friesland Bank',
'0161': 'van Lanschot Bankiers'
},
'restricted_countries': ('NL',),
'supports_recurring': False,
},
'dd-direct-debit': {
'id': 'SEPA_DIRECT_DEBIT',
'profile': 'directdebit',
'name': 'Direct Debit',
'max_amount': 10000, # €100
'restricted_countries': ('NL',),
'supports_recurring': True,
'supports_single': False,
},
'dd-creditcard': {
'profile': 'creditcard',
'name': 'Credit Cards',
'supports_recurring': False,
},
'dd-webmenu': {
'profile': 'webmenu',
'name': 'Web Menu',
'supports_recurring': True,
}
}
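# Hedged illustration (added; not part of the original module): a settings override
# only needs to list the parameters it wants to change per payment-method id --
# get_payment_methods() below merges it over the defaults above. Values are made up.
_example_cowry_payment_methods_override = {
    'dd-ideal': {'max_amount': 50000}, # e.g. cap iDeal at EUR 500
    'dd-webmenu': {'supports_recurring': False},
}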
def docdata_payment_logger(payment, level, message):
if level == PaymentLogLevels.error:
cowry_docdata_logger.error("{0} - {1}".format(payment, message))
log_entry = DocDataPaymentLogEntry(docdata_payment_order=payment, level=level, message=message)
log_entry.save()
class DocDataAPIVersionPlugin(MessagePlugin):
"""
This adds the API version number to the body element. This is required for the DocData soap API.
"""
def marshalled(self, context):
body = context.envelope.getChild('Body')
request = body[0]
request.set('version', '1.0')
class DocDataBrokenWSDLPlugin(DocumentPlugin):
def parsed(self, context):
""" Called after parsing a WSDL or XSD document. The context contains the url & document root. """
# The WSDL for the live payments API incorrectly references the wrong location.
if len(context.document.children) == 19 and len(context.document.children[18]) > 0:
location_attribute = context.document.children[18].children[0].getChild('address').attributes[0]
location_attribute.setValue('https://secure.docdatapayments.com:443/ps/services/paymentservice/1_0')
class DocDataPaymentAdapter(AbstractPaymentAdapter):
# Mapping of DocData statuses to Cowry statuses. Statuses are from:
#
# Integration Manual Order API 1.0 - Document version 1.0, 08-12-2012 - Page 35
#
# The documentation is incorrect for the following statuses:
#
# Documented Actual
# ========== ======
#
# CANCELLED CANCELED
# CLOSED_CANCELED CLOSED_CANCELED (guessed based on old api)
#
status_mapping = {
'NEW': PaymentStatuses.new,
'STARTED': PaymentStatuses.in_progress,
'REDIRECTED_FOR_AUTHENTICATION': PaymentStatuses.in_progress,
'AUTHORIZED': PaymentStatuses.pending,
'AUTHORIZATION_REQUESTED': PaymentStatuses.pending,
'PAID': PaymentStatuses.pending,
'CANCELED': PaymentStatuses.cancelled,
'CHARGED-BACK': PaymentStatuses.chargedback,
'CONFIRMED_PAID': PaymentStatuses.paid,
'CONFIRMED_CHARGEDBACK': PaymentStatuses.chargedback,
'CLOSED_SUCCESS': PaymentStatuses.paid,
'CLOSED_CANCELED': PaymentStatuses.cancelled,
}
id_to_model_mapping = {
'dd-ideal': DocDataPayment,
'dd-webdirect': DocDataWebDirectDirectDebit,
'dd-webmenu': DocDataPayment,
}
def _init_docdata(self):
""" Creates the DocData test or live Suds client. """
error_message = 'Could not create Suds client to connect to DocData.'
if self.test:
# Test API.
test_url = 'https://test.docdatapayments.com/ps/services/paymentservice/1_0?wsdl'
logger.info('Using the test DocData API: {0}'.format(test_url))
try:
self.client = Client(test_url, plugins=[DocDataAPIVersionPlugin()])
except URLError as e:
self.client = None
logger.error('{0} {1}'.format(error_message, str(e)))
else:
# Setup the merchant soap object with the test password for use in all requests.
self.merchant = self.client.factory.create('ns0:merchant')
self.merchant._name = getattr(settings, "COWRY_DOCDATA_MERCHANT_NAME", None)
self.merchant._password = getattr(settings, "COWRY_DOCDATA_TEST_MERCHANT_PASSWORD", None)
else:
# Live API.
live_url = 'https://secure.docdatapayments.com/ps/services/paymentservice/1_0?wsdl'
logger.info('Using the live DocData API: {0}'.format(live_url))
try:
self.client = Client(live_url, plugins=[DocDataAPIVersionPlugin(), DocDataBrokenWSDLPlugin()])
except URLError as e:
self.client = None
logger.error('{0} {1}'.format(error_message, str(e)))
else:
# Setup the merchant soap object for use in all requests.
self.merchant = self.client.factory.create('ns0:merchant')
self.merchant._name = getattr(settings, "COWRY_DOCDATA_MERCHANT_NAME", None)
self.merchant._password = getattr(settings, "COWRY_DOCDATA_LIVE_MERCHANT_PASSWORD", None)
def __init__(self):
super(DocDataPaymentAdapter, self).__init__()
self._init_docdata()
def get_payment_methods(self):
# Override the payment_methods if they're set. This isn't in __init__ because
# we want to override the settings in the tests.
if not hasattr(self, '_payment_methods'):
settings_payment_methods = getattr(settings, 'COWRY_PAYMENT_METHODS', None)
if settings_payment_methods:
# Only override the parameters that are set.
self._payment_methods = {}
for pmi in settings_payment_methods:
settings_pm = settings_payment_methods[pmi]
if pmi in default_payment_methods:
default_pm = default_payment_methods[pmi]
for param in settings_pm:
default_pm[param] = settings_pm[param]
self._payment_methods[pmi] = default_pm
else:
self._payment_methods[pmi] = settings_pm
else:
self._payment_methods = default_payment_methods
return self._payment_methods
def create_payment_object(self, order, payment_method_id='', payment_submethod_id='', amount=0, currency=''):
payment = DocDataPaymentOrder(payment_method_id=payment_method_id,
payment_submethod_id=payment_submethod_id,
amount=amount, currency=currency)
payment.order = order
payment.save()
return payment
def generate_merchant_order_reference(self, payment):
other_payments = DocDataPaymentOrder.objects.filter(order=payment.order).exclude(id=payment.id).order_by('-merchant_order_reference')
dd_prefix = ''
if self.test:
try:
dd_prefix = settings.DOCDATA_PREFIX_NAME
except AttributeError:
logger.error("DOCDATA_PREFIX_NAME not set. Make sure secrets.py has a DOCDATA_PREFIX_NAME='<developer name>'")
return
if not other_payments:
return '{0}{1}-0'.format(dd_prefix, payment.order.order_number)
else:
latest_mor = other_payments[0].merchant_order_reference
order_payment_nums = latest_mor.split('-')
payment_num = int(order_payment_nums[1]) + 1
return '{0}{1}-{2}'.format(dd_prefix, payment.order.order_number, payment_num)
# TODO Find a way to use UTF-8 / unicode strings with Suds to make this truly international.
def convert_to_ascii(self, value):
""" Normalize / convert unicode characters to ascii equivalents. """
if isinstance(value, unicode):
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
else:
return value
def create_remote_payment_order(self, payment):
# Some preconditions.
if payment.payment_order_id:
raise DocDataPaymentException('ERROR', 'Cannot create two remote DocData Payment orders for same payment.')
if not payment.payment_method_id:
raise DocDataPaymentException('ERROR', 'payment_method_id is not set')
# We can't do anything if DocData isn't available.
if not self.client:
self._init_docdata()
if not self.client:
logger.error("Suds client is not configured. Can't create a remote DocData payment order.")
return
# Preferences for the DocData system.
paymentPreferences = self.client.factory.create('ns0:paymentPreferences')
paymentPreferences.profile = self.get_payment_methods()[payment.payment_method_id]['profile']
paymentPreferences.numberOfDaysToPay = 5
menuPreferences = self.client.factory.create('ns0:menuPreferences')
# Order Amount.
amount = self.client.factory.create('ns0:amount')
amount.value = str(payment.amount)
amount._currency = payment.currency
# Customer information.
language = self.client.factory.create('ns0:language')
language._code = payment.language
name = self.client.factory.create('ns0:name')
name.first = self.convert_to_ascii(payment.first_name)[:35]
name.last = self.convert_to_ascii(payment.last_name)[:35]
shopper = self.client.factory.create('ns0:shopper')
shopper.gender = "U" # Send unknown gender.
shopper.language = language
shopper.email = payment.email
shopper._id = payment.customer_id
shopper.name = name
# Billing information.
address = self.client.factory.create('ns0:address')
address.street = self.convert_to_ascii(payment.address)[:35]
address.houseNumber = 'N/A'
address.postalCode = payment.postal_code.replace(' ', '') # Spaces aren't allowed in the DocData postal code.
address.city = payment.city[:35]
country = self.client.factory.create('ns0:country')
country._code = payment.country
address.country = country
billTo = self.client.factory.create('ns0:destination')
billTo.address = address
billTo.name = name
# Set the description if there's an order.
description = payment.order.__unicode__()[:50]
if not description:
# TODO Add a setting for default description.
description = "1%Club"
payment.merchant_order_reference = self.generate_merchant_order_reference(payment)
# Execute create payment order request.
reply = self.client.service.create(self.merchant, payment.merchant_order_reference, paymentPreferences,
menuPreferences, shopper, amount, billTo, description)
if hasattr(reply, 'createSuccess'):
payment.payment_order_id = str(reply['createSuccess']['key'])
self._change_status(payment, PaymentStatuses.in_progress) # Note: _change_status calls payment.save().
elif hasattr(reply, 'createError'):
payment.save()
error = reply['createError']['error']
error_message = "{0} {1}".format(error['_code'], error['value'])
logger.error(error_message)
# Log this error to db too.
docdata_payment_logger(payment, 'warn', error_message)
raise DocDataPaymentException(error['_code'], error['value'])
else:
payment.save()
error_message = 'Received unknown reply from DocData. Remote Payment not created.'
logger.error(error_message)
# Log this error to db too.
docdata_payment_logger(payment, 'warn', error_message)
raise DocDataPaymentException('REPLY_ERROR', error_message)
def cancel_payment(self, payment):
# Some preconditions.
if not self.client:
logger.error("Suds client is not configured. Can't cancel a DocData payment order.")
return
if not payment.payment_order_id:
logger.warn('Attempt to cancel payment on Order id {0} which has no payment_order_id.'.format(payment.payment_order_id))
return
# Execute create payment order request.
reply = self.client.service.cancel(self.merchant, payment.payment_order_id)
if hasattr(reply, 'cancelSuccess'):
for docdata_payment in payment.docdata_payments.all():
docdata_payment.status = 'CANCELLED'
docdata_payment.save()
self._change_status(payment, PaymentStatuses.cancelled) # Note: change_status calls payment.save().
elif hasattr(reply, 'cancelError'):
error = reply['cancelError']['error']
error_message = "{0} {1}".format(error['_code'], error['value'])
logger.error(error_message)
raise DocDataPaymentException(error['_code'], error['value'])
else:
error_message = 'Received unknown reply from DocData. Remote Payment not cancelled.'
logger.error(error_message)
raise DocDataPaymentException('REPLY_ERROR', error_message)
def get_payment_url(self, payment, return_url_base=None):
""" Return the Payment URL """
if payment.amount <= 0 or not payment.payment_method_id or \
not self.id_to_model_mapping[payment.payment_method_id] == DocDataPayment:
return None
if not payment.payment_order_id:
self.create_remote_payment_order(payment)
# The basic parameters.
params = {
'payment_cluster_key': payment.payment_order_id,
'merchant_name': self.merchant._name,
'client_language': payment.language,
}
# Add a default payment method if the config has an id.
payment_methods = self.get_payment_methods()
if 'id' in payment_methods[payment.payment_method_id]:
params['default_pm'] = payment_methods[payment.payment_method_id]['id']
# Add return urls.
if return_url_base:
params['return_url_success'] = return_url_base + '/' + payment.language + '/#!/support/thanks/' + str(payment.order.id)
params['return_url_pending'] = return_url_base + '/' + payment.language + '/#!/support/thanks/' + str(payment.order.id)
# TODO This assumes that the order is always a donation order. These Urls will be used when buying vouchers
# TODO too which is incorrect.
params['return_url_canceled'] = return_url_base + '/' + payment.language + '/#!/support/donations'
params['return_url_error'] = return_url_base + '/' + payment.language + '/#!/support/payment/error'
# Special parameters for iDeal.
if payment.payment_method_id == 'dd-ideal' and payment.payment_submethod_id:
params['ideal_issuer_id'] = payment.payment_submethod_id
params['default_act'] = 'true'
if self.test:
payment_url_base = 'https://test.docdatapayments.com/ps/menu'
else:
payment_url_base = 'https://secure.docdatapayments.com/ps/menu'
# Create a DocDataPayment when we need it.
docdata_payment = payment.latest_docdata_payment
if not docdata_payment or not isinstance(docdata_payment, DocDataPayment):
docdata_payment = DocDataPayment()
docdata_payment.docdata_payment_order = payment
docdata_payment.save()
return payment_url_base + '?' + urlencode(params)
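# Illustrative note (comment only, hypothetical values): the redirect URL built above has
# the shape
#   https://test.docdatapayments.com/ps/menu?payment_cluster_key=<order key>&merchant_name=<merchant>&client_language=nl&...
# with the return_url_* and iDeal parameters only present when return_url_base and a
# payment submethod were supplied.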
def update_payment_status(self, payment, status_changed_notification=False):
# Don't do anything if there's no payment or payment_order_id.
if not payment or not payment.payment_order_id:
return
# Execute status request.
reply = self.client.service.status(self.merchant, payment.payment_order_id)
if hasattr(reply, 'statusSuccess'):
report = reply['statusSuccess']['report']
elif hasattr(reply, 'statusError'):
error = reply['statusError']['error']
error_message = "{0} {1}".format(error['_code'], error['value'])
logger.error(error_message)
docdata_payment_logger(payment, PaymentLogLevels.error, error_message)
return
else:
error_message = "REPLY_ERROR Received unknown status reply from DocData."
logger.error(error_message)
docdata_payment_logger(payment, PaymentLogLevels.error, error_message)
return
if not hasattr(report, 'payment'):
docdata_payment_logger(payment, PaymentLogLevels.info, "DocData status report has no payment reports.")
return
for payment_report in report.payment:
# Find or create the correct payment object for current report.
payment_class = self.id_to_model_mapping[payment.payment_method_id]
try:
ddpayment = payment_class.objects.get(payment_id=str(payment_report.id))
except payment_class.MultipleObjectsReturned:
# FIXME. This is a hack to fix errors with duplicate payments to direct debit payments.
ddpayment = payment_class.objects.filter(payment_id=str(payment_report.id)).order_by('created').all()[0]
except payment_class.DoesNotExist:
ddpayment_list = payment.docdata_payments.filter(status='NEW')
ddpayment_list_len = len(ddpayment_list)
if ddpayment_list_len == 0:
ddpayment = payment_class()
ddpayment.docdata_payment_order = payment
elif ddpayment_list_len == 1:
ddpayment = ddpayment_list[0]
else:
docdata_payment_logger(payment, PaymentLogLevels.error,
"Cannot determine where to save the payment report.")
continue
# Save some information from the report.
ddpayment.payment_id = str(payment_report.id)
ddpayment.payment_method = str(payment_report.paymentMethod)
ddpayment.save()
# Some additional checks.
if payment_report.paymentMethod != ddpayment.payment_method:
docdata_payment_logger(payment, PaymentLogLevels.warn,
"Payment method from DocData doesn't match saved payment method. "
"Storing the payment method received from DocData for payment id {0}: {1}".format(
ddpayment.payment_id, payment_report.paymentMethod))
ddpayment.payment_method = str(payment_report.paymentMethod)
ddpayment.save()
if payment_report.authorization.status not in self.status_mapping:
# Note: We continue to process the payment status change on this error.
docdata_payment_logger(payment, PaymentLogLevels.error,
"Received unknown payment status from DocData: {0}".format(
payment_report.authorization.status))
# Update the DocDataPayment status.
if ddpayment.status != payment_report.authorization.status:
docdata_payment_logger(payment, PaymentLogLevels.info,
"DocData payment status changed for payment id {0}: {1} -> {2}".format(
payment_report.id, ddpayment.status, payment_report.authorization.status))
ddpayment.status = str(payment_report.authorization.status)
ddpayment.save()
# Use the latest DocDataPayment status to set the status on the Cowry Payment.
latest_ddpayment = payment.latest_docdata_payment
latest_payment_report = None
for payment_report in report.payment:
if payment_report.id == latest_ddpayment.payment_id:
latest_payment_report = payment_report
break
old_status = payment.status
new_status = self._map_status(latest_ddpayment.status, payment, report.approximateTotals,
latest_payment_report.authorization)
# Detect a nasty error condition that needs to be manually fixed.
total_registered = report.approximateTotals.totalRegistered
if new_status != PaymentStatuses.cancelled and total_registered != payment.order.total:
docdata_payment_logger(payment, PaymentLogLevels.error,
"Order total: {0} does not equal Total Registered: {1}.".format(payment.order.total,
total_registered))
# TODO: Move this logging to AbstractPaymentAdapter when PaymentLogEntry is not abstract.
if old_status != new_status:
if new_status not in PaymentStatuses.values:
docdata_payment_logger(payment, PaymentLogLevels.error,
"Payment status changed {0} -> {1}".format(old_status, PaymentStatuses.unknown))
else:
docdata_payment_logger(payment, PaymentLogLevels.info,
"Payment status changed {0} -> {1}".format(old_status, new_status))
self._change_status(payment, new_status) # Note: change_status calls payment.save().
# Set the payment fee when Payment status is pending or paid.
if payment.status == PaymentStatuses.pending or payment.status == PaymentStatuses.paid:
self.update_payment_fee(payment, payment.latest_docdata_payment.payment_method, 'COWRY_DOCDATA_FEES',
docdata_payment_logger)
def _map_status(self, status, payment=None, totals=None, authorization=None):
new_status = super(DocDataPaymentAdapter, self)._map_status(status)
# Some status mapping overrides.
#
# Integration Manual Order API 1.0 - Document version 1.0, 08-12-2012 - Page 33:
#
# Safe route: The safest route to check whether all payments were made is for the merchants
# to refer to the “Total captured” amount to see whether this equals the “Total registered
# amount”. While this may be the safest indicator, the downside is that it can sometimes take a
# long time for acquirers or shoppers to actually have the money transferred and it can be
# captured.
#
if status == 'AUTHORIZED':
registered_captured_logged = False
if totals.totalRegistered == totals.totalCaptured:
payment_sum = totals.totalCaptured - totals.totalChargedback - totals.totalRefunded
if payment_sum > 0:
new_status = PaymentStatuses.paid
elif payment_sum == 0:
docdata_payment_logger(payment, PaymentLogLevels.info,
"Total Registered: {0} Total Captured: {1} Total Chargedback: {2} Total Refunded: {3}".format(
totals.totalRegistered, totals.totalCaptured, totals.totalChargedback, totals.totalRefunded))
registered_captured_logged = True
# FIXME: Add chargeback fee somehow (currently €0.50).
# Chargeback.
if totals.totalCaptured == totals.totalChargedback:
if hasattr(authorization, 'chargeback') and len(authorization['chargeback']) > 0:
if hasattr(authorization['chargeback'][0], 'reason'):
docdata_payment_logger(payment, PaymentLogLevels.info,
"Payment chargedback: {0}".format(authorization['chargeback'][0]['reason']))
else:
docdata_payment_logger(payment, PaymentLogLevels.info, "Payment chargedback.")
new_status = PaymentStatuses.chargedback
# Refund.
# TODO: Log more info from refund when we have an example.
if totals.totalCaptured == totals.totalRefunded:
docdata_payment_logger(payment, PaymentLogLevels.info, "Payment refunded.")
new_status = PaymentStatuses.refunded
payment.amount = 0
payment.save()
else:
docdata_payment_logger(payment, PaymentLogLevels.error,
"Total Registered: {0} Total Captured: {1} Total Chargedback: {2} Total Refunded: {3}".format(
totals.totalRegistered, totals.totalCaptured, totals.totalChargedback, totals.totalRefunded))
registered_captured_logged = True
docdata_payment_logger(payment, PaymentLogLevels.error,
"Captured, chargeback and refunded sum is negative. Please investigate.")
new_status = PaymentStatuses.unknown
if not registered_captured_logged:
docdata_payment_logger(payment, PaymentLogLevels.info,
"Total Registered: {0} Total Captured: {1}".format(totals.totalRegistered,
totals.totalCaptured))
return new_status
# TODO Use status change log to investigate if these overrides are needed.
# # These overrides are really just guessing.
# latest_capture = authorization.capture[-1]
# if status == 'AUTHORIZED':
# if hasattr(authorization, 'refund') or hasattr(authorization, 'chargeback'):
# new_status = 'cancelled'
# if latest_capture.status == 'FAILED' or latest_capture == 'ERROR':
# new_status = 'failed'
# elif latest_capture.status == 'CANCELLED':
# new_status = 'cancelled'
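# Illustrative note for _map_status above (comment only, hypothetical amounts in cents):
#   totalRegistered=2000, totalCaptured=2000, chargedback=0,    refunded=0    -> paid
#   totalRegistered=2000, totalCaptured=2000, chargedback=2000, refunded=0    -> chargedback
#   totalRegistered=2000, totalCaptured=2000, chargedback=0,    refunded=2000 -> refunded
#   totalRegistered=2000, totalCaptured=0 (nothing captured yet)              -> status from the base mapping is kept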
class WebDirectDocDataDirectDebitPaymentAdapter(DocDataPaymentAdapter):
def get_payment_url(self, payment, return_url_base=None):
raise NotImplementedError
def generate_merchant_order_reference(self, payment):
if self.test:
# For testing we need unique merchant order references that are not based on the order number.
return str(time.time()).replace('.', '-')
else:
return super(WebDirectDocDataDirectDebitPaymentAdapter, self).generate_merchant_order_reference(payment)
def start_payment(self, payment, monthly_order):
# Some preconditions.
if not self.client:
raise DocDataPaymentException('ERROR',
"Suds client is not configured. Can't start a DocData WebDirect payment.")
if not payment.payment_order_id:
raise DocDataPaymentException('ERROR',
"Attempt to start WebDirect payment on Order id {0} which has no payment_order_id.".format(
payment.order.id))
paymentRequestInput = self.client.factory.create('ns0:paymentRequestInput')
# We only need to set amount because of bug in suds library. Otherwise it defaults to order amount.
amount = self.client.factory.create('ns0:amount')
amount.value = str(payment.amount)
amount._currency = payment.currency
paymentRequestInput.paymentAmount = amount
paymentRequestInput.paymentMethod = 'SEPA_DIRECT_DEBIT'
directDebitPaymentInput = self.client.factory.create('ddp:directDebitPaymentInput')
directDebitPaymentInput.iban = monthly_order.iban
directDebitPaymentInput.bic = monthly_order.bic
directDebitPaymentInput.holderCity = self.convert_to_ascii(monthly_order.city)
directDebitPaymentInput.holderName = self.convert_to_ascii(monthly_order.name)
country = self.client.factory.create('ns0:country')
country._code = payment.country
directDebitPaymentInput.holderCountry = country
paymentRequestInput.directDebitPaymentInput = directDebitPaymentInput
# Execute start payment request.
reply = self.client.service.start(self.merchant, payment.payment_order_id, paymentRequestInput)
if hasattr(reply, 'startSuccess'):
self._change_status(payment, PaymentStatuses.in_progress) # Note: _change_status calls payment.save().
update_docdata_webdirect_direct_debit_payment(payment, str(reply['startSuccess']['paymentId']),
monthly_order)
elif hasattr(reply, 'startError'):
error = reply['startError']['error']
error_message = "{0} {1}".format(error['_code'], error['value'])
logger.error(error_message)
raise DocDataPaymentException(error['_code'], error['value'])
else:
error_message = 'Received unknown reply from DocData. WebDirect payment not created.'
logger.error(error_message)
raise DocDataPaymentException('REPLY_ERROR', error_message)
# TODO This method (and delay) should be processed asynchronously by celery.
def update_docdata_webdirect_direct_debit_payment(payment, payment_id, recurring_payment):
# The delay is here to give DocData some time to call our status changed API which creates the
# DocDataWebDirectDirectDebit object.
time.sleep(2)
try:
ddpayment = DocDataWebDirectDirectDebit.objects.get(payment_id=payment_id)
except DocDataWebDirectDirectDebit.DoesNotExist:
# Create the DocDataPayment object to save the info and statuses for the WebDirect payment.
ddpayment = DocDataWebDirectDirectDebit()
ddpayment.docdata_payment_order = payment
ddpayment.payment_method = 'SEPA_DIRECT_DEBIT'
ddpayment.payment_id = payment_id
ddpayment.account_city = recurring_payment.city
ddpayment.account_name = recurring_payment.name
ddpayment.iban = recurring_payment.iban
ddpayment.bic = recurring_payment.bic
ddpayment.save()
| {
"content_hash": "2c384a10c1d63190201f864cc2c3d1ef",
"timestamp": "",
"source": "github",
"line_count": 674,
"max_line_length": 141,
"avg_line_length": 48.4406528189911,
"alnum_prop": 0.6093601641704187,
"repo_name": "onepercentclub/onepercentclub-site",
"id": "42d1d472e837dd4aa70b72bd70bfbc22f43e28f1",
"size": "32676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/cowry_docdata/adapters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "13896"
},
{
"name": "CSS",
"bytes": "351343"
},
{
"name": "HTML",
"bytes": "898027"
},
{
"name": "Handlebars",
"bytes": "246489"
},
{
"name": "JavaScript",
"bytes": "168884"
},
{
"name": "Python",
"bytes": "1511371"
},
{
"name": "Ruby",
"bytes": "1050"
},
{
"name": "Shell",
"bytes": "74046"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from misc import VERSION, PROJECT
MODULE_NAME = 'django-misc'
PACKAGE_DATA = list()
for directory in [ 'templates', 'static' ]:
for root, dirs, files in os.walk( os.path.join( MODULE_NAME, directory )):
for filename in files:
PACKAGE_DATA.append("%s/%s" % ( root[len(MODULE_NAME)+1:], filename ))
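# Illustrative example (comment only): a file at 'django-misc/templates/misc/base.html'
# (hypothetical path) is appended to PACKAGE_DATA as 'templates/misc/base.html', i.e.
# relative to the module directory, which is what the package_data mapping below expects.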
def read( fname ):
try:
return open( os.path.join( os.path.dirname( __file__ ), fname ) ).read()
except IOError:
return ''
META_DATA = dict(
name = PROJECT,
version = VERSION,
description = read('DESCRIPTION'),
long_description = read('README.rst'),
license='MIT',
author = "Illia Polosukhin, Vlad Frolov",
author_email = "[email protected]",
url = "http://github.com/ilblackdragon/django-misc.git",
packages = find_packages(),
package_data = { '': PACKAGE_DATA, },
install_requires = [ 'django>=1.2', ],
)
if __name__ == "__main__":
setup( **META_DATA )
| {
"content_hash": "2230eec12bb7ff4ab1de8502fb71c882",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 82,
"avg_line_length": 23.767441860465116,
"alnum_prop": 0.6135029354207436,
"repo_name": "ilblackdragon/django-misc",
"id": "af24befb77cc0038b54b1c9699d8e655491909e8",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "205"
},
{
"name": "Python",
"bytes": "54133"
}
],
"symlink_target": ""
} |
"""Sensors for the Elexa Guardian integration."""
from __future__ import annotations
from typing import Callable
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_FAHRENHEIT,
TIME_MINUTES,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import PairedSensorEntity, ValveControllerEntity
from .const import (
API_SENSOR_PAIRED_SENSOR_STATUS,
API_SYSTEM_DIAGNOSTICS,
API_SYSTEM_ONBOARD_SENSOR_STATUS,
CONF_UID,
DATA_COORDINATOR,
DATA_UNSUB_DISPATCHER_CONNECT,
DOMAIN,
SIGNAL_PAIRED_SENSOR_COORDINATOR_ADDED,
)
SENSOR_KIND_BATTERY = "battery"
SENSOR_KIND_TEMPERATURE = "temperature"
SENSOR_KIND_UPTIME = "uptime"
SENSOR_ATTRS_MAP = {
SENSOR_KIND_BATTERY: ("Battery", DEVICE_CLASS_BATTERY, None, PERCENTAGE),
SENSOR_KIND_TEMPERATURE: (
"Temperature",
DEVICE_CLASS_TEMPERATURE,
None,
TEMP_FAHRENHEIT,
),
SENSOR_KIND_UPTIME: ("Uptime", None, "mdi:timer", TIME_MINUTES),
}
PAIRED_SENSOR_SENSORS = [SENSOR_KIND_BATTERY, SENSOR_KIND_TEMPERATURE]
VALVE_CONTROLLER_SENSORS = [SENSOR_KIND_TEMPERATURE, SENSOR_KIND_UPTIME]
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable
) -> None:
"""Set up Guardian switches based on a config entry."""
@callback
def add_new_paired_sensor(uid: str) -> None:
"""Add a new paired sensor."""
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
API_SENSOR_PAIRED_SENSOR_STATUS
][uid]
entities = []
for kind in PAIRED_SENSOR_SENSORS:
name, device_class, icon, unit = SENSOR_ATTRS_MAP[kind]
entities.append(
PairedSensorSensor(
entry, coordinator, kind, name, device_class, icon, unit
)
)
async_add_entities(entities, True)
# Handle adding paired sensors after HASS startup:
hass.data[DOMAIN][DATA_UNSUB_DISPATCHER_CONNECT][entry.entry_id].append(
async_dispatcher_connect(
hass,
SIGNAL_PAIRED_SENSOR_COORDINATOR_ADDED.format(entry.data[CONF_UID]),
add_new_paired_sensor,
)
)
sensors = []
# Add all valve controller-specific sensors:
for kind in VALVE_CONTROLLER_SENSORS:
name, device_class, icon, unit = SENSOR_ATTRS_MAP[kind]
sensors.append(
ValveControllerSensor(
entry,
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id],
kind,
name,
device_class,
icon,
unit,
)
)
# Add all paired sensor-specific sensors:
for coordinator in hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
API_SENSOR_PAIRED_SENSOR_STATUS
].values():
for kind in PAIRED_SENSOR_SENSORS:
name, device_class, icon, unit = SENSOR_ATTRS_MAP[kind]
sensors.append(
PairedSensorSensor(
entry, coordinator, kind, name, device_class, icon, unit
)
)
async_add_entities(sensors)
class PairedSensorSensor(PairedSensorEntity, SensorEntity):
"""Define a binary sensor related to a Guardian valve controller."""
def __init__(
self,
entry: ConfigEntry,
coordinator: DataUpdateCoordinator,
kind: str,
name: str,
device_class: str | None,
icon: str | None,
unit: str | None,
) -> None:
"""Initialize."""
super().__init__(entry, coordinator, kind, name, device_class, icon)
self._state = None
self._unit = unit
@property
def available(self) -> bool:
"""Return whether the entity is available."""
return self.coordinator.last_update_success
@property
def state(self) -> str:
"""Return the sensor state."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity, if any."""
return self._unit
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity."""
if self._kind == SENSOR_KIND_BATTERY:
self._state = self.coordinator.data["battery"]
elif self._kind == SENSOR_KIND_TEMPERATURE:
self._state = self.coordinator.data["temperature"]
class ValveControllerSensor(ValveControllerEntity, SensorEntity):
"""Define a generic Guardian sensor."""
def __init__(
self,
entry: ConfigEntry,
coordinators: dict[str, DataUpdateCoordinator],
kind: str,
name: str,
device_class: str | None,
icon: str | None,
unit: str | None,
) -> None:
"""Initialize."""
super().__init__(entry, coordinators, kind, name, device_class, icon)
self._state = None
self._unit = unit
@property
def available(self) -> bool:
"""Return whether the entity is available."""
if self._kind == SENSOR_KIND_TEMPERATURE:
return self.coordinators[
API_SYSTEM_ONBOARD_SENSOR_STATUS
].last_update_success
if self._kind == SENSOR_KIND_UPTIME:
return self.coordinators[API_SYSTEM_DIAGNOSTICS].last_update_success
return False
@property
def state(self) -> str:
"""Return the sensor state."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity, if any."""
return self._unit
async def _async_continue_entity_setup(self) -> None:
"""Register API interest (and related tasks) when the entity is added."""
if self._kind == SENSOR_KIND_TEMPERATURE:
self.async_add_coordinator_update_listener(API_SYSTEM_ONBOARD_SENSOR_STATUS)
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity."""
if self._kind == SENSOR_KIND_TEMPERATURE:
self._state = self.coordinators[API_SYSTEM_ONBOARD_SENSOR_STATUS].data[
"temperature"
]
elif self._kind == SENSOR_KIND_UPTIME:
self._state = self.coordinators[API_SYSTEM_DIAGNOSTICS].data["uptime"]
| {
"content_hash": "d91a5446afc04b85adb135c0977e65d3",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 88,
"avg_line_length": 31.733333333333334,
"alnum_prop": 0.6182472989195679,
"repo_name": "adrienbrault/home-assistant",
"id": "48807c9cfeb78977e2608a359d27b900a5b14460",
"size": "6664",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/guardian/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import sys
import epyc
if __name__ == "__main__":
if len(sys.argv) > 1:
print(epyc.render(sys.argv[1]))
else:
string = "\n".join(iter(input, ""))
print(epyc._render(string, {"a": [1, 2, 3]}))
# Node Testing
#import render
#print(render.ForNode("a", "[1, 2, 3]", render.GroupNode([render.TextNode("hello, a:"), render.ExprNode("a"), render.TextNode("!\n")])).render({}))
#print(render.IfNode("x > 10", render.TextNode("Statement is TRUE"), render.TextNode("Statement is FALSE")).render({'x':5}))
#print(render.ForNode("a", "range(100)", render.GroupNode([render.IfNode("a % 3 == 0", render.TextNode("Fizz"), render.TextNode("Buzz"))])).render())
| {
"content_hash": "d6664ea0b527f2d5398fa5faa5934eb6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 150,
"avg_line_length": 43.93333333333333,
"alnum_prop": 0.6312594840667678,
"repo_name": "cyphar/epyc",
"id": "fcf8df62d3d11058935d5a5b597702e3bfd86690",
"size": "1834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16514"
}
],
"symlink_target": ""
} |
import logging
import pickle
from typing import Dict, List, Optional, Tuple, Union
from ray.tune.result import DEFAULT_METRIC
from ray.tune.sample import Categorical, Domain, Float, Integer, Quantized
from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils import flatten_dict
from ray.tune.utils.util import is_nan_or_inf, unflatten_dict
try:
import skopt as sko
except ImportError:
sko = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
def _validate_warmstart(parameter_names: List[str],
points_to_evaluate: List[List],
evaluated_rewards: List):
if points_to_evaluate:
if not isinstance(points_to_evaluate, list):
raise TypeError(
"points_to_evaluate expected to be a list, got {}.".format(
type(points_to_evaluate)))
for point in points_to_evaluate:
if not isinstance(point, list):
raise TypeError(
"points_to_evaluate expected to include list, got {}.".
format(point))
if not len(point) == len(parameter_names):
raise ValueError("Dim of point {}".format(point) +
" and parameter_names {}".format(
parameter_names) + " do not match.")
if points_to_evaluate and evaluated_rewards:
if not isinstance(evaluated_rewards, list):
raise TypeError(
"evaluated_rewards expected to be a list, got {}.".format(
type(evaluated_rewards)))
if not len(evaluated_rewards) == len(points_to_evaluate):
raise ValueError(
"Dim of evaluated_rewards {}".format(evaluated_rewards) +
" and points_to_evaluate {}".format(points_to_evaluate) +
" do not match.")
class SkOptSearch(Searcher):
"""Uses Scikit Optimize (skopt) to optimize hyperparameters.
Scikit-optimize is a black-box optimization library.
Read more here: https://scikit-optimize.github.io.
You will need to install Scikit-Optimize to use this module.
.. code-block:: bash
pip install scikit-optimize
This Search Algorithm requires you to pass in a `skopt Optimizer object`_.
This searcher will automatically filter out any NaN, inf or -inf
results.
Parameters:
optimizer (skopt.optimizer.Optimizer): Optimizer provided
from skopt.
space (dict|list): A dict mapping parameter names to valid parameters,
i.e. tuples for numerical parameters and lists for categorical
parameters. If you passed an optimizer instance as the
`optimizer` argument, this should be a list of parameter names
instead.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list of lists): A list of points you'd like to run
first before sampling from the optimiser, e.g. these could be
parameter configurations you already know work well to help
the optimiser select good values. Each point is a list of the
parameters using the order definition given by parameter_names.
evaluated_rewards (list): If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate. (See tune/examples/skopt_example.py)
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
Tune automatically converts search spaces to SkOpt's format:
.. code-block:: python
config = {
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100)
}
current_best_params = [[10, 0], [15, -20]]
skopt_search = SkOptSearch(
metric="mean_loss",
mode="min",
points_to_evaluate=current_best_params)
tune.run(my_trainable, config=config, search_alg=skopt_search)
If you would like to pass the search space/optimizer manually,
the code would look like this:
.. code-block:: python
parameter_names = ["width", "height"]
parameter_ranges = [(0,20),(-100,100)]
current_best_params = [[10, 0], [15, -20]]
skopt_search = SkOptSearch(
parameter_names=parameter_names,
parameter_ranges=parameter_ranges,
metric="mean_loss",
mode="min",
points_to_evaluate=current_best_params)
tune.run(my_trainable, search_alg=skopt_search)
"""
def __init__(self,
optimizer: Optional[sko.optimizer.Optimizer] = None,
space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
assert sko is not None, """skopt must be installed!
You can install Skopt with the command:
`pip install scikit-optimize`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
super(SkOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
self._initial_points = []
self._parameters = None
self._parameter_names = None
self._parameter_ranges = None
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space, join=True)
self._space = space
if self._space:
if isinstance(optimizer, sko.Optimizer):
if not isinstance(space, list):
raise ValueError(
"You passed an optimizer instance to SkOpt. Your "
"`space` parameter should be a list of parameter"
"names.")
self._parameter_names = space
else:
self._parameter_names = list(space.keys())
self._parameter_ranges = space.values()
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._skopt_opt = optimizer
if self._skopt_opt or self._space:
self._setup_skopt()
self._live_trial_mapping = {}
def _setup_skopt(self):
_validate_warmstart(self._parameter_names, self._points_to_evaluate,
self._evaluated_rewards)
if not self._skopt_opt:
if not self._space:
raise ValueError(
"If you don't pass an optimizer instance to SkOptSearch, "
"pass a valid `space` parameter.")
self._skopt_opt = sko.Optimizer(self._parameter_ranges)
if self._points_to_evaluate and self._evaluated_rewards:
self._skopt_opt.tell(self._points_to_evaluate,
self._evaluated_rewards)
elif self._points_to_evaluate:
self._initial_points = self._points_to_evaluate
self._parameters = self._parameter_names
# Skopt internally minimizes, so "max" => -1
if self._mode == "max":
self._metric_op = -1.
elif self._mode == "min":
self._metric_op = 1.
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self._skopt_opt:
return False
space = self.convert_search_space(config)
self._space = space
self._parameter_names = list(space.keys())
self._parameter_ranges = space.values()
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_skopt()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._skopt_opt:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__,
metric=self._metric,
mode=self._mode))
if self.max_concurrent:
if len(self._live_trial_mapping) >= self.max_concurrent:
return None
if self._initial_points:
suggested_config = self._initial_points[0]
del self._initial_points[0]
else:
suggested_config = self._skopt_opt.ask()
self._live_trial_mapping[trial_id] = suggested_config
return unflatten_dict(dict(zip(self._parameters, suggested_config)))
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
The result is internally negated when interacting with Skopt
so that Skopt Optimizers can "maximize" this value,
as it minimizes on default.
"""
if result:
self._process_result(trial_id, result)
self._live_trial_mapping.pop(trial_id)
def _process_result(self, trial_id: str, result: Dict):
skopt_trial_info = self._live_trial_mapping[trial_id]
if result and not is_nan_or_inf(result[self._metric]):
self._skopt_opt.tell(skopt_trial_info,
self._metric_op * result[self._metric])
def save(self, checkpoint_path: str):
trials_object = (self._initial_points, self._skopt_opt)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(trials_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
trials_object = pickle.load(inputFile)
self._initial_points = trials_object[0]
self._skopt_opt = trials_object[1]
@staticmethod
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a SkOpt search space.")
def resolve_value(domain: Domain) -> Union[Tuple, List]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning("SkOpt search does not support quantization. "
"Dropped quantization.")
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"SkOpt does not support specific sampling methods."
" The {} sampler will be dropped.".format(sampler))
return domain.lower, domain.upper
if isinstance(domain, Integer):
if domain.sampler is not None:
logger.warning(
"SkOpt does not support specific sampling methods."
" The {} sampler will be dropped.".format(sampler))
return domain.lower, domain.upper
if isinstance(domain, Categorical):
return domain.categories
raise ValueError("SkOpt does not support parameters of type "
"`{}`".format(type(domain).__name__))
# Parameter name is e.g. "a/b/c" for nested dicts
space = {
"/".join(path): resolve_value(domain)
for path, domain in domain_vars
}
if join:
spec.update(space)
space = spec
return space
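def _example_convert_search_space():
    """Illustrative sketch only -- a hypothetical helper that is not part of the
    original module. It shows the shape produced by ``convert_search_space``:
    float/integer domains become ``(lower, upper)`` tuples and categorical
    domains become plain lists, roughly
    ``{"width": (0, 20), "height": (-100, 100), "activation": ["relu", "tanh"]}``.
    """
    from ray import tune
    config = {
        "width": tune.uniform(0, 20),
        "height": tune.randint(-100, 100),
        "activation": tune.choice(["relu", "tanh"]),
    }
    return SkOptSearch.convert_search_space(config)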
| {
"content_hash": "6da07cbfe4f91c67701a20eb73cd7265",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 79,
"avg_line_length": 38.86880466472303,
"alnum_prop": 0.5755325532553255,
"repo_name": "richardliaw/ray",
"id": "f8b8d9196ab0e3e487b655016f8a7b92c29122da",
"size": "13332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tune/suggest/skopt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
class DummyRPC(object):
def __init__(self):
self._CALL_TIME_MAP = {}
def call_by_id(self, i=0):
check = self._CALL_TIME_MAP.get(i)
assert(check is None)
self._CALL_TIME_MAP[i] = True
return i
def call_by_ids(self, ids=[]):
check = self._CALL_TIME_MAP.get(str(ids))
assert(check is None)
self._CALL_TIME_MAP[str(ids)] = True
return ids
class DummyRedis(object):
def __init__(self):
self._CALL_TIME_MAP = {}
self._DATA = {'DummyRPC.call_by_id:i,1': 1,
'DummyRPC.call_by_id:i,2': 2}
def get(self, key=None):
check = self._CALL_TIME_MAP.get(key)
assert(check is None)
self._CALL_TIME_MAP[key] = True
if key > 5:
return None
return key
def mget(self, keys=[]):
check = self._CALL_TIME_MAP.get(str(keys))
assert(check is None)
self._CALL_TIME_MAP[str(keys)] = True
result = []
for key in keys:
result.append(self._DATA.get(key))
return result
def mset(self, kv={}):
for key, value in kv.iteritems():
self._DATA[key] = value
return len(kv)
def set(self, key, value):
self._DATA[key] = value
return 1
| {
"content_hash": "86d5d9d8a9a6624c0f1e7060bef1663a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 51,
"avg_line_length": 25.057692307692307,
"alnum_prop": 0.5203376822716808,
"repo_name": "reAsOn2010/webnodeEx",
"id": "1c767152980af86498c32a21e8ee6428bb89809d",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webnodeex/dummy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22114"
},
{
"name": "Shell",
"bytes": "12"
}
],
"symlink_target": ""
} |
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.lltypesystem import rdict
from rpython.rlib.objectmodel import we_are_translated
from rpython.memory.support import mangle_hash
# This is a low-level AddressDict, reusing a lot of the logic from rdict.py.
# xxx this is very dependent on the details of rdict.py
alloc_count = 0 # for debugging
def count_alloc(delta):
"NOT_RPYTHON"
global alloc_count
alloc_count += delta
def newdict(length_estimate=0):
return rdict.ll_newdict_size(DICT, length_estimate)
def dict_allocate():
if not we_are_translated(): count_alloc(+1)
return lltype.malloc(DICT, flavor="raw")
def dict_delete(d):
dict_delete_entries(d.entries)
lltype.free(d, flavor="raw")
if not we_are_translated(): count_alloc(-1)
def dict_allocate_entries(n):
if not we_are_translated(): count_alloc(+1)
# 'raw zero varsize malloc with length field' is not really implemented.
# we can initialize the memory to zero manually
entries = lltype.malloc(ENTRIES, n, flavor="raw")
i = 0
while i < n:
entries[i].key = llmemory.NULL
i += 1
return entries
def dict_delete_entries(entries):
lltype.free(entries, flavor="raw")
if not we_are_translated(): count_alloc(-1)
def _hash(adr):
return mangle_hash(llmemory.cast_adr_to_int(adr))
def dict_keyhash(d, key):
return _hash(key)
def dict_entry_valid(entries, i):
return entries[i].key != llmemory.NULL
def dict_entry_hash(entries, i):
return _hash(entries[i].key)
def dict_get(d, key, default=llmemory.NULL):
return rdict.ll_get(d, key, default)
def dict_add(d, key):
rdict.ll_dict_setitem(d, key, llmemory.NULL)
def dict_insertclean(d, key, value):
rdict.ll_dict_insertclean(d, key, value, _hash(key))
def dict_foreach(d, callback, arg):
entries = d.entries
i = len(entries) - 1
while i >= 0:
if dict_entry_valid(entries, i):
callback(entries[i].key, entries[i].value, arg)
i -= 1
dict_foreach._annspecialcase_ = 'specialize:arg(1)'
ENTRY = lltype.Struct('ENTRY', ('key', llmemory.Address),
('value', llmemory.Address))
ENTRIES = lltype.Array(ENTRY,
adtmeths = {
'allocate': dict_allocate_entries,
'delete': dict_delete_entries,
'valid': dict_entry_valid,
'everused': dict_entry_valid,
'hash': dict_entry_hash,
})
DICT = lltype.Struct('DICT', ('entries', lltype.Ptr(ENTRIES)),
('num_items', lltype.Signed),
('resize_counter', lltype.Signed),
adtmeths = {
'allocate': dict_allocate,
'delete': dict_delete,
'length': rdict.ll_dict_len,
'contains': rdict.ll_contains,
'setitem': rdict.ll_dict_setitem,
'get': dict_get,
'add': dict_add,
'insertclean': dict_insertclean,
'clear': rdict.ll_clear,
'foreach': dict_foreach,
'keyhash': dict_keyhash,
'keyeq': None,
})
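# Illustrative usage sketch (comment only, not part of the original module): with the
# adtmeths above an AddressDict behaves like a raw-malloced dict of addresses, e.g.
#     d = newdict()
#     d.setitem(key_addr, value_addr)     # key_addr/value_addr are llmemory.Address values
#     assert d.contains(key_addr)
#     value = d.get(key_addr)             # returns llmemory.NULL when the key is absent
#     d.foreach(callback, arg)            # callback(key, value, arg) for every valid entry
#     d.delete()                          # frees the raw entries and the dict itself
# The names key_addr/value_addr/callback/arg are hypothetical.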
| {
"content_hash": "b9824f3ac740d94e1b3a120b660dc8f2",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 34.23,
"alnum_prop": 0.563248612328367,
"repo_name": "oblique-labs/pyVM",
"id": "56657e6c003de5ff37f1e5bfaf9a2e81daad4faf",
"size": "3423",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/memory/lldict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Awk",
"bytes": "271"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "771638"
},
{
"name": "C++",
"bytes": "12850"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "M4",
"bytes": "12737"
},
{
"name": "Makefile",
"bytes": "35222"
},
{
"name": "Objective-C",
"bytes": "2224"
},
{
"name": "Python",
"bytes": "18329219"
},
{
"name": "Shell",
"bytes": "15396"
},
{
"name": "Vim script",
"bytes": "1107"
}
],
"symlink_target": ""
} |
"""Train a Fast R-CNN network."""
import caffe
from fast_rcnn.config import cfg
import roi_data_layer.roidb as rdl_roidb
from utils.timer import Timer
import numpy as np
import os
from caffe.proto import caffe_pb2
import google.protobuf as pb2
import google.protobuf.text_format
class SolverWrapper(object):
"""A simple wrapper around Caffe's solver.
This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, output_dir,
pretrained_model=None):
"""Initialize the SolverWrapper."""
self.output_dir = output_dir
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = \
rdl_roidb.add_bbox_regression_targets(roidb)
print 'done'
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print ('Loading pretrained model '
'weights from {:s}').format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
pb2.text_format.Merge(f.read(), self.solver_param)
self.solver.net.layers[0].set_roidb(roidb)
def snapshot(self):
"""Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
scale_bbox_params = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
net.params.has_key('bbox_pred'))
if scale_bbox_params:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
# scale and shift with bbox reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
filename = os.path.join(self.output_dir, filename)
net.save(str(filename))
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1
return filename
def train_model(self, max_iters):
"""Network training loop."""
last_snapshot_iter = -1
timer = Timer()
model_paths = []
while self.solver.iter < max_iters:
# Make one SGD update
timer.tic()
self.solver.step(1)
timer.toc()
if self.solver.iter % (10 * self.solver_param.display) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = self.solver.iter
model_paths.append(self.snapshot())
if last_snapshot_iter != self.solver.iter:
model_paths.append(self.snapshot())
return model_paths
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
print 'Preparing training data...'
rdl_roidb.prepare_roidb(imdb)
print 'done'
return imdb.roidb
def filter_roidb(roidb):
"""Remove roidb entries that have no usable RoIs."""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after,
num, num_after)
return filtered_roidb
def train_net(solver_prototxt, roidb, output_dir,
pretrained_model=None, max_iters=40000):
"""Train a Fast R-CNN network."""
roidb = filter_roidb(roidb)
sw = SolverWrapper(solver_prototxt, roidb, output_dir,
pretrained_model=pretrained_model)
print 'Solving...'
model_paths = sw.train_model(max_iters)
print 'done solving'
return model_paths
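# Illustrative note (comment only): snapshot() above undoes the bbox-target normalization
# computed by rdl_roidb.add_bbox_regression_targets, i.e. per class
#     W_unnormalized = W_trained * stds[:, np.newaxis]
#     b_unnormalized = b_trained * stds + means
# so that the saved .caffemodel predicts raw bbox deltas at test time, while the live
# net is restored to its normalized weights afterwards.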
| {
"content_hash": "148017939e90fc297d1a0dc7e1cbc00f",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 37.57692307692308,
"alnum_prop": 0.588877516206073,
"repo_name": "ucloud/uai-sdk",
"id": "4a6d362ecfbba6d0c4920d0b37e5d3418d8eaba7",
"size": "6111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/caffe/train/faster-rcnn/code/lib/fast_rcnn/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "470557"
}
],
"symlink_target": ""
} |
revision = '2a3bf49ef34'
down_revision = '25d765ee53b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('proposal_activity_item',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('date', sa.Date(), nullable=True),
sa.Column('location', sa.Text(), nullable=True),
sa.Column('html', sa.Text(), nullable=True),
sa.Column('order', sa.Integer(), nullable=True),
sa.Column('proposal_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['proposal_id'], ['proposal.id']),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('proposal_activity_item')
| {
"content_hash": "48fed51740b94ef018b3756a2c6aaf92",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 31.869565217391305,
"alnum_prop": 0.6562073669849932,
"repo_name": "mgax/mptracker",
"id": "03d5cbd6bb1a70aaca02bb3b948b84625fde1bd5",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/2a3bf49ef34_proposalactivity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "388319"
},
{
"name": "HTML",
"bytes": "146505"
},
{
"name": "JavaScript",
"bytes": "300815"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Mako",
"bytes": "268"
},
{
"name": "Python",
"bytes": "438347"
},
{
"name": "Shell",
"bytes": "1893"
}
],
"symlink_target": ""
} |
"""
Python Quake 3 Library
http://misc.slowchop.com/misc/wiki/pyquake3
Copyright (C) 2006-2007 Gerald Kaszuba
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import socket
import re
class Player(object):
"""
Player class
"""
def __init__(self, num, name, frags, ping, address=None, bot=-1):
"""
create a new instance of Player
"""
self.num = num
self.name = name
self.frags = frags
self.ping = ping
self.address = address
self.bot = bot
def __str__(self):
return self.name
def __repr__(self):
return str(self)
class PyQuake3(object):
"""
PyQuake3 class
"""
packet_prefix = '\xff' * 4
player_reo = re.compile(r'^(\d+) (\d+) "(.*)"')
rcon_password = None
port = None
address = None
players = None
values = None
def __init__(self, server, rcon_password=''):
"""
create a new instance of PyQuake3
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.set_server(server)
self.set_rcon_password(rcon_password)
def set_server(self, server):
"""
set IP address and port and connect to socket
"""
try:
self.address, self.port = server.split(':')
except ValueError:
raise ValueError('Server address format must be: "address:port"')
self.port = int(self.port)
self.sock.connect((self.address, self.port))
def get_address(self):
"""
get IP address and port
"""
return '%s:%s' % (self.address, self.port)
def set_rcon_password(self, rcon_password):
"""
set RCON password
"""
self.rcon_password = rcon_password
def send_packet(self, data):
"""
send packet
"""
self.sock.send('%s%s\n' % (self.packet_prefix, data))
def recv(self, timeout=1):
"""
receive packets
"""
self.sock.settimeout(timeout)
try:
return self.sock.recv(8192)
except socket.error as err:
raise Exception('Error receiving the packet: %s' % err[1])
def command(self, cmd, timeout=1, retries=5):
"""
send command and receive response
"""
while retries:
self.send_packet(cmd)
try:
data = self.recv(timeout)
except Exception:
data = None
if data:
return self.parse_packet(data)
retries -= 1
raise Exception('Server response timed out')
def rcon(self, cmd):
"""
send RCON command
"""
r_cmd = self.command('rcon "%s" %s' % (self.rcon_password, cmd))
if r_cmd[1] == 'No rconpassword set on the server.\n' or r_cmd[1] == 'Bad rconpassword.\n':
raise Exception(r_cmd[1][:-1])
return r_cmd
def parse_packet(self, data):
"""
parse the received packet
"""
if data.find(self.packet_prefix) != 0:
raise Exception('Malformed packet')
first_line_length = data.find('\n')
if first_line_length == -1:
raise Exception('Malformed packet')
response_type = data[len(self.packet_prefix):first_line_length]
response_data = data[first_line_length + 1:]
return response_type, response_data
def parse_status(self, data):
"""
parse the response message and return a dict of the server variables
"""
split = data[1:].split('\\')
values = dict(zip(split[::2], split[1::2]))
# if there are \n's in one of the values, it's the list of players
for var, val in values.items():
pos = val.find('\n')
if pos == -1:
continue
split = val.split('\n', 1)
values[var] = split[0]
self.parse_players(split[1])
return values
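# Illustrative example for parse_status (comment only, hypothetical payload):
#     '\\sv_hostname\\My Server\\mapname\\q3dm17\\g_gametype\\0\n3 25 "SomePlayer"\n'
# yields {'sv_hostname': 'My Server', 'mapname': 'q3dm17', 'g_gametype': '0'} and the
# trailing player line is handed to parse_players below.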
def parse_players(self, data):
"""
parse player information - name, frags and ping
"""
self.players = []
for player in data.split('\n'):
if not player:
continue
match = self.player_reo.match(player)
if not match:
continue
frags, ping, name = match.groups()
self.players.append(Player(1, name, frags, ping))
def update(self):
"""
get status
"""
data = self.command('getstatus')[1]
self.values = self.parse_status(data)
def rcon_update(self):
"""
perform RCON status update
"""
status, data = self.rcon('status')
if status == 'print' and data.startswith('map'):
lines = data.split('\n')
players = lines[3:]
self.players = []
for ply in players:
while ply.find('  ') != -1:
ply = ply.replace('  ', ' ')
while ply.find(' ') == 0:
ply = ply[1:]
if ply == '':
continue
ply = ply.split(' ')
try:
self.players.append(Player(int(ply[0]), ply[3], int(ply[1]), int(ply[2]), ply[5]))
except (IndexError, ValueError):
continue
if __name__ == '__main__':
QUAKE = PyQuake3(server='localhost:27960', rcon_password='secret')
QUAKE.update()
print("The server name of '%s' is %s, running map %s with %s player(s)." % (QUAKE.get_address(), QUAKE.values['sv_hostname'], QUAKE.values['mapname'], len(QUAKE.players)))
for gamer in QUAKE.players:
print("%s with %s frags and a %s ms ping" % (gamer.name, gamer.frags, gamer.ping))
QUAKE.rcon_update()
for gamer in QUAKE.players:
print("%s (%s) has IP address of %s" % (gamer.name, gamer.num, gamer.address))
QUAKE.rcon('bigtext "pyquake3 is great"')
| {
"content_hash": "e31d39062a27318c911a1674b3a1610c",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 175,
"avg_line_length": 29.668161434977577,
"alnum_prop": 0.5459492140266021,
"repo_name": "SpunkyBot/spunkybot",
"id": "bf509c813a038e88046b512c35ad8bc62ef94b12",
"size": "6616",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lib/pyquake3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "Makefile",
"bytes": "3467"
},
{
"name": "Python",
"bytes": "279580"
},
{
"name": "Shell",
"bytes": "2209"
}
],
"symlink_target": ""
} |
import logging
import os
import re
import pyqtgraph as pg
from .browser import Browser
from .curves import ResultsCurve, Crosshairs
from .inputs import BooleanInput, IntegerInput, ListInput, ScientificInput, StringInput
from .log import LogHandler
from .Qt import QtCore, QtGui
from ..experiment import parameters, Procedure
from ..experiment.results import Results
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class PlotFrame(QtGui.QFrame):
""" Combines a PyQtGraph Plot with Crosshairs. Refreshes
the plot based on the refresh_time, and allows the axes
to be changed on the fly, which updates the plotted data
"""
LABEL_STYLE = {'font-size': '10pt', 'font-family': 'Arial', 'color': '#000000'}
updated = QtCore.QSignal()
x_axis_changed = QtCore.QSignal(str)
y_axis_changed = QtCore.QSignal(str)
def __init__(self, x_axis=None, y_axis=None, refresh_time=0.2, check_status=True, parent=None):
super().__init__(parent)
self.refresh_time = refresh_time
self.check_status = check_status
self._setup_ui()
self.change_x_axis(x_axis)
self.change_y_axis(y_axis)
def _setup_ui(self):
self.setAutoFillBackground(False)
self.setStyleSheet("background: #fff")
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.setFrameShadow(QtGui.QFrame.Sunken)
self.setMidLineWidth(1)
vbox = QtGui.QVBoxLayout(self)
self.plot_widget = pg.PlotWidget(self, background='#ffffff')
self.coordinates = QtGui.QLabel(self)
self.coordinates.setMinimumSize(QtCore.QSize(0, 20))
self.coordinates.setStyleSheet("background: #fff")
self.coordinates.setText("")
self.coordinates.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
vbox.addWidget(self.plot_widget)
vbox.addWidget(self.coordinates)
self.setLayout(vbox)
self.plot = self.plot_widget.getPlotItem()
self.crosshairs = Crosshairs(self.plot,
pen=pg.mkPen(color='#AAAAAA', style=QtCore.Qt.DashLine))
self.crosshairs.coordinates.connect(self.update_coordinates)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_curves)
self.timer.timeout.connect(self.crosshairs.update)
self.timer.timeout.connect(self.updated)
self.timer.start(int(self.refresh_time * 1e3))
def update_coordinates(self, x, y):
self.coordinates.setText("(%g, %g)" % (x, y))
def update_curves(self):
for item in self.plot.items:
if isinstance(item, ResultsCurve):
if self.check_status:
if item.results.procedure.status == Procedure.RUNNING:
item.update()
else:
item.update()
def parse_axis(self, axis):
""" Returns the units of an axis by searching the string
"""
units_pattern = r"\((?P<units>\w+)\)"
match = re.search(units_pattern, axis)
if match:
if 'units' in match.groupdict():
label = re.sub(units_pattern, '', axis)
return label, match.groupdict()['units']
else:
return axis, None
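# Illustrative example (comment only): parse_axis('Temperature (K)') returns
# ('Temperature ', 'K') and parse_axis('Iteration') returns ('Iteration', None);
# the label/units pair feeds the setLabel calls in change_x_axis/change_y_axis below.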
def change_x_axis(self, axis):
for item in self.plot.items:
if isinstance(item, ResultsCurve):
item.x = axis
item.update()
label, units = self.parse_axis(axis)
self.plot.setLabel('bottom', label, units=units, **self.LABEL_STYLE)
self.x_axis = axis
self.x_axis_changed.emit(axis)
def change_y_axis(self, axis):
for item in self.plot.items:
if isinstance(item, ResultsCurve):
item.y = axis
item.update()
label, units = self.parse_axis(axis)
self.plot.setLabel('left', label, units=units, **self.LABEL_STYLE)
self.y_axis = axis
self.y_axis_changed.emit(axis)
class PlotWidget(QtGui.QWidget):
""" Extends the PlotFrame to allow different columns
of the data to be dynamically choosen
"""
def __init__(self, columns, x_axis=None, y_axis=None, refresh_time=0.2, check_status=True,
parent=None):
super().__init__(parent)
self.columns = columns
self.refresh_time = refresh_time
self.check_status = check_status
self._setup_ui()
self._layout()
if x_axis is not None:
self.columns_x.setCurrentIndex(self.columns_x.findText(x_axis))
self.plot_frame.change_x_axis(x_axis)
if y_axis is not None:
self.columns_y.setCurrentIndex(self.columns_y.findText(y_axis))
self.plot_frame.change_y_axis(y_axis)
def _setup_ui(self):
self.columns_x_label = QtGui.QLabel(self)
self.columns_x_label.setMaximumSize(QtCore.QSize(45, 16777215))
self.columns_x_label.setText('X Axis:')
self.columns_y_label = QtGui.QLabel(self)
self.columns_y_label.setMaximumSize(QtCore.QSize(45, 16777215))
self.columns_y_label.setText('Y Axis:')
self.columns_x = QtGui.QComboBox(self)
self.columns_y = QtGui.QComboBox(self)
for column in self.columns:
self.columns_x.addItem(column)
self.columns_y.addItem(column)
self.columns_x.activated.connect(self.update_x_column)
self.columns_y.activated.connect(self.update_y_column)
self.plot_frame = PlotFrame(
self.columns[0],
self.columns[1],
self.refresh_time,
self.check_status
)
self.updated = self.plot_frame.updated
self.plot = self.plot_frame.plot
self.columns_x.setCurrentIndex(0)
self.columns_y.setCurrentIndex(1)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.columns_x_label)
hbox.addWidget(self.columns_x)
hbox.addWidget(self.columns_y_label)
hbox.addWidget(self.columns_y)
vbox.addLayout(hbox)
vbox.addWidget(self.plot_frame)
self.setLayout(vbox)
def sizeHint(self):
return QtCore.QSize(300, 600)
def new_curve(self, results, color=pg.intColor(0), **kwargs):
if 'pen' not in kwargs:
kwargs['pen'] = pg.mkPen(color=color, width=2)
if 'antialias' not in kwargs:
kwargs['antialias'] = False
curve = ResultsCurve(results,
x=self.plot_frame.x_axis,
y=self.plot_frame.y_axis,
**kwargs
)
curve.setSymbol(None)
curve.setSymbolBrush(None)
return curve
def update_x_column(self, index):
axis = self.columns_x.itemText(index)
self.plot_frame.change_x_axis(axis)
def update_y_column(self, index):
axis = self.columns_y.itemText(index)
self.plot_frame.change_y_axis(axis)
class BrowserWidget(QtGui.QWidget):
def __init__(self, *args, parent=None):
super().__init__(parent)
self.browser_args = args
self._setup_ui()
self._layout()
def _setup_ui(self):
self.browser = Browser(*self.browser_args, parent=self)
self.clear_button = QtGui.QPushButton('Clear all', self)
self.clear_button.setEnabled(False)
self.hide_button = QtGui.QPushButton('Hide all', self)
self.hide_button.setEnabled(False)
self.show_button = QtGui.QPushButton('Show all', self)
self.show_button.setEnabled(False)
self.open_button = QtGui.QPushButton('Open', self)
self.open_button.setEnabled(True)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.show_button)
hbox.addWidget(self.hide_button)
hbox.addWidget(self.clear_button)
hbox.addStretch()
hbox.addWidget(self.open_button)
vbox.addLayout(hbox)
vbox.addWidget(self.browser)
self.setLayout(vbox)
class InputsWidget(QtGui.QWidget):
def __init__(self, procedure_class, inputs=(), parent=None):
super().__init__(parent)
self._procedure_class = procedure_class
self._procedure = procedure_class()
self._inputs = inputs
self._setup_ui()
self._layout()
def _setup_ui(self):
parameter_objects = self._procedure.parameter_objects()
for name in self._inputs:
parameter = parameter_objects[name]
if parameter.ui_class is not None:
element = parameter.ui_class(parameter)
elif isinstance(parameter, parameters.FloatParameter):
element = ScientificInput(parameter)
elif isinstance(parameter, parameters.IntegerParameter):
element = IntegerInput(parameter)
elif isinstance(parameter, parameters.BooleanParameter):
# noinspection PyArgumentList
element = BooleanInput(parameter) # TODO not implemented yet
elif isinstance(parameter, parameters.ListParameter):
# noinspection PyArgumentList
element = ListInput(parameter) # TODO not implemented yet
elif isinstance(parameter, parameters.Parameter):
element = StringInput(parameter)
setattr(self, name, element)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(6)
parameters = self._procedure.parameter_objects()
for name in self._inputs:
label = QtGui.QLabel(self)
label.setText("%s:" % parameters[name].name)
vbox.addWidget(label)
vbox.addWidget(getattr(self, name))
self.setLayout(vbox)
def set_parameters(self, parameter_objects):
for name in self._inputs:
element = getattr(self, name)
element.set_parameter(parameter_objects[name])
def get_procedure(self):
""" Returns the current procedure """
self._procedure = self._procedure_class()
parameter_values = {}
for name in self._inputs:
element = getattr(self, name)
parameter_values[name] = element.parameter.value
self._procedure.set_parameters(parameter_values)
return self._procedure
class LogWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self._setup_ui()
self._layout()
def _setup_ui(self):
self.view = QtGui.QPlainTextEdit()
self.view.setReadOnly(True)
self.handler = LogHandler()
self.handler.setFormatter(logging.Formatter(
fmt='%(asctime)s : %(message)s (%(levelname)s)',
datefmt='%m/%d/%Y %I:%M:%S %p'
))
self.handler.record.connect(self.view.appendPlainText)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(0)
vbox.addWidget(self.view)
self.setLayout(vbox)
class ResultsDialog(QtGui.QFileDialog):
def __init__(self, columns, x_axis=None, y_axis=None, parent=None):
super().__init__(parent)
self.columns = columns
self.x_axis, self.y_axis = x_axis, y_axis
self._setup_ui()
def _setup_ui(self):
preview_tab = QtGui.QTabWidget()
vbox = QtGui.QVBoxLayout()
param_vbox = QtGui.QVBoxLayout()
vbox_widget = QtGui.QWidget()
param_vbox_widget = QtGui.QWidget()
self.plot_widget = PlotWidget(self.columns, self.x_axis, self.y_axis, parent=self)
self.plot = self.plot_widget.plot
self.preview_param = QtGui.QTreeWidget()
param_header = QtGui.QTreeWidgetItem(["Name", "Value"])
self.preview_param.setHeaderItem(param_header)
self.preview_param.setColumnWidth(0, 150)
self.preview_param.setAlternatingRowColors(True)
vbox.addWidget(self.plot_widget)
param_vbox.addWidget(self.preview_param)
vbox_widget.setLayout(vbox)
param_vbox_widget.setLayout(param_vbox)
preview_tab.addTab(vbox_widget, "Plot Preview")
preview_tab.addTab(param_vbox_widget, "Run Parameters")
self.layout().addWidget(preview_tab, 0, 5, 4, 1)
self.layout().setColumnStretch(5, 1)
self.setMinimumSize(900, 500)
self.resize(900, 500)
self.setFileMode(QtGui.QFileDialog.ExistingFiles)
self.currentChanged.connect(self.update_plot)
def update_plot(self, filename):
self.plot.clear()
if not os.path.isdir(filename) and filename != '':
try:
results = Results.load(str(filename))
except ValueError:
return
except Exception as e:
raise e
curve = ResultsCurve(results,
x=self.plot_widget.plot_frame.x_axis,
y=self.plot_widget.plot_frame.y_axis,
pen=pg.mkPen(color=(255, 0, 0), width=1.75),
antialias=True
)
curve.update()
self.plot.addItem(curve)
self.preview_param.clear()
for key, param in results.procedure.parameter_objects().items():
new_item = QtGui.QTreeWidgetItem([param.name, str(param)])
self.preview_param.addTopLevelItem(new_item)
self.preview_param.sortItems(0, QtCore.Qt.AscendingOrder)
| {
"content_hash": "ed5365fae41dfe912e04e9036cba8151",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 99,
"avg_line_length": 35.75064267352185,
"alnum_prop": 0.6019270870784497,
"repo_name": "dvspirito/pymeasure",
"id": "c004ff06f1b2f01f47e5ae645bd0cf792d61808c",
"size": "15064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymeasure/display/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "458273"
}
],
"symlink_target": ""
} |
import random
import sys
import time
import numpy as np
from pybrain.structure import LinearLayer, SigmoidLayer, FeedForwardNetwork, FullConnection
from pybrain.structure.networks import NeuronDecomposableNetwork
import gym
# ---- TASKS
class ClassifyTask(object):
def get_problem_size(self):
"""Returns the number of individuals to be scored simultaneously"""
return 1
def fitness(self, individual, n=10):
"""Maps individuals to scores"""
shape = (n, individual.net.indim)
patterns = np.reshape(np.random.randint(
low=0, high=10, size=shape[0] * shape[1]), shape)
score = 0
for p in patterns:
# Definition of problem
true_output = np.zeros((individual.net.outdim, 1))
true_output[0] = 1 if p[0] > 5 else 0
result = individual.net.activate(p)
# Scoring of problem
score += 100 / (np.linalg.norm(result - true_output) + 1)
individual.score = score / n
def max_score(self):
return 100
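# Worked example of the scoring rule above (illustrative): a perfect activation
# gives ||result - true_output|| == 0, so that pattern contributes
# 100 / (0 + 1) = 100, while an activation off by 1 contributes 100 / 2 = 50.
# Averaging over the n patterns keeps individual.score within max_score().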
class AIGymTask(object):
def __init__(self, env):
self.env = env
def fitness(self, individual, steps, trials, render=False):
total_score = 0
for t in range(trials):
observation = self.env.reset()
for s in range(steps):
if(render):
self.env.render()
action = np.argmax(individual.net.activate(observation))
observation, reward, done, info = self.env.step(action)
total_score += reward
if done:
break
individual.score = total_score / trials
class Individual(object):
"""Associates a network with the components which created it"""
def __init__(self, net, components, gen_num):
self.net = net
self.components = components
self.gen_num = gen_num
self.score = 0
class GeneticNetworkOptimizer(object):
"""
Optimizes a network for an external task, decomposing the network
using ESP. Each individual is a combination of independently
evolved neurons. These are evolved in separate populations.
"""
# Population stats
pop_size = 25
runs = 1
elitism = 10
# The mutation rate, in percent
mutation_rate = 1
def __init__(self, template_net):
"""Initializes the optimizer"""
self.template_net = template_net
self._generate_populations()
self.gen_num = 0
def _mutate_population(self, population):
"""A mutation funtion that mutates a population."""
for individual in population:
for position, trait in enumerate(individual):
if random.randint(0, 100) < self.mutation_rate:
individual[position] += np.random.normal()
# ---- HELPER FUNCTIONS
def _reproduce(self, scores, population, elitism=False):
"""
Reproduces a population
"""
# If we're elitist, give the best-performing individual an advantage
if elitism:
scores[np.argmax(scores)] *= elitism
# Shift scores so they are strictly positive before normalizing
min_score = np.min(scores)
if min_score <= 0:
    scores = scores - min_score + 1
# Normalize scores into probabilities
total_score = sum(scores)
scores /= total_score
# `np.nan in scores` never matches because NaN != NaN, so test explicitly
if np.isnan(scores).any():
    print("Error: NaN encountered in normalized scores")
    print(scores)
# Choose parents
choices = np.random.choice(
range(population.shape[0]),
(len(population), 2),
p=scores
)
# Make new population
new_pop = np.zeros(population.shape)
# ---- CROSS OVER
# Generate cross-over points
raw_cross_over_points = np.random.normal(
loc=population.shape[1] / 2,
scale=population.shape[1] / 6,
size=(population.shape[0])
)
cross_over_points = np.clip(
np.rint(raw_cross_over_points), 0, population.shape[1])
for index, parents in enumerate(choices):
cp = int(cross_over_points[index])
new_pop[index, :] = np.concatenate(
(population[parents[0], :cp], population[parents[1], cp:]))
return new_pop
def _generate_populations(self):
"""Generate populations based on the template net"""
template_decomposition = self.template_net.getDecomposition()
self.num_populations = len(template_decomposition)
self.populations = []
for i in range(self.num_populations):
shape = (self.pop_size, len(template_decomposition))
population = np.reshape(np.random.normal(
size=shape[0] * shape[1]), shape)
self.populations.append(population)
# ---- EXTERNAL INTERFACE
def generate_individuals(self, num_individuals):
"""Generates individuals from this generation for use in testing"""
shape = (num_individuals, len(self.populations))
combinations = np.reshape(np.random.randint(
0, self.pop_size, size=shape[0] * shape[1]), shape)
individuals = []
for i, cb in enumerate(combinations):
net = self.template_net.copy()
net.setDecomposition([pop[cb[k], :] for k, pop in enumerate(self.populations)])
individuals.append(Individual(net, cb, self.gen_num))
return individuals
def run_generation(self, individuals):
"""Use scoring of individuals to define fitness of the network
components, and evolve them"""
# Accumulate scores
scores = np.zeros((len(self.populations), self.pop_size))
count = np.zeros((len(self.populations), self.pop_size))
for i, individual in enumerate(individuals):
assert(individual.gen_num == self.gen_num)
for pop, comp in enumerate(individual.components):
scores[pop, comp] += individual.score
count[pop, comp] += 1
with np.errstate(divide='ignore', invalid='ignore'):
norm_scores = np.divide(scores, count)
norm_scores[norm_scores == np.inf] = 0
norm_scores = np.nan_to_num(norm_scores)
print("Generation: {}, Average score: {:.3f} Max score: {:.3f}".format(
self.gen_num, norm_scores.mean(), np.nanmax(norm_scores)))
for p in range(len(self.populations)):
new_population = self._reproduce(
norm_scores[p, :], self.populations[p])
self._mutate_population(new_population)
self.populations[p] = new_population
self.gen_num += 1
def get_dim(space):
if isinstance(space, gym.spaces.Discrete):
return space.n
else:
return space.shape[0]
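# Illustrative: for CartPole-v0 below, the observation space is a Box of
# shape (4,) and the action space is Discrete(2), so get_dim() yields a
# 4-input, 2-output network.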
def main():
env = gym.make('CartPole-v0')
in_dim = get_dim(env.observation_space)
out_dim = get_dim(env.action_space)
# Create the network
net = FeedForwardNetwork()
# Interface layers
inLayer = LinearLayer(in_dim)
outLayer = LinearLayer(out_dim)
# Internal Layers
hiddenLayer1 = SigmoidLayer(6)
net.addInputModule(inLayer)
net.addModule(hiddenLayer1)
net.addOutputModule(outLayer)
net.addConnection(FullConnection(inLayer, hiddenLayer1))
net.addConnection(FullConnection(hiddenLayer1, outLayer))
network = NeuronDecomposableNetwork.convertNormalNetwork(net)
network.sortModules()
optimizer = GeneticNetworkOptimizer(network)
task = AIGymTask(env)
gen_count = 10000
epsilon = 0.01
for gen in range(gen_count):
pop = optimizer.generate_individuals(100)
for p in pop:
task.fitness(p, steps=200, trials=10)
optimizer.run_generation(pop)
if (__name__ == "__main__"):
main()
| {
"content_hash": "8ba18e05003278965770635fb1474a40",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 91,
"avg_line_length": 29.893939393939394,
"alnum_prop": 0.5964267612772428,
"repo_name": "A-Malone/coop-neural-nets",
"id": "a73e48f2c02be325b86c4e150f31d02da9ae615a",
"size": "7892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openai_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36129"
}
],
"symlink_target": ""
} |
import unittest
try:
from unittest.mock import Mock, patch, mock_open, MagicMock
except ImportError:
from mock import Mock, patch, mock_open, MagicMock
class TestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.mock_open = mock_open
self.Mock = Mock
self.MagicMock = MagicMock
super(TestCase, self).__init__(*args, **kwargs)
def patch(self, *args, **kwargs):
patcher = patch(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def patch_object(self, *args, **kwargs):
patcher = patch.object(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def patch_multiple(self, *args, **kwargs):
patcher = patch.multiple(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def patch_dict(self, *args, **kwargs):
patcher = patch.dict(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def assertCalledOnceWith(self, mock_obj, *args, **kwargs):
mock_obj.assert_called_once_with(*args, **kwargs)
def assertAnyCallWith(self, mock_obj, *args, **kwargs):
mock_obj.assert_any_call(*args, **kwargs)
def assertCalled(self, mock_obj):
self.assertTrue(mock_obj.called)
def assertIsMock(self, mock_obj):
self.assertIsInstance(mock_obj, Mock)
def assertIsMagicMock(self, mock_obj):
self.assertIsInstance(mock_obj, MagicMock)
def assertIsMocked(self, mock_obj):
self.assertIsInstance(mock_obj, (Mock, MagicMock))
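# Minimal usage sketch (illustrative, not part of the original module; the
# patch target 'mypkg.client.requests' is a hypothetical example):
#
#     class FetchTest(TestCase):
#         def test_fetch(self):
#             fake_requests = self.patch('mypkg.client.requests')
#             fake_requests.get.return_value.status_code = 200
#             ...  # exercise the code under test
#             self.assertCalled(fake_requests.get)
#
# Because every patch helper registers addCleanup(patcher.stop), patches are
# undone automatically when each test finishes.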
| {
"content_hash": "d43738724f8bea824d6f29322a15ceeb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 63,
"avg_line_length": 30,
"alnum_prop": 0.6304093567251462,
"repo_name": "marcwebbie/mockie",
"id": "6ec780c1624191248e9cb7b52506c85a0f199d63",
"size": "1710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mockie/test_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1469"
},
{
"name": "Python",
"bytes": "5037"
}
],
"symlink_target": ""
} |
import contextlib
import os
import json
import sys
import jsonpickle
import plac
from selenium import webdriver
from huxley.run import TestRun
from huxley.errors import TestError
DRIVERS = {
'firefox': webdriver.Firefox,
'chrome': webdriver.Chrome,
'ie': webdriver.Ie,
'opera': webdriver.Opera
}
CAPABILITIES = {
'firefox': webdriver.DesiredCapabilities.FIREFOX,
'chrome': webdriver.DesiredCapabilities.CHROME,
'ie': webdriver.DesiredCapabilities.INTERNETEXPLORER,
'opera': webdriver.DesiredCapabilities.OPERA
}
@plac.annotations(
url=plac.Annotation('URL to hit'),
filename=plac.Annotation('Test file location'),
postdata=plac.Annotation('File for POST data or - for stdin'),
record=plac.Annotation('Record a test', 'flag', 'r', metavar='URL'),
rerecord=plac.Annotation('Re-run the test but take new screenshots', 'flag', 'R'),
sleepfactor=plac.Annotation('Sleep interval multiplier', 'option', 'f', float, metavar='FLOAT'),
browser=plac.Annotation(
'Browser to use, either firefox, chrome, phantomjs, ie or opera.', 'option', 'b', str, metavar='NAME'
),
remote=plac.Annotation('Remote WebDriver to use', 'option', 'w', metavar='URL'),
local=plac.Annotation('Local WebDriver URL to use', 'option', 'l', metavar='URL'),
diffcolor=plac.Annotation('Diff color for errors (i.e. 0,255,0)', 'option', 'd', str, metavar='RGB'),
screensize=plac.Annotation('Width and height for screen (i.e. 1024x768)', 'option', 's', metavar='SIZE'),
autorerecord=plac.Annotation('Playback test and automatically rerecord if it fails', 'flag', 'a'),
save_diff=plac.Annotation('Save information about failures as last.png and diff.png', 'flag', 'e')
)
def main(
url,
filename,
postdata=None,
record=False,
rerecord=False,
sleepfactor=1.0,
browser='firefox',
remote=None,
local=None,
diffcolor='0,255,0',
screensize='1024x768',
autorerecord=False,
save_diff=False):
if postdata:
if postdata == '-':
postdata = sys.stdin.read()
else:
with open(postdata, 'r') as f:
postdata = json.loads(f.read())
try:
if remote:
d = webdriver.Remote(remote, CAPABILITIES[browser])
else:
d = DRIVERS[browser]()
screensize = tuple(int(x) for x in screensize.split('x'))
except KeyError:
raise ValueError(
'Invalid browser %r; valid browsers are %r.' % (browser, DRIVERS.keys())
)
try:
os.makedirs(filename)
except:
pass
diffcolor = tuple(int(x) for x in diffcolor.split(','))
jsonfile = os.path.join(filename, 'record.json')
with contextlib.closing(d):
if record:
if local:
local_d = webdriver.Remote(local, CAPABILITIES[browser])
else:
local_d = d
with contextlib.closing(local_d):
with open(jsonfile, 'w') as f:
f.write(
jsonpickle.encode(
TestRun.record(local_d, d, (url, postdata), screensize, filename, diffcolor, sleepfactor, save_diff)
)
)
print 'Test recorded successfully'
return 0
elif rerecord:
with open(jsonfile, 'r') as f:
TestRun.rerecord(jsonpickle.decode(f.read()), filename, (url, postdata), d, sleepfactor, diffcolor, save_diff)
print 'Test rerecorded successfully'
return 0
elif autorerecord:
with open(jsonfile, 'r') as f:
test = jsonpickle.decode(f.read())
try:
print 'Running test to determine if we need to rerecord'
TestRun.playback(test, filename, (url, postdata), d, sleepfactor, diffcolor, save_diff)
print 'Test played back successfully'
return 0
except TestError:
print 'Test failed, rerecording...'
TestRun.rerecord(test, filename, (url, postdata), d, sleepfactor, diffcolor, save_diff)
print 'Test rerecorded successfully'
return 2
else:
with open(jsonfile, 'r') as f:
TestRun.playback(jsonpickle.decode(f.read()), filename, (url, postdata), d, sleepfactor, diffcolor, save_diff)
print 'Test played back successfully'
return 0
if __name__ == '__main__':
sys.exit(plac.call(main))
| {
"content_hash": "9fc6f870e36a85e6a0c17ff46e726e4e",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 128,
"avg_line_length": 36.952,
"alnum_prop": 0.5897380385364798,
"repo_name": "10io/huxley",
"id": "2139379ee242df34903e41b547df14e6e2fb652a",
"size": "5196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huxley/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26169"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('books', '0002_auto_20170131_0803'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('sex', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
],
managers=[
('people', django.db.models.manager.Manager()),
],
),
migrations.AddField(
model_name='book',
name='num_pages',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='author',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='e-mail'),
),
]
| {
"content_hash": "6c1f4c816390afde24b857d6ee568213",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.5397236614853195,
"repo_name": "widodopangestu/mysite",
"id": "8c25fc4061fb8abe3469fdd7d999fc812dc54808",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/migrations/0003_auto_20170208_1149.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7323"
},
{
"name": "Python",
"bytes": "24235"
}
],
"symlink_target": ""
} |
from data_types.user import User
class DiscussionComment:
"""
GitHub DiscussionComment
https://developer.github.com/v3/discussions/comments/
Attributes:
id: Comment id
node_id: Node id
html_url: Public URL for discussion comment on github.com
body: Discussion comment body text
user: Discussion comment creator User object
created_at: Opening time
updated_at: Updating time
"""
def __init__(self, data):
# Internal GitHub id
self.id = data.get('id', 0)
self.node_id = data.get('node_id', 0)
# Body
self.body = data.get('body', '')
# Public link
self.html_url = data.get('html_url', '')
# Who created
self.user = None
if 'user' in data:
self.user = User(data['user'])
# Dates
self.created_at = data.get('created_at', '')
self.updated_at = data.get('updated_at', '')
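# Illustrative usage (not part of the original file; the payload below is a
# hypothetical, trimmed webhook fragment):
#
#     payload = {
#         'id': 1,
#         'body': 'Looks good to me',
#         'html_url': 'https://github.com/org/repo/discussions/1#discussioncomment-1',
#         'user': {'login': 'octocat'},
#         'created_at': '2021-01-01T00:00:00Z',
#     }
#     comment = DiscussionComment(payload)
#     # comment.user is a User instance; any missing key falls back to the
#     # defaults used in __init__ above.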
| {
"content_hash": "f6502eab82d57be3f9d4f0b4233b987a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 65,
"avg_line_length": 24.3,
"alnum_prop": 0.565843621399177,
"repo_name": "codex-bot/github",
"id": "ef1371fd8185ba3a1cebfbb9103a0771028ffdaf",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "github/data_types/discussion_comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "208"
},
{
"name": "Python",
"bytes": "66506"
}
],
"symlink_target": ""
} |
"""
relshell.columndef
~~~~~~~~~~~~~~~~~~
:synopsis: Provides column definitions.
"""
import re
from relshell.type import Type
class ColumnDef(object):
"""Specifies column's features (name, type, ...)."""
required_fields = [
'name',
]
"""Required fields for column definition.
:param name: name of column with `name_format <#relshell.columndef.ColumnDef.name_format>`_
"""
optional_fields = [
'type',
]
"""Optional fields for column definition.
:param type: shellstream types used for strict type checking (one of `Type.type_list <#relshell.type.Type.type_list>`_)
"""
name_format = '^[_a-zA-Z][_a-zA-Z0-9]*$'
"""`name` field's format. """
_pat_name = re.compile(name_format)
# APIs
def __init__(self, column_def):
"""Creates column definition object.
:param column_def: e.g. ``{'name': 'col1', 'type': 'STRING'}``
:raises: `AttributeError` if `column_def` has invalid format
"""
ColumnDef._chk_unsupported_fields(column_def)
ColumnDef._chk_required_fields(column_def)
self._set_attrs(column_def)
# Private functions
@staticmethod
def _chk_unsupported_fields(coldef):
all_fields = set(ColumnDef.required_fields) | set(ColumnDef.optional_fields)
for k in coldef.iterkeys():
if k not in all_fields:
raise AttributeError("Key '%s' is invalid" % (k))
@staticmethod
def _chk_required_fields(coldef):
for k in ColumnDef.required_fields:
if k not in coldef.keys():
raise AttributeError("Key '%s' is required" % (k))
def _set_attrs(self, coldef):
# required attributes
self.name = ColumnDef._gen_name(coldef['name'])
# optional attributes
if 'type' in coldef: self.type = ColumnDef._gen_type(coldef['type'])
@staticmethod
def _gen_name(name):
if not ColumnDef._pat_name.match(name):
raise AttributeError("'%s' is invalid for 'name'" % (name))
return name
@staticmethod
def _gen_type(_type):
try:
return Type(_type)
except NotImplementedError as e:
raise AttributeError("'%s' is invalid for 'type'" % (_type))
| {
"content_hash": "62089dff98f1b236271b77b47e3656e8",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 123,
"avg_line_length": 30.05263157894737,
"alnum_prop": 0.5923817863397548,
"repo_name": "laysakura/relshell",
"id": "796c8efaeb7c79fa6a251adb67d93e9e7defd694",
"size": "2308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relshell/columndef.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7962"
},
{
"name": "Python",
"bytes": "78154"
}
],
"symlink_target": ""
} |
from base import WithDaemonTestCase
from pysource.tests import command
class SourceInlineTest(WithDaemonTestCase):
def test_source_inline(self):
output = self.run_pysource_script([
command.source_inline('name=3'),
command.source_def('function1(): return name'),
command.run('function1')
])
self.assertEqual(output, '3')
def test_source_inline_multi_line(self):
output = self.run_pysource_script([
command.source_inline('''name="John Doe"
age=13
'''),
command.source_def('function1(): return name,age'),
command.run('function1')
])
self.assertEqual(output, str(("John Doe", 13)))
| {
"content_hash": "0d4741fd5f59b758db78d600075dcfe1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 63,
"avg_line_length": 30.91304347826087,
"alnum_prop": 0.6118143459915611,
"repo_name": "dankilman/pysource",
"id": "df4977d2ac2c06338d64385485bebeab14478bae",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysource/tests/test_source_inline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95586"
},
{
"name": "Shell",
"bytes": "2104"
}
],
"symlink_target": ""
} |
from fitness_evaluator import FitnessEvaluator
from genetic_algorithm import GeneticAlgorithm
from individual import Individual
from individual_factory import IndividualFactory
from termination_criteria import TerminationCriteria
from termination_criteria import ExecutionTimeTerminationCriteria
from termination_criteria import NumberOfGenerationsTerminationCriteria | {
"content_hash": "107ef245140ae4a75ee1028aa8d37c72",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 52.42857142857143,
"alnum_prop": 0.9100817438692098,
"repo_name": "fberanizo/sin5006",
"id": "739617b8f8999a127d38fb90b2a1f1f288e2f3bd",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ga/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "100504"
},
{
"name": "TeX",
"bytes": "492162"
}
],
"symlink_target": ""
} |
import plyj.parser as plyj
import pretty_printer
import zipfile
import json
import sys
def package_job(files, location):
parser = plyj.Parser()
printer = pretty_printer.PrettyPretter(2)
with zipfile.ZipFile(location, mode='w') as zip_file:
for f in files:
parsed = parser.parse_file(file(f['r_path']))
remove_package_declaration(parsed)
remove_princeton_imports(parsed)
zip_file.writestr(f['w_path'], printer.print_tree(parsed))
def remove_package_declaration(compilation_unit):
compilation_unit.package_declaration = None
def remove_princeton_imports(tree):
princeton_prefix = 'edu.princeton.cs'
new_prefixes = []
for dec in tree.import_declarations:
if not dec.name.value.startswith(princeton_prefix):
new_prefixes.append(dec)
tree.import_declarations = new_prefixes
def add_class_as_inner_class(into, outo):
inner_class = into.type_declarations[0]
inner_imports = into.import_declarations
for ii in inner_imports:
if all(ii.name.value != oi.name.value for oi in outo.import_declarations):
outo.import_declarations.append(ii)
inner_class.modifiers.insert(1, 'static')
if 'public' in inner_class.modifiers:
inner_class.modifiers[inner_class.modifiers.index('public')] = 'private'
elif 'private' not in inner_class.modifiers:
inner_class.modifiers.insert(0, 'private')
outo.type_declarations[0].body.append(inner_class)
if __name__ == '__main__':
with open('config/zip_targets.json', 'r') as f:
weeks = json.load(f)['weeks']
package_job(weeks[sys.argv[1]], sys.argv[2])
| {
"content_hash": "4c43667b7b03af512732c8b22b0d0151",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 82,
"avg_line_length": 31.09259259259259,
"alnum_prop": 0.669446098868374,
"repo_name": "AKST/algos",
"id": "9c63c6fb2af6dee7c38d52d9b3147c042057fc05",
"size": "1679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/package.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "31748"
},
{
"name": "Python",
"bytes": "13811"
},
{
"name": "Shell",
"bytes": "5122"
}
],
"symlink_target": ""
} |
"""
settings.py
Configuration for Flask app
Important: Place your keys in the secret_keys.py module,
which should be kept out of version control.
"""
from secret_keys import *
class Config(object):
# Set secret keys for CSRF protection
SECRET_KEY = CSRF_SECRET_KEY
CSRF_SESSION_KEY = SESSION_KEY
GOOGLE_ID = GOOGLE_ID
GOOGLE_SECRET = GOOGLE_SECRET
# Flask-Cache settings
CACHE_TYPE = 'gaememcached'
# Email settings
SERVER_EMAIL = '[email protected]'
class Development(Config):
DEBUG = True
# Flask-DebugToolbar settings
CSRF_ENABLED = True
class Testing(Config):
TESTING = True
DEBUG = True
CSRF_ENABLED = True
class Production(Config):
DEBUG = False
CSRF_ENABLED = True
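# Illustrative only (not part of the original file): a typical way to activate
# one of these classes from the Flask app setup — the exact import path is an
# assumption based on this repository's layout:
#
#     app.config.from_object('mail_safe_test.settings.Development')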
| {
"content_hash": "1ab5609db559c06aca2336be6da60d36",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 57,
"avg_line_length": 21.13888888888889,
"alnum_prop": 0.6793692509855453,
"repo_name": "gdbelvin/mail-safe-test",
"id": "3162ef60b03152ddae98a2b14e1342f4e1b6ab7e",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail_safe_test/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81897"
},
{
"name": "Shell",
"bytes": "180"
}
],
"symlink_target": ""
} |
class HostUnavailable(Exception):
pass
class UnknownHost(Exception):
pass
| {
"content_hash": "7d62be334046b7ea01eaee07b35ff632",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 14,
"alnum_prop": 0.7380952380952381,
"repo_name": "ph147/tv",
"id": "7520fad93092591a9ff222be90290998a9d2b5c0",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tv/excepts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7037"
}
],
"symlink_target": ""
} |
import json, sys, csv
def convert(path):
keywords = json.load(open('./twows/{}/dict.json'.format(path),'r'))
vote_strings={}
votes = []
final_votes = []
with open('./twows/{}/votes.csv'.format(path),'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
try:
vote_strings[row[0]].append(row[1])
except:
vote_strings[row[0]] = [row[1]]
for user_vote_str in vote_strings.values():
user_vote = []
for vote in user_vote_str:
current = vote.strip('[')
current = current.strip('\n')
current = current.strip(']')
current = current.split(' ')
user_vote.append(current)
votes.append(user_vote)
for user_vote in votes:
final_vote = []
for vote in user_vote:
indexes = []
mapping = []
not_voted_for = []
vote = remove_dups(vote)
try:
mapping = keywords[vote[0]]
except:
continue
order = []
not_voted_for = list(mapping)
for c in vote[1].upper():
indexes.append(ord(c)-65)
for index in indexes:
order.append(mapping[index])
not_voted_for.remove(mapping[index])
order.append(not_voted_for)
final_vote.append(order)
final_votes.append(final_vote)
return final_votes
def remove_dups(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
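# Illustrative only: remove_dups(['B', 'A', 'B', 'C']) -> ['B', 'A', 'C'];
# duplicates are dropped while the order of first appearance is preserved.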
if __name__ == '__main__':
votes = convert(sys.argv[1])
print(votes)
open('./twows/{}/votes.json'.format(sys.argv[1]),'w').write(json.dumps(votes)) | {
"content_hash": "261b8c616a1982750bd64b8ce2eaf426",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 21.140845070422536,
"alnum_prop": 0.6062624916722186,
"repo_name": "Noahkiq/TWOWBot",
"id": "8da90b3cae6685b8dc9326f15070ccbb7f5754ca",
"size": "1501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TWOWBot/voteConverter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "16512"
},
{
"name": "Python",
"bytes": "20114"
},
{
"name": "Shell",
"bytes": "38"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from bokeh.models import Band, ColumnDataSource
from bokeh.plotting import figure, output_file, show
output_file("band.html", title="band.py example")
# Create some random data
x = np.random.random(2500) * 140 - 20
y = np.random.normal(size=2500) * 2 + 5
df = pd.DataFrame(data=dict(x=x, y=y)).sort_values(by="x")
sem = lambda x: x.std() / np.sqrt(x.size)
df2 = df.y.rolling(window=100).agg({"y_mean": np.mean, "y_std": np.std, "y_sem": sem})
df2 = df2.fillna(method='bfill')
df = pd.concat([df, df2], axis=1)
df['lower'] = df.y_mean - df.y_std
df['upper'] = df.y_mean + df.y_std
source = ColumnDataSource(df.reset_index())
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(tools=TOOLS)
p.scatter(x='x', y='y', line_color=None, fill_alpha=0.3, size=5, source=source)
band = Band(base='x', lower='lower', upper='upper', source=source, level='underlay',
fill_alpha=1.0, line_width=1, line_color='black')
p.add_layout(band)
p.title.text = "Rolling Standard Deviation"
p.xgrid[0].grid_line_color=None
p.ygrid[0].grid_line_alpha=0.5
p.xaxis.axis_label = 'X'
p.yaxis.axis_label = 'Y'
show(p)
| {
"content_hash": "0da5a065b903c086f72752c75c65b9c2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 86,
"avg_line_length": 28.875,
"alnum_prop": 0.6744588744588744,
"repo_name": "ericmjl/bokeh",
"id": "413f4e205ecf27590093240c09f1655c5eb04916",
"size": "1155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx/source/docs/user_guide/examples/plotting_band.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102094"
},
{
"name": "CoffeeScript",
"bytes": "462899"
},
{
"name": "HTML",
"bytes": "46193"
},
{
"name": "JavaScript",
"bytes": "24563"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2705341"
},
{
"name": "Shell",
"bytes": "8995"
},
{
"name": "TypeScript",
"bytes": "1468288"
}
],
"symlink_target": ""
} |
__author__ = "laike9m ([email protected])"
__title__ = 'ezcf'
__version__ = '0.2.0'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 laike9m'
import sys
from .api import ConfigFinder
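# Illustrative note (not part of the original file): appending ConfigFinder to
# sys.meta_path below lets an ordinary import statement be satisfied from a
# configuration file instead of a Python module; exactly which file formats
# are handled is defined by ConfigFinder in .api and is not shown here.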
sys.meta_path.append(ConfigFinder()) | {
"content_hash": "16c194da69e2a55530b30f63cb19a9f8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 22.4,
"alnum_prop": 0.6651785714285714,
"repo_name": "hzruandd/ezcf",
"id": "ebf60c5a4041b13c42098e442e19eff19fcf47d9",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ezcf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49329"
},
{
"name": "Shell",
"bytes": "345"
}
],
"symlink_target": ""
} |
import os
import pygame
from pygame.locals import *
from GIFMode import GIFMode
from GradientMode import GradientMode
from Grid import *
from LEDController import NEOPixel
(width, height) = (640, 480)
class App:
def __init__(self):
self.running = True
self.screen = None
self.fullscreen = False
self.lifted = None
self.image_library = {}
self.cat_pos_x = 0
self.cat_pos_y = 0
self.gif = None
self.GIFmode = None  # match the attribute name used elsewhere in the class
self.GradientMode = None
self.NeoPixel = NEOPixel()
def get_image(self, path):
image = self.image_library.get(path)
if image is None:
canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
image = pygame.image.load(canonicalized_path)
self.image_library[path] = image.convert()
return image
def on_setScreenSize(self):
if self.fullscreen:
modes = pygame.display.list_modes()
self.screen = pygame.display.set_mode(
modes[0], FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF
)
else:
self.screen = pygame.display.set_mode((width, height), RESIZABLE)
self.GIFmode.surface = self.screen
self.GradientMode.surface = self.screen
self.NeoPixel.surface = self.screen
def on_toggleFullscreen(self):
if self.fullscreen:
self.fullscreen = False
else:
self.fullscreen = True
self.on_setScreenSize()
def on_init(self):
pygame.init()
self.GIFmode = GIFMode()
self.GIFmode.isActive = True
self.GradientMode = GradientMode()
self.GradientMode.isActive = False
self.on_setScreenSize()
pygame.mouse.set_visible(False)
def toggle_modes(self):
if self.GIFmode.isActive is True:
self.GIFmode.isActive = False
self.GradientMode.isActive = True
else:
self.GIFmode.isActive = True
self.GradientMode.isActive = False
def on_event(self, event):
if event.type == pygame.KEYUP:
if event.key == pygame.K_s:
self.toggle_modes()
if event.key == pygame.K_f:
self.on_toggleFullscreen()
if event.key == pygame.K_q:
self.running = False
#self.on_cleanup()
if event.type == pygame.QUIT:
self.running = False
def on_loop(self):
self.GIFmode.run()
self.GradientMode.run()
self.NeoPixel.run()
def on_render(self):
pygame.display.flip()
def on_cleanup(self):
pygame.quit()
def on_execute(self):
if self.on_init() is False:
self.running = False
while(self.running):
for event in pygame.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.on_cleanup()
# /////////////////////////////////////////////////////////////////////////////
if __name__ == '__main__':
_app = App()
_app.on_execute() | {
"content_hash": "bf96bb1bc6247eed603332ea7de0f734",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 27.55263157894737,
"alnum_prop": 0.5495065265838904,
"repo_name": "theceremony/pyramids-installation",
"id": "6de024438aee16ee4ad79be0c6aaf7ca476b3096",
"size": "3165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/projector-sync/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35422"
}
],
"symlink_target": ""
} |
"""Module for testing the deploy domain command."""
import os.path
from shutil import rmtree
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDeployDomain(TestBrokerCommand):
def head_commit(self, sandbox, ref="HEAD"):
sandboxdir = os.path.join(self.sandboxdir, sandbox)
head, _ = self.gitcommand(["rev-parse", "%s^{commit}" % ref],
cwd=sandboxdir)
head = head.strip()
return head
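# head_commit() resolves a ref such as "HEAD" or "origin/prod" to the full
# commit hash of the named sandbox checkout; the review tests below compare
# these hashes against what show_review/update_review report.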
def test_100_deploychangetest1domain(self):
command = ["deploy", "--source", "changetest1",
"--target", "deployable", "--reason", "Test reason"]
out = self.statustest(command)
self.matchoutput(out, "Updating the checked out copy of domain "
"deployable...", command)
def test_110_verifydeploy(self):
template = self.find_template("aquilon", "archetype", "base",
domain="deployable")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by unittest\n")
def test_110_verifydeploylog(self):
kingdir = self.config.get("broker", "kingdir")
command = ["show", "--no-patch", "--pretty=full", "deployable"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchoutput(out, "User:", command)
self.matchoutput(out, "Request-ID:", command)
self.matchoutput(out, "Reason: Test reason", command)
self.matchclean(out, "Justification:", command)
self.matchclean(out, "Code-Review-URL", command)
self.matchclean(out, "Testing-URL", command)
author_email = self.config.get("broker", "git_author_email")
self.matchoutput(out, "Author: %s <%s>" % (self.user, author_email),
command)
def test_120_deployfail(self):
command = ["deploy", "--source", "changetest1",
"--target", "prod"]
_, err = self.failuretest(command, 4)
self.matchoutput(err,
"Domain prod is under change management control. "
"Please specify --justification.",
command)
def test_120_deploydryrun(self):
kingdir = self.config.get("broker", "kingdir")
old_prod, _ = self.gitcommand(["rev-list", "--max-count=1", "prod"],
cwd=kingdir)
command = ["deploy", "--source", "changetest1",
"--target", "prod", "--dryrun"]
self.successtest(command)
new_prod, _ = self.gitcommand(["rev-list", "--max-count=1", "prod"],
cwd=kingdir)
self.assertEqual(old_prod, new_prod,
"Domain prod changed despite --dryrun")
def test_120_deploybadjustification(self):
command = ["deploy", "--source", "changetest1", "--target", "prod",
"--justification", "I felt like deploying changes."]
out = self.badrequesttest(command)
self.matchoutput(out, "Failed to parse the justification", command)
def test_123_request_review(self):
command = ["request_review", "--source", "changetest1", "--target", "prod"]
self.noouttest(command)
def test_123_request_review_tracking(self):
command = ["request_review", "--source", "changetest1", "--target", "ut-prod"]
out = self.badrequesttest(command)
self.matchoutput(out, "The target needs to be a non-tracking domain, "
"maybe you meant prod?", command)
def test_124_show_review(self):
changetest1_head = self.head_commit("changetest1")
command = ["show_review", "--source", "changetest1", "--target", "prod"]
out = self.commandtest(command)
self.output_equals(out, """
Review request
Target Domain: prod
Source Sandbox: changetest1
Published Commit: %s
Testing Status: Untested
Approval Status: No decision
""" % changetest1_head,
command)
def test_124_show_review_all(self):
changetest1_head = self.head_commit("changetest1")
command = ["show_review", "--all"]
out = self.commandtest(command)
self.matchoutput(out, changetest1_head, command)
def test_125_update_review_cr(self):
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--review_url", "http://review.example.org/changes/1234"]
self.noouttest(command)
def test_126_update_review_testing(self):
changetest1_head = self.head_commit("changetest1")
prod_head = self.head_commit("changetest1", ref="origin/prod")
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", changetest1_head,
"--target_commit_tested", prod_head,
"--testing_url", "http://ci.example.org/builds/5678",
"--testing_succeeded"]
self.noouttest(command)
def test_126_update_review_approval(self):
changetest1_head = self.head_commit("changetest1")
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", changetest1_head,
"--approved"]
self.noouttest(command)
def test_128_show_review(self):
changetest1_head = self.head_commit("changetest1")
prod_head = self.head_commit("changetest1", ref="origin/prod")
command = ["show_review", "--source", "changetest1", "--target", "prod"]
out = self.commandtest(command)
self.output_equals(out, """
Review request
Target Domain: prod
Tested Commit: %s
Source Sandbox: changetest1
Published Commit: %s
Code Review URL: http://review.example.org/changes/1234
Testing URL: http://ci.example.org/builds/5678
Testing Status: Success
Approval Status: Approved
""" % (prod_head, changetest1_head),
command)
def test_128_show_review_csv(self):
changetest1_head = self.head_commit("changetest1")
prod_head = self.head_commit("changetest1", ref="origin/prod")
command = ["show_review", "--source", "changetest1", "--target", "prod",
"--format", "csv"]
out = self.commandtest(command)
self.matchoutput(out,
"prod,changetest1,%s,http://review.example.org/changes/1234,http://ci.example.org/builds/5678,%s,True,True"
% (changetest1_head, prod_head),
command)
def test_129_bad_target_commit_id(self):
changetest1_head = self.head_commit("changetest1")
commit_not_in_templates = "576afd9bd9f620293a9e0e249032be5157ba5d29"
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", changetest1_head, "--testing_failed",
"--target_commit_tested", commit_not_in_templates]
out = self.badrequesttest(command)
self.matchoutput(out, "Domain prod does not contain commit %s." %
commit_not_in_templates, command)
def test_129_stale_testing(self):
changetest1_head = self.head_commit("changetest1")
commit_not_in_templates = "576afd9bd9f620293a9e0e249032be5157ba5d29"
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", commit_not_in_templates,
"--testing_url", "http://ci.example.org/builds/5677",
"--testing_failed"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Possible attempt to update an old review record - "
"the commit being reviewed is %s, not %s." %
(changetest1_head, commit_not_in_templates),
command)
def test_129_short_commit(self):
abbrev_hash_not_in_templates = "576afd9bd9f620"
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", abbrev_hash_not_in_templates,
"--testing_url", "http://ci.example.org/builds/5677",
"--testing_failed"]
out = self.badrequesttest(command)
self.matchoutput(out, "Invalid commit ID (%s), make sure to pass the "
"full hash." % abbrev_hash_not_in_templates, command)
def test_130_deploynosync(self):
command = ["deploy", "--source", "changetest1", "--target", "prod",
"--nosync", "--justification", "tcm=12345678",
"--reason", "Just because"]
out = self.statustest(command)
self.matchoutput(out, "Updating the checked out copy of domain prod...",
command)
self.matchclean(out, "ut-prod", command)
self.matchclean(out, "not approved", command)
def test_131_verifydeploylog(self):
kingdir = self.config.get("broker", "kingdir")
command = ["show", "--no-patch", "--format=%B", "prod"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchoutput(out, "User:", command)
self.matchoutput(out, "Request-ID:", command)
self.matchoutput(out, "Justification: tcm=12345678", command)
self.matchoutput(out, "Reason: Just because", command)
self.matchoutput(out,
"Code-Review-URL: http://review.example.org/changes/1234",
command)
self.matchoutput(out,
"Testing-URL: http://ci.example.org/builds/5678",
command)
def test_200_verifynosync(self):
# The change should be in prod...
template = self.find_template("aquilon", "archetype", "base",
domain="prod")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by unittest\n")
# ...but not in the ut-prod tracking domain.
template = self.find_template("aquilon", "archetype", "base",
domain="ut-prod")
with open(template) as f:
contents = f.readlines()
self.assertNotEqual(contents[-1], "#Added by unittest\n")
def test_210_verifynosynclog(self):
kingdir = self.config.get("broker", "kingdir")
# Note: "prod" is a copy of the real thing so limit the amount of
# history checked to avoid being fooled by real commits
# The change must be in prod...
command = ["show", "--no-patch", "--format=%B", "prod"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchoutput(out, "Justification: tcm=12345678", command)
self.matchoutput(out, "Reason: Just because", command)
# ... but not in ut-prod
command = ["show", "--no-patch", "--format=%B", "ut-prod"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchclean(out, "tcm=12345678", command)
def test_300_add_advanced(self):
self.successtest(["add", "sandbox", "--sandbox", "advanced",
"--start", "prod"])
def test_310_deploy_leftbehind(self):
command = ["deploy", "--source", "advanced", "--target", "leftbehind"]
out = self.badrequesttest(command)
self.matchoutput(out,
"You're trying to deploy a sandbox to a domain that "
"does not contain the commit where the sandbox was "
"branched from.",
command)
def test_310_review_leftbehind(self):
command = ["request_review", "--source", "advanced", "--target", "leftbehind"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Domain leftbehind does not contain the commit where "
"sandbox advanced was branched from.",
command)
def test_320_update_leftbehind(self):
command = ["deploy", "--source", "prod", "--target", "leftbehind"]
self.successtest(command)
def test_330_deploy_again(self):
command = ["deploy", "--source", "advanced", "--target", "leftbehind"]
self.successtest(command)
def test_340_cleanup_advanced(self):
self.successtest(["del_sandbox", "--sandbox", "advanced"])
sandboxdir = os.path.join(self.sandboxdir, "advanced")
rmtree(sandboxdir, ignore_errors=True)
def test_800_deploy_utsandbox(self):
# utsandbox contains changes needed to compile test hosts
command = ["deploy", "--source", "utsandbox", "--target", "prod",
"--justification", "tcm=12345678"]
out = self.statustest(command)
for domain in ["prod", "ut-prod", "netinfra"]:
self.matchoutput(out,
"Updating the checked out copy of domain %s..." %
domain, command)
#self.matchoutput(out, "Warning: this deployment request was "
# "not approved", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDeployDomain)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "609dad3a380c961c5ed28cea26aae9bb",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 132,
"avg_line_length": 45.39202657807309,
"alnum_prop": 0.5608577911146893,
"repo_name": "guillaume-philippon/aquilon",
"id": "b115a09372e0a6074c5e7d786d6585fda0e5ce8b",
"size": "14413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/broker/test_deploy_domain.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
} |
'''
Created on Jul 14, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
'''
Created on Jul 13, 2012
@author: dstrauss
'''
import numpy as np
D = {'solverType':'sba', 'flavor':'TE', 'numRuns':1100, 'expt':'standard', 'numProcs':16}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
D['bkgNo'] = parseNumber
return D
| {
"content_hash": "d63b27c25fd82c0dd094a38e0b96b066",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 89,
"avg_line_length": 28.514285714285716,
"alnum_prop": 0.7374749498997996,
"repo_name": "daStrauss/subsurface",
"id": "649c99d311f6de0ede97691e9fa19f8a38972283",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/expts/redoSBA.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "295580"
}
],
"symlink_target": ""
} |
import pytest
# local imports
from stretch import application
from stretch.triggers import results
from tests.utils import fixture_loader
@pytest.fixture
def configured_app():
app_json = fixture_loader.json_fixture("web-service-configured")
return application.Application(app_json)
@pytest.fixture
def configured_app_missing_tasks():
app_json = fixture_loader.json_fixture("web-service-missing-tasks")
return application.Application(app_json)
@pytest.fixture
def badly_configured_app():
app_json = fixture_loader.json_fixture("web-service-badly-configured")
return application.Application(app_json)
@pytest.mark.parametrize("property_name,expected", [
("app_id", "/web-service"),
("instances", 2),
("min_instances", 2),
("max_instances", 8),
("scaling_factor", 1.5),
])
def test_application_parsing(configured_app, property_name, expected):
assert getattr(configured_app, property_name) == expected
def test_application_parsing_autoscaling_enabled(configured_app):
assert configured_app.autoscaling_enabled()
def test_application_parsing_new_instances(configured_app):
assert isinstance(configured_app.new_instances, application.InstanceCalculator)
def test_application_parsing_validate(configured_app):
assert configured_app.validate()
def test_application_parsing_validate_missing_tasks(configured_app_missing_tasks):
assert not configured_app_missing_tasks.validate()
def test_application_parsing_validate_badly_configured(badly_configured_app):
assert not badly_configured_app.validate()
@pytest.mark.parametrize("instances,min_instances,max_instances,scaling_factor,scaled_up,scaled_down", [
(0, 0, 8, 1.5, 1, 0),
(1, 2, 8, 1.5, 2, 2),
(2, 2, 8, 1.5, 3, 2),
(3, 2, 8, 1.5, 5, 2),
(5, 2, 8, 1.5, 8, 3),
(8, 2, 8, 1.5, 8, 5),
(10, 2, 8, 1.5, 8, 6),
(100, 2, 8, 1.5, 8, 8),
(4, 1, 10, 1.5, 6, 2),
])
def test_instance_calculator_scale_up(instances, min_instances, max_instances,
scaling_factor, scaled_up, scaled_down):
calc = application.InstanceCalculator(instances,
min_instances,
max_instances,
scaling_factor)
assert calc.calculate(results.CheckResults.SCALE_UP) == scaled_up
assert calc.calculate(results.CheckResults.SCALE_DOWN) == scaled_down
def test_instance_calculator_invalid_result_type():
calc = application.InstanceCalculator(3, 2, 10, 1.5)
assert calc.calculate(results.CheckResults.DONT_SCALE) == 3
assert calc.calculate(results.CheckResults.FAILED) == 3
| {
"content_hash": "42bd5ccca22fb5c064b2c8900db53839",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 104,
"avg_line_length": 32.792682926829265,
"alnum_prop": 0.6719970249163257,
"repo_name": "paddycarey/stretch",
"id": "cc7551b00ad1b7d029d7b2a7d4a65b092df011f6",
"size": "2711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/stretch/test_application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "737"
},
{
"name": "Python",
"bytes": "27677"
}
],
"symlink_target": ""
} |
from pudzu.charts import *
df = pd.read_csv("datasets/flagsgyw.csv")
groups = list(remove_duplicates(df.group))
array = [[dict(r) for _,r in df.iterrows() if r.group == g] for g in groups]
data = pd.DataFrame(array, index=list(remove_duplicates(df.group)))
FONT = calibri or sans
fg, bg="black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
N = 1
def process(d):
if not d or d["name"] == "_": return None
description = get_non(d, 'description')
description = "({})".format(description) if description else " "
flag = Image.from_url_with_cache(get_non(d, 'image', default_img)).to_rgba()
flag = flag.resize_fixed_aspect(height=N*198) if flag.width / flag.height < 1.3 else flag.resize((N*318,N*198))
flag = flag.pad(0 if "coat" in d['group'] else (N,0,0,0) if "Maratha" in str(d['name']) else N, "grey")
return Image.from_column([
Image.from_text_bounded(d['name'].replace(r"\n","\n"), (N*320 if "Switzerland" not in description else N*200,N*200), N*32, partial(FONT, bold=True), beard_line=True, align="center", fg=fg),
Image.from_text_bounded(description, (N*320 if "Switzerland" not in description else N*200,N*200), N*24, partial(FONT, italics=True), align="center", fg=fg),
flag
], padding=N*2, bg=bg, equal_widths=True)
title = Image.from_text(f"Green, White and Yellow flags".upper(), FONT(N*80, bold=True), fg=fg, bg=bg).pad(N*40, bg)
grid = grid_chart(data, process, padding=(N*10,N*20), fg=fg, bg=bg, yalign=(0.5,1,0.5), row_label=lambda r: None if data.index[r].startswith("_") else Image.from_text("{}".format(data.index[r].replace(r"\n","\n")).upper(), FONT(N*32, bold=True), align="center", line_spacing=N*3))
img = Image.from_column([title, grid, Rectangle((0,N*50))], bg=bg)
img.place(Image.from_text("/u/Udzu", FONT(N*24), fg=fg, bg=bg, padding=5).pad((N*1,N*1,0,0), fg), align=1, padding=N*5, copy=False)
img.save("output/flagsgyw.png")
| {
"content_hash": "e28e1c2bec0bc20d2ddfb15050530f28",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 280,
"avg_line_length": 60.06060606060606,
"alnum_prop": 0.6644803229061554,
"repo_name": "Udzu/pudzu",
"id": "218c360ad93cbf0cc96c0a53f1620abd16b04bf5",
"size": "1982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviz/flagsgyw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7945"
},
{
"name": "Python",
"bytes": "867429"
},
{
"name": "Roff",
"bytes": "3702309"
}
],
"symlink_target": ""
} |
"""
.. codeauthor:: Cédric Dumay <[email protected]>
"""
import logging
from uuid import uuid4
from cdumay_error.types import ValidationError
from cdumay_result import Result, ResultSchema
from kser import KSER_TASK_COUNT, __hostname__, KSER_METRICS_ENABLED, \
KSER_TASKS_STATUS
from kser.schemas import Message
logger = logging.getLogger(__name__)
class EntrypointMeta(type):
"""Provide path to all child classes"""
@property
def path(cls):
"""Entrypoint full path"""
return "{}.{}".format(cls.__module__, cls.__name__)
class Entrypoint(object, metaclass=EntrypointMeta):
"""Entrypoint mother class"""
REQUIRED_FIELDS = []
def __init__(self, uuid=None, params=None, result=None, metadata=None):
self.uuid = uuid or str(uuid4())
self.params = params or dict()
self.result = result or Result(uuid=self.uuid)
self.metadata = metadata or dict()
self._post_init()
def label(self, action=None):
"""Format log prefix"""
return "{}[{}]{}".format(
self.__class__.__name__, self.uuid,
" - {}".format(action) if action else ""
)
def _post_init(self):
"""A post init trigger wrapper"""
try:
return self.postinit()
except Exception as exc:
return self._onerror(Result.from_exception(exc, uuid=self.uuid))
def postinit(self):
"""A post init trigger"""
def check_required_params(self):
""" Check if all required parameters are set"""
for param in self.REQUIRED_FIELDS:
if param not in self.params:
raise ValidationError("Missing parameter: {}".format(param))
def _onsuccess(self, result):
""" To execute on execution success
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
if KSER_METRICS_ENABLED == "yes":
KSER_TASKS_STATUS.labels(
__hostname__, self.__class__.path, 'SUCCESS'
).inc()
if result:
result = self.result + result
else:
result = self.result
logger.info(
"{}.Success: {}[{}]: {}".format(
self.__class__.__name__, self.__class__.path, self.uuid, result
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return self.onsuccess(result)
def log(self, message, level=logging.INFO, *args, **kwargs):
"""Log text with a prefix"""
msg = "{}.MESSAGE: {}[{}]: {}".format(
self.__class__.__name__, self.__class__.path, self.uuid, message
)
return logger.log(level, msg, *args, **kwargs)
def onsuccess(self, result):
""" To execute on execution success
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
return result
def _onerror(self, result):
""" To execute on execution failure
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
if KSER_METRICS_ENABLED == "yes":
KSER_TASKS_STATUS.labels(
__hostname__, self.__class__.path, 'FAILED'
).inc()
if result:
result = self.result + result
else:
result = self.result
extra = dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
if result:
error = result.search_value("error")
if error:
extra['error'] = error
logger.error(
"{}.Failed: {}[{}]: {}".format(
self.__class__.__name__, self.__class__.path, self.uuid, result
), extra=extra
)
return self.onerror(result)
def onerror(self, result):
""" To implement
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
return result
def _prerun(self):
""" To execute before running message
:return: Kafka message
:rtype: kser.schemas.Message
"""
logger.debug(
"{}.PreRun: {}[{}]".format(
self.__class__.__name__, self.__class__.path, self.uuid
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump()
)
)
self.check_required_params()
return self.prerun()
def prerun(self):
""" To implement"""
def _postrun(self, result):
""" To execute after exection
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
logger.debug(
"{}.PostRun: {}[{}]".format(
self.__class__.__name__, self.__class__.path, self.uuid
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump()
)
)
return self.postrun(result)
def postrun(self, result):
""" To implement
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
return result
def _run(self):
""" Execution body
:return: Execution result
:rtype: kser.result.Result
"""
if KSER_METRICS_ENABLED == "yes":
KSER_TASK_COUNT.inc()
logger.debug(
"{}.Run: {}[{}]".format(
self.__class__.__name__, self.__class__.path, self.uuid
),
extra=dict(
kmsg=Message(
self.uuid, entrypoint=self.__class__.path,
params=self.params, metadata=self.metadata
).dump()
)
)
return self.run()
def run(self):
""" To implement
:return: Execution result
:rtype: kser.result.Result
"""
raise NotImplementedError("Task '{}' not implemented".format(
self.__class__.path
))
def unsafe_execute(self, result=None):
""" un-wrapped execution, can raise excepetion
:return: Execution result
:rtype: kser.result.Result
"""
if result:
self.result += result
self._prerun()
return self._onsuccess(self._postrun(self._run()))
def execute(self, result=None):
""" Execution 'wrapper' to make sure that it return a result
:return: Execution result
:rtype: kser.result.Result
"""
try:
return self.unsafe_execute(result=result)
except Exception as exc:
return self._onerror(Result.from_exception(exc, uuid=self.uuid))
# noinspection PyPep8Naming
def to_Message(self, result=None):
""" Entrypoint -> Message
:param kser.result.Result result: Execution result
:return: Kafka message
:rtype: kser.schemas.Message
"""
return Message(
uuid=self.uuid, entrypoint=self.__class__.path, params=self.params,
result=result if result else self.result, metadata=self.metadata
)
# noinspection PyPep8Naming
@classmethod
def from_Message(cls, kmsg):
""" Message -> Entrypoint
:param kser.schemas.Message kmsg: Kafka message
:return: an entrypoint
:rtype: kser.entry.Entrypoint
"""
return cls(
uuid=kmsg.uuid, params=kmsg.params, result=kmsg.result,
metadata=kmsg.metadata
)
| {
"content_hash": "9b0387a49b89309ebfbd6fc468c2150c",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 29.724381625441698,
"alnum_prop": 0.5349500713266762,
"repo_name": "cdumay/kser",
"id": "07226c213f3607fa27c93cf80e0a0219662c5a27",
"size": "8460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/kser/entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48357"
}
],
"symlink_target": ""
} |