code | repo_name | path | language | license | size
---|---|---|---|---|---|
'''
Created on 19 Feb 2013
@author: samgeen
'''
import Simulation
import Settings
from Hamu.Utils.Directory import Directory
class Simulations(object):
'''
A class that manages simulations with cached data
'''
def __init__(self):
'''
Constructor
'''
# Set up directory to use for simulations
settings = Settings.Settings()
self._dir = Directory(settings["DataDir"]+"/workspaces/"+\
settings["CurrentWorkspace"]+\
"/Simulations/")
# Open list of simulations
self._simNames = self._dir.ListItems()
def Exists(self, name):
'''
Does a simulation with this name exist?
'''
return name in self._simNames
def CachePath(self, simName=None):
'''
Path that the simulation cache data is stored in
simName: If set, return the path of that simulation's cache folder
'''
path = self._dir.Path()
if simName:
path+=simName+"/"
return path
def AddSimulation(self, sim):
'''
Add this simulation to the list of simulations
'''
name = sim.Name()
# Make the simulation directory
cachedir = self._dir.MakeSubdir(name)
# Add simulation to list of simulations
self._simNames.append(name)
return cachedir.Path()
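# Illustrative usage sketch (not part of the original module; it assumes a
# Simulation object that exposes a Name() method, mirroring the call made in
# AddSimulation above):
#
#     sims = Simulations()
#     if sims.Exists(mysim.Name()):
#         cachedir = sims.CachePath(mysim.Name())
#     else:
#         cachedir = sims.AddSimulation(mysim)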
| samgeen/Hamu | SimData/Simulations.py | Python | mit | 1,448 |
from flask_wtf import FlaskForm as Form  # the flask.ext namespace was removed; FlaskForm replaces the old Form
from wtforms import RadioField
from wtforms import StringField
from wtforms import SubmitField
from wtforms.validators import DataRequired
class QuestionForm(Form):
answer = RadioField('Answer', choices=[('yes', 'Yes'), ('no', 'No')],
validators=[DataRequired()])
submit = SubmitField()
class GuessResultForm(Form):
result = RadioField('Result', choices=[('yes', 'Yes'), ('no', 'No')],
validators=[DataRequired()])
submit = SubmitField()
class NewLanguageForm(Form):
language = StringField('New Language Name', validators=[DataRequired()])
question = StringField('What makes this language different than others?',
validators=[DataRequired()])
answer = RadioField('The answer to your question is',
choices=[('yes', 'Yes'), ('no', 'No')],
validators=[DataRequired()])
submit = SubmitField()
| StephenHesperus/favorite-programming-language | app/main/forms.py | Python | mit | 981 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-02 06:15
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
def migrate_win_to_ssh_protocol(apps, schema_editor):
asset_model = apps.get_model("assets", "Asset")
db_alias = schema_editor.connection.alias
asset_model.objects.using(db_alias).filter(platform__startswith='Win').update(protocol='rdp')
class Migration(migrations.Migration):
dependencies = [
('assets', '0016_auto_20180511_1203'),
]
operations = [
migrations.AddField(
model_name='asset',
name='protocol',
field=models.CharField(choices=[('ssh', 'ssh'), ('rdp', 'rdp'), ('telnet', 'telnet (beta)')], default='ssh', max_length=128, verbose_name='Protocol'),
),
migrations.AddField(
model_name='systemuser',
name='login_mode',
field=models.CharField(choices=[('auto', 'Automatic login'), ('manual', 'Manually login')], default='auto', max_length=10, verbose_name='Login mode'),
),
migrations.AlterField(
model_name='adminuser',
name='username',
field=models.CharField(blank=True, max_length=32, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z_@\\-\\.]*$', 'Special char not allowed')], verbose_name='Username'),
),
migrations.AlterField(
model_name='asset',
name='platform',
field=models.CharField(choices=[('Linux', 'Linux'), ('Unix', 'Unix'), ('MacOS', 'MacOS'), ('BSD', 'BSD'), ('Windows', 'Windows'), ('Windows2016', 'Windows(2016)'), ('Other', 'Other')], default='Linux', max_length=128, verbose_name='Platform'),
),
migrations.AlterField(
model_name='gateway',
name='username',
field=models.CharField(blank=True, max_length=32, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z_@\\-\\.]*$', 'Special char not allowed')], verbose_name='Username'),
),
migrations.AlterField(
model_name='systemuser',
name='protocol',
field=models.CharField(choices=[('ssh', 'ssh'), ('rdp', 'rdp'), ('telnet', 'telnet (beta)')], default='ssh', max_length=16, verbose_name='Protocol'),
),
migrations.AlterField(
model_name='systemuser',
name='username',
field=models.CharField(blank=True, max_length=32, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z_@\\-\\.]*$', 'Special char not allowed')], verbose_name='Username'),
),
migrations.RunPython(migrate_win_to_ssh_protocol),
]
| jumpserver/jumpserver | apps/assets/migrations/0017_auto_20180702_1415.py | Python | gpl-3.0 | 2,702 |
"""
Closures channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
_LOGGER = logging.getLogger(__name__)
| jnewland/home-assistant | homeassistant/components/zha/core/channels/closures.py | Python | apache-2.0 | 234 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MapTimeFrame.nodes_added'
db.add_column(u'map_maptimeframe', 'nodes_added',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'MapTimeFrame.chunks_added'
db.add_column(u'map_maptimeframe', 'chunks_added',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'MapTimeFrame.node_cache_hits'
db.add_column(u'map_maptimeframe', 'node_cache_hits',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'MapTimeFrame.node_cache_size'
db.add_column(u'map_maptimeframe', 'node_cache_size',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MapTimeFrame.nodes_added'
db.delete_column(u'map_maptimeframe', 'nodes_added')
# Deleting field 'MapTimeFrame.chunks_added'
db.delete_column(u'map_maptimeframe', 'chunks_added')
# Deleting field 'MapTimeFrame.node_cache_hits'
db.delete_column(u'map_maptimeframe', 'node_cache_hits')
# Deleting field 'MapTimeFrame.node_cache_size'
db.delete_column(u'map_maptimeframe', 'node_cache_size')
models = {
u'map.maptimeframe': {
'Meta': {'object_name': 'MapTimeFrame'},
'analysis_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'calculated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chunks_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'node_cache_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'node_cache_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'nodes_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'tweet_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'map.treenode': {
'Meta': {'object_name': 'TreeNode', 'index_together': "[['parent', 'word']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['map.TreeNode']", 'null': 'True', 'blank': 'True'}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'map.tweetchunk': {
'Meta': {'object_name': 'TweetChunk'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['map.TreeNode']"}),
'tweet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['twitter_stream.Tweet']"}),
'tz_country': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
u'map.tz_country': {
'Meta': {'object_name': 'Tz_Country'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'twitter_stream.tweet': {
'Meta': {'object_name': 'Tweet'},
'analyzed_by': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'favorite_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'filter_level': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'retweet_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'retweeted_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'truncated': ('django.db.models.fields.BooleanField', [], {}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_followers_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_geo_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_utc_offset': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user_verified': ('django.db.models.fields.BooleanField', [], {})
}
}
complete_apps = ['map'] | michaelbrooks/twitter-feels | twitter_feels/apps/map/migrations/0002_auto__add_field_maptimeframe_nodes_added__add_field_maptimeframe_chunk.py | Python | mit | 6,971 |
import click
from utils.api_config import update_config
from utils.jenkins import setCredential, startJob
from extensions.multiaccount.core import deploy_core_service
featureName = "MultiAccount"
@click.command()
@click.option(
"--regions",
"-r",
help=('Specify AWS regions you wish to apply on the new account'),
required=True,
multiple=True
)
@click.option(
"--stackprefix",
"-p",
help='Specify the stackprefix of your existing Jazz installation (e.g. myjazz), \
your existing config will be imported',
prompt=True
)
@click.option(
"--aws_accesskey",
help='AWS accesskey of the new account',
prompt=True
)
@click.option(
"--aws_secretkey",
help='AWS secretkey of the new account',
prompt=True
)
@click.option(
"--jazz_apiendpoint",
help='Specify the Jazz Endpoint',
prompt=True
)
@click.option(
'--jazz_userpass',
nargs=2,
required=True,
help='Provide the username and password \
of the jazz application separated by a space',
prompt=True)
@click.option(
"--jenkins_url",
help='Specify the Jenkins url',
prompt=True
)
@click.option(
'--jenkins_userpass',
nargs=2,
required=True,
help='Provide the username and password \
of the jenkins separated by a space',
prompt=True)
def install(regions, stackprefix, aws_accesskey, aws_secretkey, jazz_apiendpoint,
jazz_userpass, jenkins_url, jenkins_userpass):
click.secho('\n\nThis will install {0} functionality into your Jazz deployment'.format(featureName), fg='blue')
tags = [{
'Key': 'Name',
'Value': stackprefix
},
{
'Key': 'Application',
'Value': 'Jazz'
},
{
'Key': 'JazzInstance',
'Value': stackprefix
}]
regions_list = ' '.join(list(regions)).split()
    jazz_userpass_list = ' '.join(list(jazz_userpass)).split()  # join with a space so the pair splits back into [user, password]
jazz_username, jazz_password = jazz_userpass_list[0], jazz_userpass_list[1]
    jenkins_userpass_list = ' '.join(list(jenkins_userpass)).split()  # join with a space so the pair splits back into [user, password]
jenkins_username, jenkins_password = jenkins_userpass_list[0], jenkins_userpass_list[1]
account_json, credential_id = deploy_core_service(aws_accesskey, aws_secretkey, jazz_username, jazz_password,
jazz_apiendpoint, regions_list, stackprefix, tags)
if account_json != '':
# Store the CREDENTIAL_ID in jenkins
setCredential(jenkins_url, jenkins_username, jenkins_password, credential_id,
aws_accesskey, aws_secretkey, "aws")
update_config(
"AWS.ACCOUNTS",
account_json,
jazz_username,
jazz_password,
jazz_apiendpoint
)
# Trigger jazz ui
startJob(jenkins_url, jenkins_username, jenkins_password, "job/jazz_ui/buildWithParameters?token=jazz-101-job")
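# Example invocation sketch (illustrative only; every value below is a
# placeholder, and click commands accept an argument list when called directly):
#
#     install(["-r", "us-east-1", "-p", "myjazz",
#              "--aws_accesskey", "AKIA...", "--aws_secretkey", "...",
#              "--jazz_apiendpoint", "https://jazz.example.com",
#              "--jazz_userpass", "admin", "secret",
#              "--jenkins_url", "https://jenkins.example.com",
#              "--jenkins_userpass", "admin", "secret"])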
| tmobile/jazz-installer | features/extensions/multiaccount/install.py | Python | apache-2.0 | 2,976 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "AC850685e1d9b8c09dae0b938923dc0d42"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
fax = client.fax.v1.faxes.create(
from_="Jack",
to="sip:[email protected]?hatchkey=4815162342;transport=TCP",
media_url="https://www.twilio.com/docs/documents/25/justthefaxmaam.pdf"
)
print(fax.sid)
| teoreteetik/api-snippets | fax/sip-send/example-3/example-3.6.x.py | Python | mit | 503 |
import os
import sys
import imp
from idlelib.TreeWidget import TreeItem
from idlelib.ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
from idlelib.PyShell import PyShellFileList
class PathBrowser(ClassBrowser):
def __init__(self, flist, _htest=False):
"""
_htest - bool, change box location when running htest
"""
self._htest = _htest
self.init(flist)
def settitle(self):
self.top.wm_title("Path Browser")
self.top.wm_iconname("Path Browser")
def rootnode(self):
return PathBrowserTreeItem()
class PathBrowserTreeItem(TreeItem):
def GetText(self):
return "sys.path"
def GetSubList(self):
sublist = []
for dir in sys.path:
item = DirBrowserTreeItem(dir)
sublist.append(item)
return sublist
class DirBrowserTreeItem(TreeItem):
def __init__(self, dir, packages=[]):
self.dir = dir
self.packages = packages
def GetText(self):
if not self.packages:
return self.dir
else:
return self.packages[-1] + ": package"
def GetSubList(self):
try:
names = os.listdir(self.dir or os.curdir)
except os.error:
return []
packages = []
for name in names:
file = os.path.join(self.dir, name)
if self.ispackagedir(file):
nn = os.path.normcase(name)
packages.append((nn, name, file))
packages.sort()
sublist = []
for nn, name, file in packages:
item = DirBrowserTreeItem(file, self.packages + [name])
sublist.append(item)
for nn, name in self.listmodules(names):
item = ModuleBrowserTreeItem(os.path.join(self.dir, name))
sublist.append(item)
return sublist
def ispackagedir(self, file):
if not os.path.isdir(file):
return 0
init = os.path.join(file, "__init__.py")
return os.path.exists(init)
def listmodules(self, allnames):
modules = {}
suffixes = imp.get_suffixes()
sorted = []
for suff, mode, flag in suffixes:
i = -len(suff)
for name in allnames[:]:
normed_name = os.path.normcase(name)
if normed_name[i:] == suff:
mod_name = name[:i]
if mod_name not in modules:
modules[mod_name] = None
sorted.append((normed_name, name))
allnames.remove(name)
sorted.sort()
return sorted
def _path_browser(parent):
flist = PyShellFileList(parent)
PathBrowser(flist, _htest=True)
parent.mainloop()
if __name__ == "__main__":
from unittest import main
main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False)
from idlelib.idle_test.htest import run
run(_path_browser)
| j5shi/Thruster | pylibs/idlelib/PathBrowser.py | Python | gpl-2.0 | 3,069 |
#imports
import sys
sys.path.insert(0, "../")
sys.path.insert(0, "./")
import globalsettings
# functions which are called by function handlers are placed in the MenuFunc_Base class
class MenuFunc_Base:
def myFunction1():
if (globalsettings.DEBUGFLAG >= 1):
print("This is myFunction1")
def myFunction2():
if (globalsettings.DEBUGFLAG >= 1):
print("This is myFunction2")
# dictionary to hold function handlers
#functionHandlersDictionary = { "myFunction1": [MenuFunc_Base.myFunction1, "My function 1"], "myFunction2": [MenuFunc_Base.myFunction2, "My Function 2"]}
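# Illustrative dispatch sketch (assumes the handler dictionary above is
# uncommented): each entry maps a name to [callable, menu label], so a menu
# system could look up and invoke a handler like this:
#
#     handler, label = functionHandlersDictionary["myFunction1"]
#     print(label)
#     handler()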
| jpuk/rpi-128x64-oled-menusystem | myFunctions.py | Python | lgpl-2.1 | 581 |
#!/usr/bin/env python
"""
outputs the frozen packages
"""
import sys
import os
import argparse
import subprocess
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--venv', default='requirements/temporary_venv')
parser.add_argument(
"--template", default="requirements/template.txt",)
parser.add_argument(
"--out", default=sys.stdout, type=argparse.FileType('w'),
help='the file where packages should be written to')
def main(args):
if not os.path.isdir(args.venv):
subprocess.check_call([
sys.executable, '-m', 'virtualenv', args.venv
])
subprocess.check_call([
os.path.join(args.venv, 'bin/pip'),
'install', '-U', '-r', args.template])
subprocess.check_call([
os.path.join(args.venv, 'bin/pip'), 'freeze'
], stdout=args.out)
if __name__ == '__main__':
main(parser.parse_args())
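# Example invocation (illustrative; the output path is an assumption, not part
# of the original script):
#
#     python requirements/freeze.py --out requirements/frozen.txt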
| jdemon519/cfme_tests | requirements/freeze.py | Python | gpl-2.0 | 901 |
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
sysname = "ljbox"
collision_rate = 1.0 / u.picoseconds
system, positions, groups, temperature, timestep = lb_loader.load(sysname)
integrator = hmc_integrators.AndersenVelocityVerletIntegrator(temperature, collision_rate, timestep / 4.)
context = lb_loader.build(system, integrator, positions, temperature)
integrator.step(50000)
positions = context.getState(getPositions=True).getPositions()
n_steps = 25
Neff_cutoff = 2000.
grid = []
for itype in ["AndersenVelocityVerletIntegrator"]:
for timestep_factor in [1.0, 2.0, 4.0]:
d = dict(itype=itype, timestep=timestep / timestep_factor)
grid.append(d)
for settings in grid:
itype = settings.pop("itype")
timestep = settings["timestep"]
integrator = hmc_integrators.AndersenVelocityVerletIntegrator(temperature, collision_rate, timestep / 4.)
context = lb_loader.build(system, integrator, positions, temperature)
filename = "./data/%s_%s_%.3f_%d.csv" % (sysname, itype, timestep / u.femtoseconds, collision_rate * u.picoseconds)
print(filename)
data, start, g, Neff = lb_loader.converge(context, n_steps=n_steps, Neff_cutoff=Neff_cutoff)
data.to_csv(filename)
| kyleabeauchamp/HMCNotes | code/correctness/old/test_anderson_velocity_verlet.py | Python | gpl-2.0 | 1,367 |
from .pytest import * # noqa
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'filmfest',
'USER': 'postgres',
}
}
| kinaklub/next.filmfest.by | filmfest/settings/pytest_pg.py | Python | unlicense | 187 |
#!/usr/bin/env python
"""
Computations create a new value for each :class:`.Row` in a :class:`.Table`.
When used with :meth:`.Table.compute` these new values become a new column.
For instance, the :class:`.PercentChange` computation takes two column names as
arguments and computes the percentage change between them for each row.
There are a variety of basic computations, such as :class:`.Change` and
:class:`.Percent`. If none of these meet your needs you can use the
:class:`Formula` computation to apply an arbitrary function to the row.
If this still isn't flexible enough, it's simple to create a custom computation
class by inheriting from :class:`Computation`.
"""
from agate.computations.base import Computation # noqa
from agate.computations.formula import Formula # noqa
from agate.computations.change import Change # noqa
from agate.computations.percent import Percent # noqa
from agate.computations.percent_change import PercentChange # noqa
from agate.computations.rank import Rank # noqa
from agate.computations.percentile_rank import PercentileRank # noqa
from agate.computations.slug import Slug # noqa
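# Usage sketch (illustrative; 'table' is assumed to be an existing agate.Table
# with numeric columns named 'before' and 'after'):
#
#     new_table = table.compute([
#         ('change', Change('before', 'after')),
#         ('percent_change', PercentChange('before', 'after')),
#     ])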
| onyxfish/agate | agate/computations/__init__.py | Python | mit | 1,133 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timers.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| lnhubbell/Timers | backend/manage.py | Python | mit | 249 |
import clr, sys
from mathUtils import MathUtils
# Assembly reference example
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Tianjiexing.BLL');
clr.AddReference('ZyGames.Tianjiexing.Model');
clr.AddReference('ZyGames.Tianjiexing.Lang');
clr.AddReference('ZyGames.Tianjiexing.Component');
from action import *
from System.Collections.Generic import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Component.Chat import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Runtime import *
from ZyGames.Framework.Cache import *
from ZyGames.Tianjiexing.BLL.Base import *
# One-click fate crystal acquisition interface
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.goldNum = 0
self.gameCoin = 0
self.freeNum = 0
self.isSale = 0
self.issTelegrams = 0
self.prompt = ''
self.grayCrystalArray = List[GrayCrystal]()
self.userLightArray = List[UserLight]()
def getUrlElement(httpGet, parent):
urlParam = UrlParam();
userID = '';
gainCount =14;
return urlParam;
def takeAction(urlParam, parent):
def compareToCrystal(x,y):
if x == None and y == None:
return 0;
if x != None and y == None:
return 1;
if x == None:
return -1;
return x.CreateDate.CompareTo(y.CreateDate);
actionResult = ActionResult();
userId = parent.Current.User.PersonalId;
urlParam.userID = parent.Current.User.PersonalId;
contextUser = parent.Current.User;
if contextUser.VipLv < 5:
parent.ErrorCode = LanguageManager.GetLang().ErrorCode;
parent.ErrorInfo = LanguageManager.GetLang().St_VipNotEnough;
actionResult.Result = False;
return actionResult;
UserHelper.GetUserLightOpen(userId);
if CrystalHelper.CheckAllowCrystall(contextUser) == False:
parent.ErrorCode = LanguageManager.GetLang().ErrorCode;
parent.ErrorInfo = LanguageManager.GetLang().St1305_FateBackpackFull;
actionResult.Result = False;
return actionResult;
saleNum = CrystalHelper.SellGrayCrystal(contextUser, None);
actionResult.prompt = CrystalHelper.AkeyHuntingLife(contextUser);
dailyRestrainSet = ShareCacheStruct[DailyRestrainSet]().FindKey(RestrainType.MianFeiLieMing);
userRestrain = GameDataCacheSet[UserDailyRestrain]().FindKey(userId);
if dailyRestrainSet is not None and userRestrain is not None:
if MathUtils.Now.Date == userRestrain.RefreshDate.Date:
actionResult.freeNum = VipHelper.GetVipUseNum(contextUser.VipLv, RestrainType.MianFeiLieMing);
actionResult.freeNum = MathUtils.Subtraction(actionResult.freeNum, userRestrain.Funtion2, 0);
else :
actionResult.freeNum = VipHelper.GetVipUseNum(contextUser.VipLv, RestrainType.MianFeiLieMing);
actionResult.userLightArray = GameDataCacheSet[UserLight]().FindAll(contextUser.UserID);
allowSale = False;
allowTake = False;
list =CrystalHelper.GetNotSaleCrystalNum(contextUser);
if list.Count >0:
actionResult.grayCrystalArray = list[0];
MathUtils.QuickSort[GrayCrystal](actionResult.grayCrystalArray, lambda x,y:compareToCrystal(x,y))
if list.Count >1:
allowSale=list[1];
if list.Count >2:
allowTake=list[2];
if allowSale is True:
actionResult.isSale=1;
else:
actionResult.isSale =2;
if allowTake is True:
actionResult.issTelegrams = 1;
else:
actionResult.issTelegrams = 2;
actionResult.goldNum = contextUser.GoldNum;
actionResult.gameCoin= contextUser.GameCoin;
    # Needs to be implemented
return actionResult;
def buildPacket(writer, urlParam, actionResult):
writer.PushIntoStack(actionResult.goldNum)
writer.PushIntoStack(actionResult.gameCoin)
writer.PushIntoStack(actionResult.freeNum)
writer.PushIntoStack(actionResult.isSale)
writer.PushIntoStack(actionResult.issTelegrams)
writer.PushIntoStack(len(actionResult.grayCrystalArray))
for info in actionResult.grayCrystalArray:
crystalName = '';
headID ='';
crystalQuality = 0;
crystalInfo = ConfigCacheSet[CrystalInfo]().FindKey(info.CrystalID)
if crystalInfo is not None:
crystalName= crystalInfo.CrystalName;
headID = crystalInfo.HeadID;
crystalQuality = MathUtils.ToShort(crystalInfo.CrystalQuality);
DsItem = DataStruct()
DsItem.PushIntoStack(MathUtils.ToNotNullString(info.UserCrystalID))
DsItem.PushIntoStack(info.CrystalID)
DsItem.PushIntoStack(MathUtils.ToNotNullString(crystalName))
DsItem.PushIntoStack(MathUtils.ToNotNullString(headID))
DsItem.PushShortIntoStack(crystalQuality)
writer.PushIntoStack(DsItem)
writer.PushIntoStack(actionResult.userLightArray.Count)
for info in actionResult.userLightArray:
probabilityInfo = ConfigCacheSet[ProbabilityInfo]().FindKey(info.HuntingID);
price = 0;
if probabilityInfo is not None:
price = probabilityInfo.Price;
DsItem = DataStruct()
DsItem.PushIntoStack(info.HuntingID)
DsItem.PushIntoStack(price)
DsItem.PushIntoStack(info.IsLight)
writer.PushIntoStack(DsItem)
writer.PushIntoStack(MathUtils.ToNotNullString(actionResult.prompt))
return True; | wenhulove333/ScutServer | Sample/Koudai/Server/src/ZyGames.Tianjiexing.Server/Script/PyScript/Action/action1312.py | Python | mit | 6,031 |
'''
@author: Frank
'''
import unittest
from zstacklib.utils import linux
class Test(unittest.TestCase):
def testName(self):
linux.create_vlan_eth('eth0', 11, '10.1.1.1', '255.255.255.0')
linux.create_vlan_eth('eth0', 100, '10.3.3.3', '255.255.255.0')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | live4thee/zstack-utility | zstacklib/zstacklib/test/test_create_vlan_eth.py | Python | apache-2.0 | 373 |
import gettext
import os
gettext.install('nova')
from nova import utils
def setup(app):
rootdir = os.path.abspath(app.srcdir + '/..')
print "**Autodocumenting from %s" % rootdir
os.chdir(rootdir)
rv = utils.execute('./generate_autodoc_index.sh')
print rv[0]
| nii-cloud/dodai-compute | doc/ext/nova_autodoc.py | Python | apache-2.0 | 281 |
"""Mayavi/traits GUI for setting MRI fiducials"""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from glob import glob
import os
from ..externals.six.moves import map
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
import numpy as np
from pyface.api import confirm, FileDialog, OK, YES
from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
cached_property, DelegatesTo, Event, Instance,
Property, Array, Bool, Button, Enum)
from traitsui.api import HGroup, Item, VGroup, View
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
Array = Bool = Button = DelegatesTo = Enum = Event = Instance = \
Property = View = Item = HGroup = VGroup = SceneEditor = \
NoButtons = trait_wraith
from ..coreg import fid_fname, fid_fname_general, head_bem_fname
from ..io import write_fiducials
from ..io.constants import FIFF
from ..utils import get_subjects_dir, logger
from ._file_traits import (BemSource, fid_wildcard, FiducialsSource,
MRISubjectSource, SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
headview_borders)
class MRIHeadWithFiducialsModel(HasPrivateTraits):
"""Represent an MRI head shape with fiducials
Attributes
----------
points : array (n_points, 3)
MRI head surface points.
tris : array (n_tris, 3)
Triangles based on points.
lpa : array (1, 3)
Left peri-auricular point coordinates.
nasion : array (1, 3)
Nasion coordinates.
rpa : array (1, 3)
Right peri-auricular point coordinates.
"""
subject_source = Instance(MRISubjectSource, ())
bem = Instance(BemSource, ())
fid = Instance(FiducialsSource, ())
fid_file = DelegatesTo('fid', 'file')
fid_fname = DelegatesTo('fid', 'fname')
fid_points = DelegatesTo('fid', 'points')
subjects_dir = DelegatesTo('subject_source')
subject = DelegatesTo('subject_source')
subject_has_bem = DelegatesTo('subject_source')
points = DelegatesTo('bem')
norms = DelegatesTo('bem')
tris = DelegatesTo('bem')
lpa = Array(float, (1, 3))
nasion = Array(float, (1, 3))
rpa = Array(float, (1, 3))
reset = Event(desc="Reset fiducials to the file.")
# info
can_save = Property(depends_on=['file', 'can_save_as'])
can_save_as = Property(depends_on=['lpa', 'nasion', 'rpa'])
can_reset = Property(depends_on=['file', 'fid.points', 'lpa', 'nasion',
'rpa'])
fid_ok = Property(depends_on=['lpa', 'nasion', 'rpa'], desc="All points "
"are set")
default_fid_fname = Property(depends_on=['subjects_dir', 'subject'],
desc="the default file name for the "
"fiducials fif file")
# switch for the GUI (has no effect in the model)
lock_fiducials = Bool(False, desc="Used by GIU, has no effect in the "
"model.")
@on_trait_change('fid_points')
def reset_fiducials(self):
if self.fid_points is not None:
self.lpa = self.fid_points[0:1]
self.nasion = self.fid_points[1:2]
self.rpa = self.fid_points[2:3]
def save(self, fname=None):
"""Save the current fiducials to a file
Parameters
----------
fname : str
Destination file path. If None, will use the current fid filename
if available, or else use the default pattern.
"""
if fname is None:
fname = self.fid_file
if not fname:
fname = self.default_fid_fname
dig = [{'kind': 1, 'ident': 1, 'r': np.array(self.lpa[0])},
{'kind': 1, 'ident': 2, 'r': np.array(self.nasion[0])},
{'kind': 1, 'ident': 3, 'r': np.array(self.rpa[0])}]
write_fiducials(fname, dig, FIFF.FIFFV_COORD_MRI)
self.fid_file = fname
@cached_property
def _get_can_reset(self):
if not self.fid_file:
return False
elif np.any(self.lpa != self.fid.points[0:1]):
return True
elif np.any(self.nasion != self.fid.points[1:2]):
return True
elif np.any(self.rpa != self.fid.points[2:3]):
return True
return False
@cached_property
def _get_can_save_as(self):
can = not (np.all(self.nasion == self.lpa)
or np.all(self.nasion == self.rpa)
or np.all(self.lpa == self.rpa))
return can
@cached_property
def _get_can_save(self):
if not self.can_save_as:
return False
elif self.fid_file:
return True
elif self.subjects_dir and self.subject:
return True
else:
return False
@cached_property
def _get_default_fid_fname(self):
fname = fid_fname.format(subjects_dir=self.subjects_dir,
subject=self.subject)
return fname
@cached_property
def _get_fid_ok(self):
return all(np.any(pt) for pt in (self.nasion, self.lpa, self.rpa))
def _reset_fired(self):
self.reset_fiducials()
# if subject changed because of a change of subjects_dir this was not
# triggered
@on_trait_change('subjects_dir,subject')
def _subject_changed(self):
subject = self.subject
subjects_dir = self.subjects_dir
if not subjects_dir or not subject:
return
# update bem head
path = head_bem_fname.format(subjects_dir=subjects_dir,
subject=subject)
self.bem.file = path
# find fiducials file
path = fid_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
self.fid_file = path
self.lock_fiducials = True
else:
path = fid_fname_general.format(subjects_dir=subjects_dir,
subject=subject, head='*')
fnames = glob(path)
if fnames:
path = fnames[0]
self.fid.file = path
self.lock_fiducials = True
else:
self.fid.reset_traits(['file'])
self.lock_fiducials = False
# does not seem to happen by itself ... so hard code it:
self.reset_fiducials()
class FiducialsPanel(HasPrivateTraits):
"""Set fiducials on an MRI surface"""
model = Instance(MRIHeadWithFiducialsModel)
fid_file = DelegatesTo('model')
fid_fname = DelegatesTo('model')
lpa = DelegatesTo('model')
nasion = DelegatesTo('model')
rpa = DelegatesTo('model')
can_save = DelegatesTo('model')
can_save_as = DelegatesTo('model')
can_reset = DelegatesTo('model')
fid_ok = DelegatesTo('model')
locked = DelegatesTo('model', 'lock_fiducials')
set = Enum('LPA', 'Nasion', 'RPA')
current_pos = Array(float, (1, 3)) # for editing
save_as = Button(label='Save As...')
save = Button(label='Save')
reset_fid = Button(label="Reset to File")
headview = Instance(HeadViewController)
hsp_obj = Instance(SurfaceObject)
picker = Instance(object)
# the layout of the dialog created
view = View(VGroup(Item('fid_file', label='Fiducials File'),
Item('fid_fname', show_label=False, style='readonly'),
Item('set', style='custom'),
Item('current_pos', label='Pos'),
HGroup(Item('save', enabled_when='can_save',
tooltip="If a filename is currently "
"specified, save to that file, otherwise "
"save to the default file name"),
Item('save_as', enabled_when='can_save_as'),
Item('reset_fid', enabled_when='can_reset'),
show_labels=False),
enabled_when="locked==False"))
def __init__(self, *args, **kwargs):
super(FiducialsPanel, self).__init__(*args, **kwargs)
self.sync_trait('lpa', self, 'current_pos', mutual=True)
def _reset_fid_fired(self):
self.model.reset = True
def _save_fired(self):
self.model.save()
def _save_as_fired(self):
if self.fid_file:
default_path = self.fid_file
else:
default_path = self.model.default_fid_fname
dlg = FileDialog(action="save as", wildcard=fid_wildcard,
default_path=default_path)
dlg.open()
if dlg.return_code != OK:
return
path = dlg.path
if not path.endswith('.fif'):
path = path + '.fif'
if os.path.exists(path):
answer = confirm(None, "The file %r already exists. Should it "
"be replaced?", "Overwrite File?")
if answer != YES:
return
self.model.save(path)
def _on_pick(self, picker):
if self.locked:
return
self.picker = picker
n_pos = len(picker.picked_positions)
if n_pos == 0:
logger.debug("GUI: picked empty location")
return
if picker.actor is self.hsp_obj.surf.actor.actor:
idxs = []
idx = None
pt = [picker.pick_position]
elif self.hsp_obj.surf.actor.actor in picker.actors:
idxs = [i for i in range(n_pos) if picker.actors[i] is
self.hsp_obj.surf.actor.actor]
idx = idxs[-1]
pt = [picker.picked_positions[idx]]
else:
logger.debug("GUI: picked object other than MRI")
round_ = lambda x: round(x, 3)
poss = [map(round_, pos) for pos in picker.picked_positions]
pos = map(round_, picker.pick_position)
msg = ["Pick Event: %i picked_positions:" % n_pos]
line = str(pos)
if idx is None:
line += " <-pick_position"
msg.append(line)
for i, pos in enumerate(poss):
line = str(pos)
if i == idx:
line += " <- MRI mesh"
elif i in idxs:
line += " (<- also MRI mesh)"
msg.append(line)
logger.debug(os.linesep.join(msg))
if self.set == 'Nasion':
self.nasion = pt
elif self.set == 'LPA':
self.lpa = pt
elif self.set == 'RPA':
self.rpa = pt
else:
raise ValueError("set = %r" % self.set)
@on_trait_change('set')
def _on_set_change(self, obj, name, old, new):
self.sync_trait(old.lower(), self, 'current_pos', mutual=True,
remove=True)
self.sync_trait(new.lower(), self, 'current_pos', mutual=True)
if new == 'Nasion':
self.headview.front = True
elif new == 'LPA':
self.headview.left = True
elif new == 'RPA':
self.headview.right = True
# FiducialsPanel view that allows manipulating all coordinates numerically
view2 = View(VGroup(Item('fid_file', label='Fiducials File'),
Item('fid_fname', show_label=False, style='readonly'),
Item('set', style='custom'), 'lpa', 'nasion', 'rpa',
HGroup(Item('save', enabled_when='can_save'),
Item('save_as', enabled_when='can_save_as'),
Item('reset_fid', enabled_when='can_reset'),
show_labels=False),
enabled_when="locked==False"))
class FiducialsFrame(HasTraits):
"""GUI for interpolating between two KIT marker files
Parameters
----------
subject : None | str
Set the subject which is initially selected.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
model = Instance(MRIHeadWithFiducialsModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
spanel = Instance(SubjectSelectorPanel)
panel = Instance(FiducialsPanel)
mri_obj = Instance(SurfaceObject)
point_scale = float(defaults['mri_fid_scale'])
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def _panel_default(self):
panel = FiducialsPanel(model=self.model, headview=self.headview)
panel.trait_view('view', view2)
return panel
def _spanel_default(self):
return SubjectSelectorPanel(model=self.model.subject_source)
view = View(HGroup(Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical'),
VGroup(headview_borders,
VGroup(Item('spanel', style='custom'),
label="Subject", show_border=True,
show_labels=False),
VGroup(Item('panel', style="custom"),
label="Fiducials", show_border=True,
show_labels=False),
show_labels=False),
show_labels=False),
resizable=True,
buttons=NoButtons)
def __init__(self, subject=None, subjects_dir=None, **kwargs):
super(FiducialsFrame, self).__init__(**kwargs)
subjects_dir = get_subjects_dir(subjects_dir)
if subjects_dir is not None:
self.spanel.subjects_dir = subjects_dir
if subject is not None:
if subject in self.spanel.subjects:
self.spanel.subject = subject
@on_trait_change('scene.activated')
def _init_plot(self):
self.scene.disable_render = True
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# bem
color = defaults['mri_color']
self.mri_obj = SurfaceObject(points=self.model.points, color=color,
tri=self.model.tris, scene=self.scene)
self.model.on_trait_change(self._on_mri_src_change, 'tris')
self.panel.hsp_obj = self.mri_obj
# fiducials
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
point_scale=self.point_scale)
self.panel.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.sync_trait('point_scale', self.lpa_obj, mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
point_scale=self.point_scale)
self.panel.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.sync_trait('point_scale', self.nasion_obj, mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
point_scale=self.point_scale)
self.panel.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.sync_trait('point_scale', self.rpa_obj, mutual=False)
self.headview.left = True
self.scene.disable_render = False
# picker
self.scene.mayavi_scene.on_mouse_pick(self.panel._on_pick, type='cell')
def _on_mri_src_change(self):
if (not np.any(self.model.points)) or (not np.any(self.model.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.points
self.mri_obj.tri = self.model.tris
self.mri_obj.plot()
| effigies/mne-python | mne/gui/_fiducials_gui.py | Python | bsd-3-clause | 16,311 |
from .measurement import Measurement
from .timer import Timer
from .version import __version__
timer = Timer()
| logicabrity/aeon | aeon/__init__.py | Python | mit | 112 |
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
from __future__ import absolute_import, unicode_literals
from . import SIGNATURE_METHODS, utils
class RequestValidator(object):
"""A validator/datastore interaction base class for OAuth 1 providers.
OAuth providers should inherit from RequestValidator and implement the
methods and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
referred to as `whitelisting or blacklisting`_. Whitelisting is better
    but blacklisting can be useful to spot malicious activity.
    The following methods have a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realms
    The methods above default to whitelisting input parameters, checking that they
are alphanumerical and between a minimum and maximum length. Rather than
overloading the methods a few properties can be used to configure these
methods.
* @safe_characters -> (character set)
* @client_key_length -> (min, max)
* @request_token_length -> (min, max)
* @access_token_length -> (min, max)
* @nonce_length -> (min, max)
* @verifier_length -> (min, max)
* @realms -> [list, of, realms]
Methods used to validate input parameters. These checks usually hit either
persistent or temporary storage such as databases or the filesystem. See
each methods documentation for detailed usage.
The following methods must be implemented:
- validate_client_key
- validate_request_token
- validate_access_token
- validate_timestamp_and_nonce
- validate_redirect_uri
- validate_requested_realms
- validate_realms
- validate_verifier
Methods used to retrieve sensitive information from storage.
The following methods must be implemented:
- get_client_secret
- get_request_token_secret
- get_access_token_secret
- get_rsa_key
Methods used to save credentials.
The following methods must be implemented:
- save_request_token
- save_verifier
- save_access_token
To prevent timing attacks it is necessary to not exit early even if the
client key or resource owner key is invalid. Instead dummy values should
be used during the remaining verification process. It is very important
that the dummy client and token are valid input parameters to the methods
get_client_secret, get_rsa_key and get_(access/request)_token_secret and
that the running time of those methods when given a dummy value remain
equivalent to the running time when given a valid client/resource owner.
The following properties must be implemented:
* @dummy_client
* @dummy_request_token
* @dummy_access_token
    Example implementations have been provided; note that the database used is
a simple dictionary and serves only an illustrative purpose. Use whichever
database suits your project and how to access it is entirely up to you.
The methods are introduced in an order which should make understanding
their use more straightforward and as such it could be worth reading what
follows in chronological order.
.. _`whitelisting or blacklisting`: http://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
"""
def __init__(self):
pass
@property
def allowed_signature_methods(self):
return SIGNATURE_METHODS
@property
def safe_characters(self):
return set(utils.UNICODE_ASCII_CHARACTER_SET)
@property
def client_key_length(self):
return 20, 30
@property
def request_token_length(self):
return 20, 30
@property
def access_token_length(self):
return 20, 30
@property
def timestamp_lifetime(self):
return 600
@property
def nonce_length(self):
return 20, 30
@property
def verifier_length(self):
return 20, 30
@property
def realms(self):
return []
@property
def enforce_ssl(self):
return True
def check_client_key(self, client_key):
"""Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper)
def check_request_token(self, request_token):
"""Checks that the request token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.request_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_nonce(self, nonce):
"""Checks that the nonce only contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.nonce_length
return (set(nonce) <= self.safe_characters and
lower <= len(nonce) <= upper)
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper)
def check_realms(self, realms):
"""Check that the realm is one of a set allowed realms."""
return all((r in self.realms for r in realms))
@property
def dummy_client(self):
"""Dummy client used when an invalid client key is supplied.
:returns: The dummy client key string.
The dummy client should be associated with either a client secret,
        an RSA key, or both, depending on which signature methods are supported.
Providers should make sure that
get_client_secret(dummy_client)
get_rsa_key(dummy_client)
return a valid secret or key for the dummy client.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_request_token(self):
"""Dummy request token used when an invalid token was supplied.
:returns: The dummy request token string.
The dummy request token should be associated with a request token
secret such that get_request_token_secret(.., dummy_request_token)
returns a valid secret.
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_access_token(self):
"""Dummy access token used when an invalid token was supplied.
:returns: The dummy access token string.
The dummy access token should be associated with an access token
secret such that get_access_token_secret(.., dummy_access_token)
returns a valid secret.
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_client_secret(self, client_key, request):
"""Retrieves the client secret associated with the client key.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The client secret as a string.
This method must allow the use of a dummy client_key value.
Fetching the secret using the dummy key must take the same amount of
time as fetching a secret for a valid client::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import ClientSecret
if ClientSecret.has(client_key):
return ClientSecret.get(client_key)
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import ClientSecret
return ClientSecret.get(client_key, 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_request_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the request token.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The token secret as a string.
        This method must allow the use of dummy values and the running time
        must be roughly equivalent to the running time with valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import RequestTokenSecret
if RequestTokenSecret.has(client_key):
return RequestTokenSecret.get((client_key, request_token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import RequestTokenSecret
            return RequestTokenSecret.get((client_key, request_token), 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_access_token_secret(self, client_key, token, request):
"""Retrieves the shared secret associated with the access token.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: An oauthlib.common.Request object.
:returns: The token secret as a string.
        This method must allow the use of dummy values and the running time
        must be roughly equivalent to the running time with valid values::
# Unlikely to be near constant time as it uses two database
# lookups for a valid client, and only one for an invalid.
from your_datastore import AccessTokenSecret
if AccessTokenSecret.has(client_key):
return AccessTokenSecret.get((client_key, request_token))
else:
return 'dummy'
# Aim to mimic number of latency inducing operations no matter
# whether the client is valid or not.
from your_datastore import AccessTokenSecret
            return AccessTokenSecret.get((client_key, request_token), 'dummy')
Note that the returned key must be in plaintext.
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_default_realms(self, client_key, request):
"""Get the default realms for a client.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The list of default realms associated with the client.
The list of default realms will be set during client registration and
is outside the scope of OAuthLib.
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_realms(self, token, request):
"""Get realms associated with a request token.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The list of realms associated with the request token.
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_redirect_uri(self, token, request):
"""Get the redirect URI associated with a request token.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: The redirect URI associated with the request token.
It may be desirable to return a custom URI if the redirect is set to "oob".
In this case, the user will be redirected to the returned URI and at that
endpoint the verifier can be displayed.
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_rsa_key(self, client_key, request):
"""Retrieves a previously stored client provided RSA key.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: The rsa public key as a string.
This method must allow the use of a dummy client_key value. Fetching
the rsa key using the dummy key must take the same amount of time
as fetching a key for a valid client. The dummy key must also be of
the same bit length as client keys.
Note that the key must be returned in plaintext.
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def invalidate_request_token(self, client_key, request_token, request):
"""Invalidates a used request token.
:param client_key: The client/consumer key.
:param request_token: The request token string.
:param request: An oauthlib.common.Request object.
        :returns: None
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_client_key(self, client_key, request):
"""Validates that supplied client key is a registered and valid client.
:param client_key: The client/consumer key.
:param request: An oauthlib.common.Request object.
:returns: True or False
        Note that if the dummy client is supplied it should validate in the same
        or nearly the same amount of time as a valid one.
        Ensure latency inducing tasks are mimicked even for dummy clients.
For example, use::
from your_datastore import Client
try:
return Client.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import Client
if access_token == self.dummy_access_token:
return False
else:
return Client.exists(client_key, access_token)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_request_token(self, client_key, token, request):
"""Validates that supplied request token is registered and valid.
:param client_key: The client/consumer key.
:param token: The request token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
        Note that if the dummy request_token is supplied it should validate in
        nearly the same amount of time as a valid one.
        Ensure latency inducing tasks are mimicked even for dummy clients.
For example, use::
from your_datastore import RequestToken
try:
return RequestToken.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import RequestToken
if access_token == self.dummy_access_token:
return False
else:
return RequestToken.exists(client_key, access_token)
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_access_token(self, client_key, token, request):
"""Validates that supplied access token is registered and valid.
:param client_key: The client/consumer key.
:param token: The access token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
Note that if the dummy access token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
        Ensure latency inducing tasks are mimicked even for dummy clients.
For example, use::
from your_datastore import AccessToken
try:
return AccessToken.exists(client_key, access_token)
except DoesNotExist:
return False
Rather than::
from your_datastore import AccessToken
if access_token == self.dummy_access_token:
return False
else:
return AccessToken.exists(client_key, access_token)
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None, access_token=None):
"""Validates that the nonce has not been used before.
:param client_key: The client/consumer key.
:param timestamp: The ``oauth_timestamp`` parameter.
:param nonce: The ``oauth_nonce`` parameter.
:param request_token: Request token string, if any.
:param access_token: Access token string, if any.
:param request: An oauthlib.common.Request object.
:returns: True or False
Per `Section 3.3`_ of the spec.
"A nonce is a random string, uniquely generated by the client to allow
the server to verify that a request has never been made before and
helps prevent replay attacks when requests are made over a non-secure
channel. The nonce value MUST be unique across all requests with the
same timestamp, client credentials, and token combinations."
.. _`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
One of the first validation checks that will be made is for the validity
of the nonce and timestamp, which are associated with a client key and
possibly a token. If invalid then immediately fail the request
        by returning False. If the nonce/timestamp pair has been used before,
        you may just have detected a replay attack. Therefore it is an essential
part of OAuth security that you not allow nonce/timestamp reuse.
Note that this validation check is done before checking the validity of
the client and token.::
nonces_and_timestamps_database = [
(u'foo', 1234567890, u'rannoMstrInghere', u'bar')
]
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request_token=None, access_token=None):
return ((client_key, timestamp, nonce, request_token or access_token)
not in self.nonces_and_timestamps_database)
This method is used by
* AccessTokenEndpoint
* RequestTokenEndpoint
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_redirect_uri(self, client_key, redirect_uri, request):
"""Validates the client supplied redirection URI.
:param client_key: The client/consumer key.
:param redirect_uri: The URI the client wishes to redirect back to after
authorization is successful.
:param request: An oauthlib.common.Request object.
:returns: True or False
It is highly recommended that OAuth providers require their clients
to register all redirection URIs prior to using them in requests and
register them as absolute URIs. See `CWE-601`_ for more information
about open redirection attacks.
By requiring registration of all redirection URIs it should be
straightforward for the provider to verify whether the supplied
redirect_uri is valid or not.
Alternatively per `Section 2.1`_ of the spec:
"If the client is unable to receive callbacks or a callback URI has
been established via other means, the parameter value MUST be set to
"oob" (case sensitive), to indicate an out-of-band configuration."
.. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
.. _`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1
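A minimal sketch, assuming a hypothetical ``Client`` datastore model that
stores the redirect URIs registered for each client::
    from your_datastore import Client
    registered = Client.get(client_key).redirect_uris
    return redirect_uri in registered or redirect_uri == 'oob'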
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_requested_realms(self, client_key, realms, request):
"""Validates that the client may request access to the realm.
:param client_key: The client/consumer key.
:param realms: The list of realms that client is requesting access to.
:param request: An oauthlib.common.Request object.
:returns: True or False
This method is invoked when obtaining a request token and should
tie a realm to the request token and after user authorization
this realm restriction should transfer to the access token.
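A short sketch, assuming a hypothetical ``Client`` model that records the
realms each client is allowed to request::
    from your_datastore import Client
    allowed = Client.get(client_key).allowed_realms
    return all(r in allowed for r in realms)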
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_realms(self, client_key, token, request, uri=None,
realms=None):
"""Validates access to the request realm.
:param client_key: The client/consumer key.
:param token: A request token string.
:param request: An oauthlib.common.Request object.
:param uri: The URI the realms is protecting.
:param realms: A list of realms that must have been granted to
the access token.
:returns: True or False
How providers choose to use the realm parameter is outside the OAuth
specification but it is commonly used to restrict access to a subset
of protected resources such as "photos".
realms is a convenience parameter which can be used to provide
a per view method pre-defined list of allowed realms.
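A possible sketch, assuming a hypothetical ``AccessToken`` model that stores
the realms granted to each token::
    from your_datastore import AccessToken
    granted = AccessToken.get(client_key, token).realms
    return all(r in granted for r in (realms or []))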
This method is used by
* ResourceEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_verifier(self, client_key, token, verifier, request):
"""Validates a verification code.
:param client_key: The client/consumer key.
:param token: A request token string.
:param verifier: The authorization verifier string.
:param request: An oauthlib.common.Request object.
:returns: True or False
OAuth providers issue a verification code to clients after the
resource owner authorizes access. This code is used by the client to
obtain token credentials and the provider must verify that the
verifier is valid and associated with the client as well as the
resource owner.
Verifier validation should be done in near constant time
(to avoid verifier enumeration). To achieve this we need a
constant time string comparison which is provided by OAuthLib
in ``oauthlib.common.safe_string_equals``::
from your_datastore import Verifier
correct_verifier = Verifier.get(client_key, request_token)
from oauthlib.common import safe_string_equals
return safe_string_equals(verifier, correct_verifier)
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def verify_request_token(self, token, request):
"""Verify that the given OAuth1 request token is valid.
:param token: A request token string.
:param request: An oauthlib.common.Request object.
:returns: True or False
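Can be as simple as the following sketch, assuming a hypothetical
``RequestToken`` datastore model::
    from your_datastore import RequestToken
    return RequestToken.get_by_token(token) is not None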
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def verify_realms(self, token, realms, request):
"""Verify authorized realms to see if they match those given to token.
:param token: An access token string.
:param realms: A list of realms the client attempts to access.
:param request: An oauthlib.common.Request object.
:returns: True or False
This prevents the list of authorized realms sent by the client during
the authorization step to be altered to include realms outside what
was bound with the request token.
Can be as simple as::
valid_realms = self.get_realms(token)
return all((r in valid_realms for r in realms))
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def save_access_token(self, token, request):
"""Save an OAuth1 access token.
:param token: A dict with token credentials.
:param request: An oauthlib.common.Request object.
The token dictionary will at minimum include
* ``oauth_token`` the access token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_authorized_realms`` a space separated list of realms.
Client key can be obtained from ``request.client_key``.
The list of realms (not joined string) can be obtained from
``request.realm``.
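A minimal sketch, assuming a hypothetical ``AccessToken`` model::
    from your_datastore import AccessToken
    AccessToken.create(client_key=request.client_key,
                       token=token['oauth_token'],
                       secret=token['oauth_token_secret'],
                       realms=request.realm)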
This method is used by
* AccessTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def save_request_token(self, token, request):
"""Save an OAuth1 request token.
:param token: A dict with token credentials.
:param request: An oauthlib.common.Request object.
The token dictionary will at minimum include
* ``oauth_token`` the request token string.
* ``oauth_token_secret`` the token specific secret used in signing.
* ``oauth_callback_confirmed`` the string ``true``.
Client key can be obtained from ``request.client_key``.
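A minimal sketch, assuming a hypothetical ``RequestToken`` model::
    from your_datastore import RequestToken
    RequestToken.create(client_key=request.client_key,
                        token=token['oauth_token'],
                        secret=token['oauth_token_secret'])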
This method is used by
* RequestTokenEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
def save_verifier(self, token, verifier, request):
"""Associate an authorization verifier with a request token.
:param token: A request token string.
:param verifier: A dictionary containing the oauth_verifier and
oauth_token
:param request: An oauthlib.common.Request object.
We need to associate verifiers with tokens for validation during the
access token request.
Note that unlike the save_*_token methods, the token here is the
``oauth_token`` string from the request token saved previously.
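A minimal sketch, assuming a hypothetical ``Verifier`` model::
    from your_datastore import Verifier
    Verifier.create(request_token=token,
                    verifier=verifier['oauth_verifier'])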
This method is used by
* AuthorizationEndpoint
"""
raise NotImplementedError("Subclasses must implement this function.")
| google/contentbox | third_party/oauthlib/oauth1/rfc5849/request_validator.py | Python | apache-2.0 | 28,693 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from . import bigquery
from . import sql
| okfn/datapackage-storage-py | examples/storages/__init__.py | Python | mit | 216 |
import torch
from torch.optim.optimizer import Optimizer, required
_available = False
try:
from pcl_embedding_bag import bf16_update
_available = True
except ImportError as e:
#print(e)
pass
def is_available():
return _available
class SplitSGD(Optimizer):
r"""Implements low precision stochastic gradient descent with extra state."""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if not is_available():
raise ValueError("Module function 'bf16_update' not available for SplitSGD")
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum != 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay != 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(SplitSGD, self).__init__(params, defaults)
print("Using SplitSGD")
def __setstate__(self, state):
super(SplitSGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if p.dtype == torch.bfloat16:
param_state = self.state[p]
if 'low_bits' not in param_state:
buf = param_state['low_bits'] = torch.zeros_like(p.data, dtype=torch.short)
else:
buf = param_state['low_bits']
# if weight_decay != 0:
# d_p = d_p.add(weight_decay, p.data)
# if momentum != 0:
# param_state = self.state[p]
# if 'momentum_buffer' not in param_state:
# buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
# else:
# buf = param_state['momentum_buffer']
# buf.mul_(momentum).add_(1 - dampening, d_p)
# if nesterov:
# d_p = d_p.add(momentum, buf)
# else:
# d_p = buf
#p.data.add_(-group['lr'], d_p)
if p.dtype == torch.bfloat16:
bf16_update(p.data, buf, d_p, -group['lr'])
else:
p.data.add_(d_p, alpha=-group['lr'])
return loss
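# A minimal usage sketch (the model below is hypothetical). SplitSGD behaves
# like plain SGD, but for bfloat16 parameters it keeps the lower 16 bits of
# each weight in an extra 'low_bits' state buffer, so the update applied via
# the fused bf16_update kernel is effectively performed in fp32 precision:
#
#   model = MyBf16Model()                     # hypothetical bf16 model
#   optimizer = SplitSGD(model.parameters(), lr=0.01)
#   loss = model(batch)
#   loss.backward()
#   optimizer.step()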
| hfp/libxsmm | samples/deeplearning/sparse_training/dlrm/split_sgd.py | Python | bsd-3-clause | 3,392 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_linkagg
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage link aggregation groups on Juniper JUNOS network devices
description:
- This module provides declarative management of link aggregation groups
on Juniper JUNOS network devices.
options:
name:
description:
- Name of the link aggregation group.
required: true
mode:
description:
- Mode of the link aggregation group. A value of C(on) will enable LACP in C(passive) mode.
C(active) configures the link to actively send information about the state of the link,
or it can be configured in C(passive) mode i.e. send link state information only when
it is received from another link. A value of C(off) will disable LACP.
default: off
choices: ['on', 'off', 'active', 'passive']
members:
description:
- List of members interfaces of the link aggregation group. The value can be
single interface or list of interfaces.
required: true
min_links:
description:
- Minimum members that should be up
before bringing up the link aggregation group.
device_count:
description:
- Number of aggregated ethernet devices that can be configured.
Acceptable integer value is between 1 and 128.
description:
description:
- Description of Interface.
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent', 'up', 'down']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: configure link aggregation
junos_linkagg:
name: ae11
members:
- ge-0/0/5
- ge-0/0/6
- ge-0/0/7
mode: active
device_count: 4
state: present
- name: delete link aggregation
junos_linkagg:
name: ae11
members:
- ge-0/0/5
- ge-0/0/6
- ge-0/0/7
mode: active
device_count: 4
state: absent
- name: deactivate link aggregation
junos_linkagg:
name: ae11
members:
- ge-0/0/5
- ge-0/0/6
- ge-0/0/7
mode: active
device_count: 4
state: present
active: False
- name: Activate link aggregation
junos_linkagg:
name: ae11
members:
- ge-0/0/5
- ge-0/0/6
- ge-0/0/7
mode: active
device_count: 4
state: present
active: True
- name: Disable link aggregation
junos_linkagg:
name: ae11
state: down
- name: Enable link aggregation
junos_linkagg:
name: ae11
state: up
"""
RETURN = """
diff:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: string
sample: >
[edit interfaces]
+ ge-0/0/6 {
+ ether-options {
+ 802.3ad ae0;
+ }
+ }
[edit interfaces ge-0/0/7]
+ ether-options {
+ 802.3ad ae0;
+ }
[edit interfaces]
+ ae0 {
+ description "configured by junos_linkagg";
+ aggregated-ether-options {
+ lacp {
+ active;
+ }
+ }
+ }
"""
import collections
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele, to_param_list
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config, get_configuration
try:
from lxml.etree import tostring
except ImportError:
from xml.etree.ElementTree import tostring
USE_PERSISTENT_CONNECTION = True
def validate_device_count(value, module):
if value and not 1 <= value <= 128:
module.fail_json(msg='device_count must be between 1 and 128')
def validate_min_links(value, module):
if value and not 1 <= value <= 8:
module.fail_json(msg='min_links must be between 1 and 8')
def validate_param_values(module, obj, item):
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(item.get(key), module)
def configure_lag_params(module, requests, item):
top = 'interfaces/interface'
param_lag_to_xpath_map = collections.OrderedDict()
param_lag_to_xpath_map.update([
('name', {'xpath': 'name', 'is_key': True}),
('description', 'description'),
('min_links', {'xpath': 'minimum-links', 'top': 'aggregated-ether-options'}),
('disable', {'xpath': 'disable', 'tag_only': True}),
('mode', {'xpath': item['mode'], 'tag_only': True, 'top': 'aggregated-ether-options/lacp'}),
])
validate_param_values(module, param_lag_to_xpath_map, item)
want = map_params_to_obj(module, param_lag_to_xpath_map, param=item)
ele = map_obj_to_ele(module, want, top, param=item)
requests.append(ele)
if item['device_count']:
top = 'chassis/aggregated-devices/ethernet'
device_count_to_xpath_map = {'device_count': {'xpath': 'device-count', 'leaf_only': True}}
validate_param_values(module, device_count_to_xpath_map, item)
want = map_params_to_obj(module, device_count_to_xpath_map, param=item)
ele = map_obj_to_ele(module, want, top, param=item)
requests.append(ele)
def configure_member_params(module, requests, item):
top = 'interfaces/interface'
members = item['members']
if members:
member_to_xpath_map = collections.OrderedDict()
member_to_xpath_map.update([
('name', {'xpath': 'name', 'is_key': True, 'parent_attrib': False}),
('bundle', {'xpath': 'bundle', 'leaf_only': True, 'top': 'ether-options/ieee-802.3ad', 'is_key': True}),
])
# link aggregation bundle assigned to member
item['bundle'] = item['name']
for member in members:
if item['state'] == 'absent':
# if link aggregate bundle is not assigned to member, trying to
# delete it results in rpc-reply error, hence if is not assigned
# skip deleting it and continue to next member.
resp = get_configuration(module)
bundle = resp.xpath("configuration/interfaces/interface[name='%s']/ether-options/"
"ieee-802.3ad[bundle='%s']" % (member, item['bundle']))
if not bundle:
continue
# Name of member to be assigned to link aggregation bundle
item['name'] = member
validate_param_values(module, member_to_xpath_map, item)
want = map_params_to_obj(module, member_to_xpath_map, param=item)
ele = map_obj_to_ele(module, want, top, param=item)
requests.append(ele)
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
mode=dict(default='on', choices=['on', 'off', 'active', 'passive']),
members=dict(type='list'),
min_links=dict(type='int'),
device_count=dict(type='int'),
description=dict(),
state=dict(default='present', choices=['present', 'absent', 'up', 'down']),
active=dict(default=True, type='bool')
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(junos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
params = to_param_list(module)
requests = list()
for param in params:
# if key doesn't exist in the item, get it from module.params
for key in param:
if param.get(key) is None:
param[key] = module.params[key]
item = param.copy()
state = item.get('state')
item['disable'] = True if state == 'down' else False
if state in ('present', 'up', 'down'):
item['state'] = 'present'
else:
item['disable'] = True
mode = item.get('mode')
if mode == 'off':
item['mode'] = ''
elif mode == 'on':
item['mode'] = 'passive'
configure_lag_params(module, requests, item)
configure_member_params(module, requests, item)
diff = None
with locked_config(module):
for req in requests:
diff = load_config(module, tostring(req), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
| konstruktoid/ansible-upstream | lib/ansible/modules/network/junos/junos_linkagg.py | Python | gpl-3.0 | 10,827 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = ['asset', 'contributor', 'authoring_tool']
attrName = ''
attrVal = ''
dataToCheck = ''
childList = ['author', 'comments', 'copyright', 'source_data']
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data, _childList):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.childList = _childList
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Check for preservation of element
self.__assistant.ElementDataExists(context, self.tagList)
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic; this object could also include
# additional tests that are specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate; this object could also include
# additional tests that are specific to the advanced badge.
def JudgeExemplary(self, context):
# if superior fails, no point in further checking
if (self.status_superior == False):
self.status_exemplary = self.status_superior
return self.status_exemplary
self.status_exemplary = self.__assistant.checkShallowElePreservationByChild(context, self.tagList, self.childList)
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck, childList);
| KhronosGroup/COLLADA-CTS | StandardDataSets/collada/asset/contributor/one_contributor/one_contributor.py | Python | mit | 4,073 |
from numpy.random import randn
import numpy as np
import matplotlib.pyplot as plt
# ----------------------------------------------------------------------
# Numpy ndarray: multidimensional array object
data = randn(2, 3)
print(data)
print(data * 10)
print(data + data)
print(data.shape)
print(data.dtype)
# Creating ndarrays
data1 = [6, 7.5, 8, 0, 1] # Python list
arr1 = np.array(data1) # Numpy 1-d array
print(arr1)
data2 = [[1, 2, 3, 4], [5, 6, 7, 8]] # Python nested list
arr2 = np.array(data2) # Numpy 2-d array
print(arr2)
print(arr2.ndim)
print(arr2.shape)
print(arr1.dtype)
print(arr2.dtype)
# Arrays of zeros, ones, empty, and ranges
zeros1 = np.zeros(10)
zeros2 = np.zeros((3, 6))
empty1 = np.empty((2, 3, 2))
ones1 = np.ones((4, 5))
x1 = np.arange(15)
# Specifying data types for ndarrays
arr1 = np.array([1, 2, 3], dtype=np.float64)
arr2 = np.array([1, 2, 3], dtype=np.int32)
print(arr1.dtype)
print(arr2.dtype)
# Implicit type definition based on array contents
arr = np.array([1, 2, 3, 4, 5])
print(arr.dtype)
# Casting from one type to another with astype()
float_arr = arr.astype(np.float64)
print(float_arr.dtype)
arr = np.array([3.7, -1.2, -2.6, 0.5, 12.9, 10.1])
int_arr = arr.astype(np.int32)
print(arr)
print(int_arr)
# Converting numeric strings to numbers
numeric_strings = np.array(['1.25', '-9.6', '42'], dtype=np.string_)
arr = numeric_strings.astype(float) # Can leave out the 64
int_array = np.arange(10)
calibers = np.array([.22, .270, .357, .380, .44, .50], dtype=np.float64)
arr = int_array.astype(calibers.dtype)
empty_uint32 = np.empty(8, dtype='u4')
print(empty_uint32)
# Operations between arrays and scalars
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr)
print(arr * arr)
print(arr - arr)
print(1 / arr)
print(arr ** 0.5)
# ----------------------------------------------------------------------
# Basic indexing and slicing
# -- For 1-d arrays
arr = np.arange(10)
print(arr)
print(arr[5])
print(arr[5:8])
arr[5:8] = 12
print(arr)
# -- For 2-d arrays
arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(arr2d[2])
print(arr2d[0][2])
print(arr2d[0, 2])
# -- For 3-d arrays
arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(arr3d)
print(arr3d[0])
old_values = arr3d[0].copy()
arr3d[0] = 42
print(arr3d)
arr3d[0] = old_values
print(arr3d)
print(arr3d[1, 0])
# Indexing with slices
print(arr)
print(arr[1:6])
print(arr2d)
print(arr2d[:2])
print(arr2d[:2, 1:])
print(arr2d[1, :2])
print(arr2d[2, :1])
print(arr2d[:, :1])
arr2d[:2, 1:] = 0
print(arr2d)
# Boolean indexing
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
data = randn(7, 4)
print(names)
print(data)
print(names == 'Bob')
print(data[names == 'Bob', 2:])
print(data[names == 'Bob', 3])
print(names != 'Bob')
print(data[~(names == 'Bob')])
mask = (names == 'Bob') | (names == 'Will')
print(mask)
print(data[mask])
data[data < 0] = 0
print(data)
data[names != 'Joe'] = 7
print(data)
# Fancy indexing
arr = np.empty((8,4))
for i in range(8):
arr[i] = i
print(arr)
print(arr[[4, 3, 0, 6]])
print(arr[[-3, -5, -7]])
arr = np.arange(32).reshape((8, 4))
print(arr)
print(arr[[1, 5, 7, 2], [0, 3, 1, 2]])
print(arr[[1, 5, 7, 2]][:, [0, 3, 1, 2]])
print(arr[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
# ----------------------------------------------------------------------
# Transposing arrays and swapping axes
arr = np.arange(15).reshape((3, 5))
print(arr)
print(arr.T) # Transpose
arr = np.random.randn(6, 3)
print(arr)
print(np.dot(arr.T, arr))
arr = np.arange(16).reshape((2, 2, 4))
print(arr)
print(arr.transpose((1, 0, 2)))
print(arr.swapaxes(1, 2))
# ----------------------------------------------------------------------
# Universal functions: fast element-wise array functions
arr = np.arange(10)
print(np.sqrt(arr))
print(np.exp(arr))
x = randn(8)
y = randn(8)
print(x)
print(y)
print(np.maximum(x, y)) # element-wise maximum
arr = randn(7) * 5
print(np.modf(arr))
# ----------------------------------------------------------------------
# Data processing using arrays
points = np.arange(-5, 5, 0.01) # 1000 equally spaced points
xs, ys = np.meshgrid(points, points)
z = np.sqrt(xs ** 2 + ys ** 2)
print(z)
plt.imshow(z, cmap=plt.cm.gray)
plt.colorbar()
plt.title(r'Image plot of $\sqrt{x^2 + y^2}$ for a grid of values')
plt.draw()
# ----------------------------------------------------------------------
# Expressing conditional logic as array operations
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
result = [(x if c else y)
for x, y, c in zip(xarr, yarr, cond)]
result
result = np.where(cond, xarr, yarr)
result
arr = randn(4, 4)
arr
np.where(arr > 0, 2, -2)
np.where(arr > 0, 2, arr) # set only positive values to 2
'''
# Not to be executed
result = []
for i in range(n):
if cond1[i] and cond2[i]:
result.append(0)
elif cond1[i]:
result.append(1)
elif cond2[i]:
result.append(2)
else:
result.append(3)
# Not to be executed
np.where(cond1 & cond2, 0,
np.where(cond1, 1,
np.where(cond2, 2, 3)))
# Not to be executed
result = 1 * cond1 + 2 * cond2 + 3 * -(cond1 | cond2)
'''
# ----------------------------------------------------------------------
# Mathematical and statistical methods
arr = np.random.randn(5, 4) # normally-distributed data
arr.mean()
np.mean(arr)
arr.sum()
arr.mean(axis=1)
arr.sum(0)
arr = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
arr.cumsum(0)
arr.cumprod(1)
# ----------------------------------------------------------------------
# Methods for boolean arrays
arr = randn(100)
(arr > 0).sum() # Number of positive values
bools = np.array([False, False, True, False])
bools.any()
bools.all()
# ----------------------------------------------------------------------
# Sorting
arr = randn(8)
arr
arr.sort()
arr
arr = randn(5, 3)
arr
arr.sort(1)
arr
large_arr = randn(1000)
large_arr.sort()
large_arr[int(0.05 * len(large_arr))] # 5% quantile
# ----------------------------------------------------------------------
# Unique and other set logic
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
np.unique(names)
ints = np.array([3, 3, 3, 2, 2, 1, 1, 4, 4])
np.unique(ints)
sorted(set(names))
values = np.array([6, 0, 0, 3, 2, 5, 6])
np.in1d(values, [2, 3, 6])
# ----------------------------------------------------------------------
# File input and output with arrays
# Storing arrays on disk in binary format
arr = np.arange(10)
np.save('some_array', arr)
np.load('some_array.npy')
np.savez('array_archive.npz', a=arr, b=arr)
arch = np.load('array_archive.npz')
arch['b']
'''
!rm some_array.npy
!rm array_archive.npz
'''
# Saving and loading text files
'''
!cat array_ex.txt
arr = np.loadtxt('array_ex.txt', delimiter=',')
arr
'''
# ----------------------------------------------------------------------
# Linear algebra
x = np.array([[1., 2., 3.], [4., 5., 6.]])
y = np.array([[6., 23.], [-1, 7], [8, 9]])
x
y
x.dot(y) # equivalently np.dot(x, y)
np.dot(x, np.ones(3))
np.random.seed(12345)
from numpy.linalg import inv, qr
X = randn(5, 5)
mat = X.T.dot(X)
inv(mat)
mat.dot(inv(mat))
q, r = qr(mat)
r
# ----------------------------------------------------------------------
# Random number generation
samples = np.random.normal(size=(4, 4))
samples
from random import normalvariate
N = 1000000
'''
%timeit samples = [normalvariate(0, 1) for _ in xrange(N)]
%timeit np.random.normal(size=N)
'''
# ----------------------------------------------------------------------
# Example: Random Walks
import random
position = 0
walk = [position]
steps = 1000
for i in xrange(steps):
step = 1 if random.randint(0, 1) else -1
position += step
walk.append(position)
np.random.seed(12345)
nsteps = 1000
draws = np.random.randint(0, 2, size=nsteps)
steps = np.where(draws > 0, 1, -1)
walk = steps.cumsum()
walk.min()
walk.max()
(np.abs(walk) >= 10).argmax()
# Simulating many random walks at once
nwalks = 5000
nsteps = 1000
draws = np.random.randint(0, 2, size=(nwalks, nsteps)) # 0 or 1
steps = np.where(draws > 0, 1, -1)
walks = steps.cumsum(1)
walks
walks.max()
walks.min()
hits30 = (np.abs(walks) >= 30).any(1)
hits30
hits30.sum() # Number that hit 30 or -30
crossing_times = (np.abs(walks[hits30]) >= 30).argmax(1)
crossing_times.mean()
steps = np.random.normal(loc=0, scale=0.25,
size=(nwalks, nsteps))
| jenfly/python-practice | pydata-book/numpy_basics.py | Python | mit | 8,461 |
#
# tests/test_service_manager.py - unit test for the service manager.
#
# Copyright (c) 2018 SingularityNET
#
# Distributed under the MIT software license, see LICENSE file.
#
import logging
import pytest
from examples import multi_agent_adapter
from sn_agent import ontology
from sn_agent.log import setup_logging
from sn_agent.service_adapter import setup_service_manager, ServiceManager
from sn_agent.test.mocks import MockApp
import tests
log = logging.getLogger(__name__)
@pytest.fixture
def app():
app = MockApp()
ontology.setup_ontology(app)
return app
def check_adapter(service_manager: ServiceManager, service_id: int, klass):
service_adapter = service_manager.get_service_adapter_for_id(service_id)
assert (not service_adapter is None)
assert (isinstance(service_adapter, klass))
def test_service_manager(app):
print()
setup_logging()
log.debug("--- test_service_manager ---")
setup_service_manager(app)
# Exercise the service manager methods.
assert (not app['service_manager'] is None)
service_manager = app['service_manager']
check_adapter(service_manager, tests.DOCUMENT_SUMMARIZER_ID, multi_agent_adapter.document_summarizer.DocumentSummarizer)
check_adapter(service_manager, tests.ENTITY_EXTRACTER_ID, multi_agent_adapter.entity_extracter.EntityExtracter)
check_adapter(service_manager, tests.FACE_RECOGNIZER_ID, multi_agent_adapter.face_recognizer.FaceRecognizer)
check_adapter(service_manager, tests.TEXT_SUMMARIZER_ID, multi_agent_adapter.text_summarizer.TextSummarizer)
check_adapter(service_manager, tests.VIDEO_SUMMARIZER_ID, multi_agent_adapter.video_summarizer.VideoSummarizer)
check_adapter(service_manager, tests.WORD_SENSE_DISAMBIGUATER_ID, multi_agent_adapter.word_sense_disambiguater.WordSenseDisambiguater)
service_adapter = service_manager.get_service_adapter_for_id(tests.DOCUMENT_SUMMARIZER_ID)
assert (not service_adapter is None)
assert (isinstance(service_adapter, multi_agent_adapter.document_summarizer.DocumentSummarizer))
service_adapter = service_manager.get_service_adapter_for_id(tests.ENTITY_EXTRACTER_ID)
assert (not service_adapter is None)
assert (isinstance(service_adapter, multi_agent_adapter.entity_extracter.EntityExtracter))
service_adapter = service_manager.get_service_adapter_for_id(tests.FACE_RECOGNIZER_ID)
assert (not service_adapter is None)
assert (isinstance(service_adapter, multi_agent_adapter.face_recognizer.FaceRecognizer))
service_adapter = service_manager.get_service_adapter_for_id(tests.TEXT_SUMMARIZER_ID)
assert (not service_adapter is None)
assert (isinstance(service_adapter, multi_agent_adapter.text_summarizer.TextSummarizer))
service_adapter = service_manager.get_service_adapter_for_id(tests.VIDEO_SUMMARIZER_ID)
assert (not service_adapter is None)
assert (isinstance(service_adapter, multi_agent_adapter.video_summarizer.VideoSummarizer))
service_adapter = service_manager.get_service_adapter_for_id(tests.WORD_SENSE_DISAMBIGUATER_ID)
assert (not service_adapter is None)
assert (isinstance(service_adapter, multi_agent_adapter.word_sense_disambiguater.WordSenseDisambiguater))
def test_start_stop_services(app):
print()
setup_logging()
log.debug("")
log.debug("--- test_start_stop_services ---")
setup_service_manager(app)
# Start and stop some services.
assert (not app['service_manager'] is None)
service_manager = app['service_manager']
service_manager.start(tests.DOCUMENT_SUMMARIZER_ID)
service_manager.start(tests.WORD_SENSE_DISAMBIGUATER_ID)
service_manager.start(tests.ENTITY_EXTRACTER_ID)
service_manager.stop(tests.ENTITY_EXTRACTER_ID)
service_manager.stop(tests.WORD_SENSE_DISAMBIGUATER_ID)
service_manager.stop(tests.DOCUMENT_SUMMARIZER_ID)
| singnet/singnet | agent/tests/test_service_manager.py | Python | mit | 3,864 |
# coding: utf-8
from django.db.models import Q
from django.views import generic
# from django.views.generic.dates import (YearArchiveView, MonthArchiveView,
# DayArchiveView)
from esperancanordeste.context_processors import EnterpriseExtraContext
from esperancanordeste.catalog.models import Catalog, Category, Product
class ProductListView(EnterpriseExtraContext, generic.ListView):
queryset = Product.published.all()
template_name = 'catalog/catalog_home.html'
# TODO: change the pagination to 20
paginate_by = 20
def get_queryset(self, **kwargs):
search = self.request.GET.get('search', '')
if search:
obj_lst = Product.published.filter(
Q(name__icontains=search) |
Q(description__icontains=search))
else:
obj_lst = Product.published.all()
return obj_lst
def get_context_data(self, **kwargs):
context = super(ProductListView, self).get_context_data(**kwargs)
search = self.request.GET.get('search', '')
context['search'] = search
context['category_list'] = Category.objects.all()
context['catalog_list'] = Catalog.objects.all()
return context
class ProductCategoryListView(ProductListView):
"""
Inherits from ProductListView, changing the filter to the selected category
"""
def get_queryset(self):
"""
Include only the Products that belong to the selected category
"""
return Product.published.filter(
category__slug=self.kwargs['category_slug'])
class ProductDetailListView(ProductListView):
"""
Inherits from ProductListView, filtering by the selected category and product slug
"""
template_name = 'catalog/catalog_detail.html'
def get_queryset(self):
"""
Incluir apenas as Entries marcadas com a tag selecionada
"""
return Product.published.filter(
category__slug=self.kwargs['category_slug'],
slug=self.kwargs['slug'])
| klebercode/esperancanordeste | esperancanordeste/catalog/views.py | Python | mit | 2,030 |
# stdlib
from contextlib import contextmanager
from random import random, randrange
import os
import subprocess
import sys
import time
import unittest
import urllib as url
# 3p
from mock import patch
from nose.plugins.attrib import attr
# project
# needed because of the subprocess calls
sys.path.append(os.getcwd())
from ddagent import Application
from utils.watchdog import WatchdogPosix as Watchdog
class WatchdogKill(Exception):
"""
The watchdog attempted to kill the process.
"""
pass
@attr('unix')
@attr(requires='core_integration')
class TestWatchdog(unittest.TestCase):
"""
Test watchdog in various conditions
"""
JITTER_FACTOR = 2
@contextmanager
def set_time(self, time):
"""
Helper: a context manager that mocks util.time.time to a fixed value.
"""
# Set the current time within `util` module
mock_time = patch("util.time.time")
mock_time.start().return_value = time
# Yield
yield
# Unset the time mock
mock_time.stop()
@patch.object(Watchdog, 'self_destruct', side_effect=WatchdogKill)
def test_watchdog_frenesy_detection(self, mock_restarted):
"""
The watchdog restarts the process on suspiciously high activity.
"""
# Limit the restart timeframe for test purpose
Watchdog._RESTART_TIMEFRAME = 1
# Create a watchdog with a low activity tolerance
process_watchdog = Watchdog(10, max_resets=3)
ping_watchdog = process_watchdog.reset
with self.set_time(1):
# Can be reset 3 times within the watchdog timeframe
for x in xrange(0, 3):
ping_watchdog()
# On the 4th attempt, the watchdog detects a suspiciously high activity
self.assertRaises(WatchdogKill, ping_watchdog)
with self.set_time(3):
# Gets back to normal when the activity timeframe expires.
ping_watchdog()
def test_watchdog(self):
"""
Verify that the watchdog kills the process even when it is spinning
Verify that the watchdog kills the process when it is hanging
"""
start = time.time()
try:
subprocess.check_call(["python", __file__, "busy"], stderr=subprocess.STDOUT)
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Start pseudo web server
subprocess.Popen(["nc", "-l", "31834"])
start = time.time()
try:
subprocess.check_call(["python", __file__, "net"])
raise Exception("Should have died with an error")
except subprocess.CalledProcessError:
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
# Normal loop, should run 5 times
start = time.time()
try:
subprocess.check_call(["python", __file__, "normal"])
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 5)
except subprocess.CalledProcessError:
self.fail("Watchdog killed normal process after %s seconds" % int(time.time() - start))
# Fast tornado, not killed
start = time.time()
p = subprocess.Popen(["python", __file__, "fast"])
p.wait()
duration = int(time.time() - start)
# should die as soon as flush_trs has been called
self.assertTrue(duration < self.JITTER_FACTOR * 10)
# Slow tornado, killed by the Watchdog
start = time.time()
p = subprocess.Popen(["python", __file__, "slow"])
p.wait()
duration = int(time.time() - start)
self.assertTrue(duration < self.JITTER_FACTOR * 4)
class MockTxManager(object):
def flush(self):
"Pretend to flush for a long time"
time.sleep(5)
sys.exit(0)
class MemoryHogTxManager(object):
def __init__(self, watchdog):
self._watchdog = watchdog
def flush(self):
rand_data = []
while True:
rand_data.append('%030x' % randrange(256**15))
self._watchdog.reset()
class PseudoAgent(object):
"""Same logic as the agent, simplified"""
AGENT_CONFIG = {
"bind_host": "localhost",
'endpoints': {
'https://app.datadoghq.com': ['api_key']
},
'forwarder_timeout': 5
}
def busy_run(self):
w = Watchdog(5)
w.reset()
while True:
random()
def hanging_net(self):
w = Watchdog(5)
w.reset()
x = url.urlopen("http://localhost:31834")
print "ERROR Net call returned", x
return True
def normal_run(self):
w = Watchdog(2)
w.reset()
for i in range(5):
time.sleep(1)
w.reset()
def slow_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(4)
a._tr_manager = MockTxManager()
a.run()
def fast_tornado(self):
a = Application(12345, self.AGENT_CONFIG)
a._watchdog = Watchdog(6)
a._tr_manager = MockTxManager()
a.run()
if __name__ == "__main__":
if sys.argv[1] == "busy":
a = PseudoAgent()
a.busy_run()
elif sys.argv[1] == "net":
a = PseudoAgent()
a.hanging_net()
elif sys.argv[1] == "normal":
a = PseudoAgent()
a.normal_run()
elif sys.argv[1] == "slow":
a = PseudoAgent()
a.slow_tornado()
elif sys.argv[1] == "fast":
a = PseudoAgent()
a.fast_tornado()
elif sys.argv[1] == "test":
t = TestWatchdog()
t.runTest()
elif sys.argv[1] == "memory":
a = PseudoAgent()
a.use_lots_of_memory()
| takus/dd-agent | tests/core/test_watchdog.py | Python | bsd-3-clause | 5,895 |
# Copyright 2015 HuaWei Technologies.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions
from neutron.extensions import stdattrs_common
# Attribute Map
CREATED = 'created_at'
UPDATED = 'updated_at'
TIMESTAMP_BODY = {
CREATED: {'allow_post': False, 'allow_put': False,
'is_visible': True, 'default': None
},
UPDATED: {'allow_post': False, 'allow_put': False,
'is_visible': True, 'default': None
},
}
class Timestamp(extensions.ExtensionDescriptor):
"""Extension class supporting timestamp.
This class is used by neutron's extension framework for adding timestamp
to neutron core resources.
"""
@classmethod
def get_name(cls):
return "Resource timestamps"
@classmethod
def get_alias(cls):
return "standard-attr-timestamp"
@classmethod
def get_description(cls):
return ("Adds created_at and updated_at fields to all Neutron "
"resources that have Neutron standard attributes.")
@classmethod
def get_updated(cls):
return "2016-09-12T10:00:00-00:00"
def get_extended_resources(self, version):
if version != "2.0":
return {}
return stdattrs_common.stdattrs_extended_resources(TIMESTAMP_BODY)
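# Rough usage sketch (collection names are illustrative): for a plugin that
# supports this extension, get_extended_resources("2.0") returns roughly a
# mapping such as {'ports': TIMESTAMP_BODY, 'networks': TIMESTAMP_BODY, ...},
# i.e. every resource with Neutron standard attributes gains read-only
# created_at/updated_at fields in its API representation.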
| noironetworks/neutron | neutron/extensions/timestamp.py | Python | apache-2.0 | 1,852 |
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ...sipmessaging import SIPHeaderField
from ...sipmessaging import classproperty
class PPreferredIdentitySIPHeaderField(SIPHeaderField):
# noinspection PyNestedDecorators
@classproperty
@classmethod
def canonical_field_name(cls):
return 'P-Preferred-Identity'
@classmethod
def new_for_attributes(cls, field_name="P-Preferred-Identity", field_value_string=""):
return cls.new_for_field_name_and_value_string(field_name=field_name, field_value_string=field_value_string)
@property
def is_p_preferred_identity(self):
return True
| bobjects/BobStack | bobstack/sipmessaging/concreteheaderfields/pPreferredIdentitySIPHeaderField.py | Python | apache-2.0 | 685 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Method collection to obtain optical system information
This module contains a collection of methods to obtain information about,
and to analyze, optical systems
'''
__all__=["intersection", "nearest_points", "chief_ray_search", "pupil_location",
"paraxial_location", "find_apperture", "find_ppp",
"get_optical_path_ep", "find_reference_sphere_radius",
"parallel_propagate", "parallel_propagate_ns", "ray_paths" ]
from pyoptools.raytrace.ray import Ray
from pyoptools.misc.pmisc import cross
from pyoptools.raytrace.system import System
from pyoptools.raytrace.component import Component
from pyoptools.raytrace.comp_lib import CCD
from pyoptools.raytrace.surface import Spherical
#from gui.plot_frame import PlotFrame
from pyoptools.raytrace.shape import Circular
from numpy import inf, sqrt, square, pi, dot, array, arctan2, alltrue, isnan,\
nan, mgrid,where
from scipy.optimize.minpack import fsolve
from numpy.random import normal
import multiprocessing as mp
#******Logger definition *******#
#import logging
#log= logging.getLogger("ray_trace.calc")
def intersection(r1,r2):
'''
Return the point of intersection between the rays r1 and r2.
Parameters
----------
r1,r2 : :class:`~pyoptools.raytrace.ray.Ray`
Rays to test for intersection.
Returns
-------
ip : tuple(float, float, float)
Intersection point coordinates. If the rays do not intersect
ip=(nan,nan,nan)
rv : bool
Indicates if the intersection point represent a real image (rv=true),
or a virtual image (rv=false). In this case virtual has the same meaning
as in virtual image i.e. intersection point is not in the actual path,
or is behind the ray's origin.
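Examples
--------
A minimal sketch (assuming the default ``Ray`` constructor arguments)::
    r1 = Ray(pos=(0., 0., 0.), dir=(0., 0., 1.))
    r2 = Ray(pos=(0., 1., 0.), dir=(0., -1., 1.))
    ip, real = intersection(r1, r2)
    # ip is approximately (0., 0., 1.) and real is True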
'''
d1=r1.dir
d2=r2.dir
p1=r1.pos
p2=r2.pos
d1xd2=cross(d1,d2)
# check if the rays are parallel
#log.info("Vector cross product:"+str(d1xd2))
if dot(d1xd2,d1xd2)==0. :
return array((nan,nan,nan)),False
p2p1xv2=cross(p2-p1,d2)
p2p1xv1=cross(p2-p1,d1)
a=p2p1xv2/d1xd2
b=p2p1xv1/d1xd2
# Remove the nan from the list
keep=~isnan(a)
an=a[keep]
keep=~isnan(b)
bn=b[keep]
ip=array((nan,nan,nan))
rv=False
#print an,bn
if len(an)>0:
if alltrue(an==an[0]) :
ip=p1+an[0]*d1
# check if all the solutions are equal
if alltrue(an>=0) and alltrue(bn>=0):
rv=True
#log.info("Intersection point found at:"+str(ip)+" "+str(rv))
return ip,rv
def nearest_points(ray1, ray2):
'''
Return the nearest points between 2 rays.
The image point locations in optical systems are usually found by
calculating the intersection between rays coming from a single object
point, but in aberrated systems, the 2 rays will not really intersect.
This function is used to find the point in space where the rays
are closest to each other. If the rays intersect the values returned
will be the intersection point.
The solution was taken from:
http://homepage.univie.ac.at/Franz.Vesely/notes/hard_sticks/hst/hst.html
Parameters
----------
r1,r2 : :class:`~pyoptools.raytrace.ray.Ray`
Rays to test for intersection.
Returns
-------
p1 : tuple(float, float, float)
Coordinates of the point living on ray 1 closest to ray 2
p2 : tuple(float, float, float)
Coordinates of the point living on ray 2 closest to ray 1
d : float
The distance between p1 and p2
rv : bool
Indicates if the intersection is real or virtual. rv=True for
real, rv=False for virtual. In this case virtual has the same meaning
as in virtual image i.e. p1 and p2 are not in the actual path, or are
behind the ray's origin.
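Examples
--------
A minimal sketch (assuming the default ``Ray`` constructor arguments)::
    r1 = Ray(pos=(0., 0., 0.), dir=(0., 0., 1.))
    r2 = Ray(pos=(1., 0., 0.), dir=(0., 1., 0.))
    p1, p2, d, real = nearest_points(r1, r2)
    # p1 is near (0., 0., 0.), p2 is near (1., 0., 0.), d is 1.0, real is True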
'''
r1=ray1.pos
e1=ray1.dir
r2=ray2.pos
e2=ray2.dir
r12=r2-r1
t1= (dot(r12, e1) - (dot(r12, e2)*dot(e1, e2)))/(1-(dot(e1, e2))**2)
t2= -(dot(r12, e2) - (dot(r12, e1)*dot(e1, e2)))/(1-(dot(e1, e2))**2)
p1=r1+t1*e1
p2=r2+t2*e2
#log.info("nearest points"+str(p1)+" "+str(p2))
#log.info("tvalues "+str(t1)+" "+str(t2))
if t1>=0 and t2>=0:
rv=True
else:
rv=False
return p1, p2, sqrt(dot(p1-p2, p1-p2)), rv
def chief_ray_search(opsys,ccds,o=(0.,0.,0.),rt=(0.,0.,0.),er=0.1,w=pi/2.,maxiter=1000,wavelength=.58929):
'''
This function uses a random search algorithm to find the chief_ray for a
given optical system and object point.
Parameters
----------
opsys : :class:`pyoptools.raytrace.system.System`
Optical system that will be used to find the chief ray
ccds : :class:`pyoptools.raytrace.comp_lib.CCD`
Detector placed in the aperture plane. Must be centred in the optical
axis
o : tuple(float, flo0at, float)
coordinates of the object point used to find the chief ray
rt : tuple(float, float, float)
rotations made to a ray propagating in the z direction to obtain the
first test ray
er : float
Maximum acceptable distance between the ray and the center of the
aperture
w : float
Gaussian width in radians
maxiter : int
Maximum number of iterations allowed in the random search
wavelength : float
Wavelength of the ray used to find the principal ray given in
micrometers (.58929 by default).
Returns
-------
:class:`~pyoptools.raytrace.ray.Ray`
Chief ray found. (Ray instance)
Notes
-----
The algorithm starts using a given ray, propagating it in the optical
system, and finding the intersection point of this test ray and the
aperture plane. The distance from this point and the optical axis is
recorded.
Using a gaussian random generator, two rotation angles are calculated,
to generate a new test ray that is propagated in the optical system,
and its distance to the optical axis is found at the aperture plane.
If this distance is less than the distance found for the previous ray,
this ray is taken as the new *chief ray* candidate, and the algorithm
is repeated until the number of iterations reaches *maxiter*, or until
the distance is less than *er*.
the *rt* parameter gives the rotations made to a ray originating in
*o*, and propagating in the *Z* direction, to find the first test ray.
A detector object *ccds* should be placed at the aperture plane. It is used
to find the point where the ray intersects the aperture. To increase the
convergence speed of the algorithm, it is better to make sure that the first
test ray intersects the detector.
.. todo::
Implement a function similar to this one, using a minimization
algorithm
'''
#log.info("Entering chief_ray_search function")
test_ray=Ray(wavelength=wavelength)
opsys.clear_ray_list()
btx,bty,btz=rt #btz is not used
ntry=0
nt=0
#Check the initial test ray
retray=test_ray.ch_coord_sys_inv(o,(btx,bty,0))
#log.info("Calculating test_ray")
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(retray)
opsys.propagate()
try:
x,y,z=ccds.hit_list[0][0]
dist=sqrt(square(x)+square(y))
except:
dist=inf
p_dist=dist
while (p_dist> er)and (ntry<maxiter):
ntry=ntry+1
nt=nt+1
rx=normal(btx,w)
ry=normal(bty,w)
tray=test_ray.ch_coord_sys_inv(o,(rx,ry,0))
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(tray)
opsys.propagate()
try:
x,y,z=ccds.hit_list[0][0]
dist=sqrt(square(x)+square(y))
except:
#log.info("CCD not hitted by ray")
dist=inf
if p_dist>dist:
#Select this ray as new generator ray
btx=rx
bty=ry
p_dist=dist
nt=0
retray=tray
#log.info("distance to aperture center="+str(dist))
if (nt>10)and p_dist<inf:
nt=0
w=w/2
#limit the minimum value of w
if w<.0000001: w=.0000001
# print p_dist,ntry
return retray
def pupil_location(opsys,ccds,opaxis):
'''
Function to find the optical system pupil positions
Note
----
For this function to operate, the system should have a rotational
symmetry around the optical axis.
Parameters
----------
opsys : :class:`pyoptools.raytrace.system.System`
Optical system to use.
opaxis : :class:`~pyoptools.raytrace.ray.Ray`
Ray representing the optical axis
ccds : :class:`pyoptools.raytrace.comp_lib.CCD`
Surface that represents a detector in the aperture plane
Returns
-------
enpl : tuple(float, float, float)
(xen,yen,zen) containing the entrance pupil coordinates
expl : tuple(float, float, float)
(xex,yex,zex) containing the exit pupil coordinates
'''
#log.info("Propagate Optical axis ray")
opsys.clear_ray_list()
opsys.reset()
#opsys.ray_add(cray)
opsys.ray_add(opaxis)
opsys.propagate()
if (len(ccds.hit_list)==0):
raise Exception("The optical axis did not intersect the aperture")
if(len(ccds.hit_list)>1):
raise Exception("The optical axis intersected the aperture more than once")
aip=ccds.hit_list[0][0]
air=ccds.hit_list[0][1]
#log.info("Optical Axis Intersection point= "+str(aip))
#log.info("Intersection Ray= "+str(air))
#Getting Intersection point in global coordinates
if(len(air.childs)!=1):
raise Exception("The intersected ray can only have one child")
ip=air.childs[0].pos
d=air.childs[0].dir
#log.info("Intersection point in world coordinates= "+str(ip))
#log.info("Direction of the optical axis at the intersection point"+str(d))
#Todo: Check if the optical axis and the aperture are perpendicular
# Calculate vectors perpendicular to the optical axis and to the XYZ axes
pv1= cross(d,(0,0,1))
pv2= cross(d,(0,1,0))
pv3= cross(d,(1,0,0))
pv=[pv1,pv2,pv3]
# Search for the longest pv
pvn=array((dot(pv1,pv1),dot(pv2,pv2),dot(pv3,pv3)))
pvm=pv[pvn.argmax()]
#log.info("Displacement vector found: "+str(pvm))
# Create ray to calculate the exit pupil
expuray=air.childs[0].copy()
expuray.dir=expuray.dir+pvm*.0001
# Create the ray to calculate the entrance pupil
enpuray=expuray.reverse()
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(enpuray)
opsys.ray_add(expuray)
opsys.propagate()
enp=enpuray.get_final_rays(inc_zeros = False)
exp=expuray.get_final_rays(inc_zeros = False)
oax=opaxis.get_final_rays(inc_zeros = False)
#log.info("enp="+str(enp))
#log.info("exp="+str(exp))
#log.info("oax="+str(oax))
if len(enp)!=1 or len(exp)!=1 or len(oax)!=1:
raise Exception("The principal ray or the optical axis ray have more"
" than one final ray")
#log.info("Calculating entrance pupil location")
# Find the nearest points between the rays.
# Some times because of numerical errors, or some aberrations in the optical
# system, the rays do not truly intersect.
# Use instead the nearest points and issue a warning when the rays do not truly
# intersect.
enpl=intersection(opaxis,enp[0])[0]
if (isnan(enpl)).all():
p1, p2, d, rv =nearest_points(opaxis,enp[0])
print("Warning: The optical axis does not intersect the principal ray at the entrance")
print("pupil. The minimum distance is:", d)
enpl=(p1+p2)/2
#log.info("Calculating exit pupil location")
expl=intersection(oax[0],exp[0])[0]
if (isnan(expl)).all():
p1, p2, d, rv =nearest_points(oax[0],exp[0])
print("Warning: The optical axis does not intersect the principal ray at the exit")
print("pupil. The minimum distance is:", d)
expl=(p1+p2)/2
return enpl,expl
def paraxial_location(opsys, opaxis):
"""Function to find the paraxial image location
This function finds the paraxial image location of a point located on
the optical axis, and a boolean indicating whether the image is real or
virtual (image_location, real_virtual).
The origin of the opaxis location is taken as the object location
Parameters
----------
opsys : :class:`~pyoptools.raytrace.system.System`
Optical system to use.
opaxis: :class:`~pyoptools.raytrace.ray.Ray`
Ray representing the optical axis
Returns
-------
image_location : tuple(float, float, float)
Image location coordinates
real : bool
Indicates if the intersection point represent a real image (real=True),
or a virtual image (real=False).
Note
----
For this function to operate, the system should have a rotational symmetry
around the optical axis.
"""
#log.info("Propagate Optical axis ray")
opsys.clear_ray_list()
opsys.reset()
#opsys.ray_add(cray)
opsys.ray_add(opaxis)
opsys.propagate()
# Calculate vectors perpendicular to the optical axis and to the XYZ axes
d=opaxis.dir
pv1= cross(d,(0,0,1))
pv2= cross(d,(0,1,0))
pv3= cross(d,(1,0,0))
pv=[pv1,pv2,pv3]
# Search for the longest pv
pvn=array((dot(pv1,pv1),dot(pv2,pv2),dot(pv3,pv3)))
pvm=pv[pvn.argmax()]
#log.info("Displacement vector found: "+str(pvm))
# Create paraxial ray
par_ray=opaxis.copy()
par_ray.dir=par_ray.dir+pvm*.001
opsys.clear_ray_list()
opsys.reset()
opsys.ray_add(par_ray)
opsys.propagate()
par=par_ray.get_final_rays(inc_zeros = False)
oax=opaxis.get_final_rays(inc_zeros = False)
#log.info("par="+str(par))
#log.info("oax="+str(oax))
if len(par)!=1 or len(oax)!=1:
raise Exception("The paraxial ray or the optical axis ray have more"
" than one final ray")
#log.info("Calculating object location")
expl=intersection(oax[0],par[0])
return expl
def find_apperture(ccd, size=(50,50)):
'''Function to find a mask representing the aperture
This function returns an array containing 1's and 0's representing
the aperture shape. The aperture shape will be approximated from
the CCD hit_list
Parameters
----------
ccd : :class:`~pyoptools.raytrace.comp_lib.CCD`
CCD object that will be used to get the shape information from
size : tuple(int, int)
Array shape
Returns
-------
array
Array with the image of the aperture
.. todo::
please describe better
Notes
-----
Right now only works for round apertures.
.. todo::
please be more specific
'''
hl=ccd.hit_list
sx,sy=ccd.size
tx,ty=size
dx,dy=sx/(tx-1),sy/(ty-1)
CG= mgrid[float(-sx/2.):float(sx/2.+dx):float(dx),
float(-sy/2.):float(sy/2.+dy):float(dy)]
rm = sqrt(CG[0]**2+CG[1]**2)
maxr=0.
for i in hl:
X,Y,Z= i[0]
r=sqrt(X*X+Y*Y)
if maxr<r:
maxr=r
return where(rm<maxr,1.,0.)
def find_ppp(opsys, opaxis):
"""Function to find the primary principal plane location of a lens or an
optical component
Parameters
----------
opsys : :class:`~pyoptools.raytrace.system.System`
Optical system or optical component whose principal planes are to be
found
opaxis : :class:`~pyoptools.raytrace.ray.Ray`
Ray defining the optical axis of the system
For this function to operate, the system should have a rotational symmetry
around the optical axis.
Notes
-----
    This function returns the intersection point of the optical axis and
    the principal plane.
"""
# Create a system with the component
if isinstance(opsys,(Component)):
c=opsys
opsys=System(complist=[(c,(0,0,0),(0,0,0)),
],n=1)
# To create a ray parallel to the optical axis, find a displacement vector
# perpendicular to the optical axis, and to the XYZ axes
d=opaxis.dir
pv1= cross(d,(0,0,1))
pv2= cross(d,(0,1,0))
pv3= cross(d,(1,0,0))
pv=[pv1,pv2,pv3]
# Search for the longest pv
pvn=array((dot(pv1,pv1),dot(pv2,pv2),dot(pv3,pv3)))
pvm=pv[pvn.argmax()]
# Create parallel ray
par_ray=opaxis.copy()
par_ray.pos=par_ray.pos+pvm*.0001
opsys.clear_ray_list()
opsys.ray_add([opaxis, par_ray])
opsys.propagate()
par_ray_end=par_ray.get_final_rays(inc_zeros = False)
if len(par_ray_end)!=1:
raise Exception("The paraxial ray has more than one final ray")
pppl=intersection(par_ray,par_ray_end[0])
#Move the intersection point toward the optical axis
ppp=pppl[0]-pvm*.0001
return ppp #, pppl[1])
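# Editor's note: a hedged usage sketch. find_ppp() accepts either a bare
# Component (it wraps it in a System itself, see the isinstance check above)
# or a full System; ``lens_component`` and ``opaxis`` are assumed to be built
# elsewhere.
def _example_find_ppp(lens_component, opaxis):
    """Show the two equivalent ways of calling find_ppp()."""
    # Passing the component directly; find_ppp() builds the wrapping System.
    p1 = find_ppp(lens_component, opaxis)
    # Passing an explicit single-component system gives the same point.
    opsys = System(complist=[(lens_component, (0, 0, 0), (0, 0, 0))], n=1)
    p2 = find_ppp(opsys, opaxis)
    return p1, p2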
def get_optical_path_ep(opsys, opaxis, raylist, stop=None, r=None):
"""Returns the optical path traveled by a ray up to the exit pupil
The optical path is measured from the ray origin until it crosses the
exit pupil of the system.
If a stop (aperture) is not given, the measurement is made up to the primary
principal plane.
Parameters
----------
opsys : :class:`~pyoptools.raytrace.system.System`
Optical system under analysis
opaxis : :class:`pyoptools.raytrace.ray.Ray`
Ray indicating the optical axis. The origin of the optical axis must be
the position of the object used in the image formation. This is needed
to be able to calculate the radius of the reference sphere.
raylist: list(:class:`pyoptools.raytrace.ray.Ray`)
List of rays that will be used to sample the optical path
stop : :class:`~pyoptools.raytrace.comp_lib.Stop`
        Stop aperture of the system. It must belong to opsys. If not given, it
        will be assumed that the exit pupil is at the primary principal plane.
r :
If None, measure up to the exit pupil plane. If given, use a reference
sphere with a vertex coinciding with the optical vertex.
.. todo::
Need to check the function and fix this documentation
Returns
-------
hcl : list
List containing the coordinates of the hits in the pupil coordinate
system.
opl : list
list containing the optical paths measured
pc : tuple(float, float, float)
intersection point between the optical axis, and the pupil plane.
hcl[i] corresponds to opl[i]
Note: This method only works if the optical axis coincides with the Z axis.
This must be corrected.
"""
    if stop is not None:
enp,exp=pupil_location(opsys,stop,opaxis)
else:
exp= find_ppp(opsys, opaxis)
#Reset the system
opsys.clear_ray_list()
opsys.reset()
# Propagate the rays
#print "***", raylist
opsys.ray_add(raylist)
opsys.propagate()
#pf=PlotFrame(opsys=opsys)
rl=[]
l=[]
# Get the optical path up to the final element in the system
for i in raylist:
a=i.get_final_rays()
if a[0].intensity!=0:
# Reverse the rays to calculate the optical path from the final element
#to the exit pupil
nray=a[0].reverse()
rl.append(nray)
#TODO: This should not be done using the label
nray.label=str(a[0].optical_path_parent())
# Create a dummy system to calculate the wavefront at the exit pupil
    if r is None:
#TODO: This ccd should be infinitely big. Have to see how this can be done
ccd=CCD(size=(1000,1000))
else:
ccds=Spherical(shape=Circular(radius=0.9*r), curvature=1./r)
ccd=Component(surflist=[(ccds, (0, 0, 0), (0, 0, 0)), ])
#print rl
dummy=System(complist=[(ccd,exp,(0,0,0)),
],n=1.)
#Calculate the optical path from the final element to the exit pupil plane
dummy.ray_add(rl)
dummy.propagate()
#PlotFrame(opsys=dummy)
hcl=[]
opl=[]
for ip,r in ccd.hit_list:
#print ip
x,y,z= ip
#TODO: This should not be done using the label
d= float(r.label)-r.optical_path()
hcl.append((x, y, z))
opl.append(d)
return (hcl, opl, exp)
#rv=bisplrep(X,Y,Z)
#data=bisplev(array(range(-20,20)),array(range(-20,20)),rv)
#data=(data-data.mean())
#print "Gaussian reference sphere radius =",sqrt(dot(impos-exp,impos-exp))
def find_reference_sphere_radius(ip, pl):
"""Find the radius os the reference sphere that best fits the input data.
This method assumes that the optical axis coincides with the z axis. This
means that the center of the sphere, has coordinates (0,0,r).
Parameters
----------
ip : list
list of the points where the optical path is measured, that are being
fitted. Each point is (XYZ) tuple. It can be also an array with a shape
n,3 where n is the number of points.
pl : list
List of path lengths. pl[i] corresponds to the point ip[i].
Returns
-------
float
Reference sphere radius
"""
ipa=array(ip)
pla=array(pl)
n, t=ipa.shape
# Find the point closest to the center of the aperture.
rm=sqrt(dot(ipa[0], ipa[0]))
im=0
for i in range (n):
if rm>sqrt(dot(ipa[i], ipa[i])):
rm=sqrt(dot(ipa[i], ipa[i]))
im=i
#Make the OPL 0 at the center of the aperture
pla=pla-pla[im]
    # Find the radius of the best-fit sphere
def F(z):
dist=pla-(sqrt(ipa[:, 0]**2+ipa[:, 1]**2+(ipa[:, 2]-z)**2)-z)
u=sqrt((dist**2).sum())
#print "*", u
#u=dist[-1]
#print u
return u
r=fsolve(F, -10.)
return r
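# Editor's note: a hedged smoke-test sketch for find_reference_sphere_radius().
# The synthetic, noise-free samples below are built so the fitted radius should
# come out close to the chosen r_true; the grid size and radius are arbitrary
# illustrative values, not part of the original module.
def _example_reference_sphere_fit(r_true=-100.):
    """Build synthetic (point, path-length) samples and fit the sphere radius."""
    import numpy as np
    # Sample points on the z=0 pupil plane, including one at the center,
    # which find_reference_sphere_radius() uses as its zero-OPL anchor.
    xs, ys = np.meshgrid(np.linspace(-5., 5., 11), np.linspace(-5., 5., 11))
    ip = np.column_stack([xs.ravel(), ys.ravel(), np.zeros(xs.size)])
    # Path lengths consistent with a sphere of radius r_true centered at
    # (0, 0, r_true), i.e. the model used inside F() above.
    pl = np.sqrt(ip[:, 0]**2 + ip[:, 1]**2 + (ip[:, 2] - r_true)**2) - r_true
    return find_reference_sphere_radius(ip, pl), r_true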
def aux_paral_f(x):
"""
Auxiliary function needed in parallel propagate
"""
os,rb=x
os.ray_add(rb)
os.propagate()
return os
def parallel_propagate(os,r , np=None):
"""Perform a propagation of the rays in the system using all cores
present on a computer
os gets reset before beginning the propagation, so the only rays
used in the simulation are the rays given in r
Parameters
----------
os : :class:`~pyoptools.raytrace.system.System`
Optical system used in the simulation
r : list(:class:`pyoptools.raytrace.ray.Ray`)
List containing the rays to propagate
np : int or None
Number of processes used in the simulation. If not given use one
process per cpu
"""
if np==None:
cpus=mp.cpu_count()
else:
cpus=np
pool=mp.Pool(cpus)
os.reset()
#Split the ray list in the number of CPUS
nr=len(r)
r_list=[]
    r_list.append((os,r[:nr//cpus]))
    for i in range(2,cpus):
        r_list.append((os,r[(nr//cpus)*(i-1):(nr//cpus)*(i)]))
    r_list.append((os,r[(nr//cpus)*(cpus-1):]))
osi=pool.map(aux_paral_f,r_list)
pool.close()
pool.join()
for osp in osi:
os.merge(osp)
return os
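# Editor's note: a hedged usage sketch for parallel_propagate(). The system and
# the ray list are assumed to be built elsewhere (any list of Ray objects from
# a beam generator will do); only the call pattern follows from the code above.
def _example_parallel_propagate(opsys, ray_list, processes=4):
    """Propagate ``ray_list`` through ``opsys`` with ``processes`` workers."""
    # The same System instance is returned, with the rays traced in the worker
    # copies merged back into it.
    return parallel_propagate(opsys, ray_list, np=processes)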
def aux_paral_f_ns(x):
"""
Auxiliary function needed in parallel propagate
"""
#os optical system
#rg guide ray
#dp Path (key) of the destination surface.
#rb rays to propagate
os,rg,dp,rb=x
os.ray_add(rb)
os.propagate_ray_ns(rg,dp)
return os
def parallel_propagate_ns(os,rg, dp, r, np=None):
"""Perform a propagation of the rays in the system using all cores
present on a computer
os gets reset before beginning the propagation, so the only rays
used in the simulation are the rays given in r
Parameters
----------
os :
Optical system used in the simulation
rg :
Guide ray
dp :
Destination path
r :
List containing the rays to propagate
np : int or None
        Number of processes used in the simulation. If not given, use one
        process per CPU.
"""
if np==None:
cpus=mp.cpu_count()
else:
cpus=np
pool=mp.Pool(cpus)
os.reset()
#Split the ray list in the number of CPUS
nr=len(r)
r_list=[]
    r_list.append((os,rg,dp,r[:nr//cpus]))
    for i in range(2,cpus):
        #os,rg,dp,rb=x
        r_list.append((os,rg,dp,r[(nr//cpus)*(i-1):(nr//cpus)*(i)]))
    r_list.append((os,rg,dp,r[(nr//cpus)*(cpus-1):]))
osi=pool.map(aux_paral_f_ns,r_list)
pool.close()
pool.join()
for osp in osi:
os.merge(osp)
return os
def ray_paths(r):
'''
Return lists with all the possible paths traveled by the ray r.
r must be previously propagated in an optical system
When there are beam splitters, there is more than one path
Parameters
----------
r : :class:`pyoptools.raytrace.ray.Ray`
.. todo::
Finish documentation
'''
def rt(r):
l=[]
rays=r.childs
for ray in rays:
a=rt(ray)
for ray1 in a:
l.append([ray]+ray1)
if len(a)==0: l.append([ray])
return l
A=rt(r)
B=[]
for rp in A:
t=[r]+rp
B.append(t)
return B
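# Editor's note: a hedged usage sketch for ray_paths(). The ray passed in is
# assumed to have been propagated already (for instance with
# parallel_propagate() above) through a system containing beam splitters.
def _example_ray_paths(propagated_ray):
    """Print how many branches the ray tree has and how long each path is."""
    paths = ray_paths(propagated_ray)
    for n, path in enumerate(paths):
        print("path %d: %d ray segments" % (n, len(path)))
    return paths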
| ramezquitao/pyoptools | pyoptools/raytrace/calc/calc.py | Python | gpl-3.0 | 25,857 |
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
from datetime import datetime
import collections.abc
import logging
log = logging.getLogger(__name__)
# TODO: Likely a bit much for this case
class FactsDict(collections.abc.MutableMapping):
"""A dict for facts that ignores items in 'graylist' on compares."""
graylist = set(['cpu.cpu_mhz', 'lscpu.cpu_mhz'])
def __init__(self, *args, **kwargs):
super(FactsDict, self).__init__(*args, **kwargs)
self.data = {}
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __eq__(self, other):
"""Compares all of the items in self.data, except it ignores keys in self.graylist."""
if not isinstance(other, FactsDict):
return NotImplemented
keys_self = set(self.data).difference(self.graylist)
keys_other = set(other.data).difference(self.graylist)
if keys_self == keys_other:
if all(self.data[k] == other.data[k] for k in keys_self):
return True
return False
# Maybe total_ordering is a bit overkill for just a custom compare
def __lt__(self, other):
return len(self) < len(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def compare_with_graylist(dict_a, dict_b, graylist):
ka = set(dict_a).difference(graylist)
kb = set(dict_b).difference(graylist)
return ka == kb and all(dict_a[k] == dict_b[k] for k in ka)
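# Editor's note: a small illustrative sketch of the graylist behaviour
# described above. Two FactsDict objects that differ only in a graylisted key
# ('cpu.cpu_mhz') still compare equal; the fact names and values are made up.
def _example_graylist_compare():
    a = FactsDict()
    b = FactsDict()
    a.update({'cpu.cpu_mhz': '2400', 'network.hostname': 'host1'})
    b.update({'cpu.cpu_mhz': '2600', 'network.hostname': 'host1'})
    return a == b  # True: the only differing key is in FactsDict.graylist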
class FactsCollection(object):
def __init__(self, facts_dict=None):
self.data = facts_dict or FactsDict()
self.collection_datetime = datetime.now()
def __repr__(self):
buf = "%s(facts_dict=%s, collection_datetime=%s)" % \
(self.__class__.__name__, self.data, self.collection_datetime)
return buf
@classmethod
def from_facts_collection(cls, facts_collection):
"""Create a FactsCollection with the data from facts_collection, but new timestamps.
ie, a copy(), more or less."""
fc = cls()
fc.data.update(facts_collection.data)
return fc
    def __iter__(self):
        # Return a real iterator; returning self.data directly would make
        # iter() fail, since FactsDict is iterable but not an iterator.
        return iter(self.data)
| candlepin/subscription-manager | src/rhsmlib/facts/collection.py | Python | gpl-2.0 | 2,910 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, primary_key=True, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| febsn/djangocms-copilot | copilot/migrations/0001_initial.py | Python | mit | 605 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO
import select
import socket
import time
import warnings
from oslo_log import log as logging
import six
from tempest_lib import exceptions
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
LOG = logging.getLogger(__name__)
class SSHTimeout(exceptions.TempestException):
message = ("Connection to the %(host)s via SSH timed out.\n"
"User: %(user)s, Password: %(password)s")
class SSHExecCommandFailed(exceptions.TempestException):
"""Raised when remotely executed command returns nonzero status."""
message = ("Command '%(command)s', exit status: %(exit_status)d, "
"Error:\n%(strerror)s")
class Client(object):
def __init__(self, host, username, password=None, timeout=300, pkey=None,
channel_timeout=10, look_for_keys=False, key_filename=None):
self.host = host
self.username = username
self.password = password
if isinstance(pkey, six.string_types):
pkey = paramiko.RSAKey.from_private_key(
cStringIO.StringIO(str(pkey)))
self.pkey = pkey
self.look_for_keys = look_for_keys
self.key_filename = key_filename
self.timeout = int(timeout)
self.channel_timeout = float(channel_timeout)
self.buf_size = 1024
def _get_ssh_connection(self, sleep=1.5, backoff=1):
"""Returns an ssh connection to the specified host."""
bsleep = sleep
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
_start_time = time.time()
if self.pkey is not None:
LOG.info("Creating ssh connection to '%s' as '%s'"
" with public key authentication",
self.host, self.username)
else:
LOG.info("Creating ssh connection to '%s' as '%s'"
" with password %s",
self.host, self.username, str(self.password))
attempts = 0
while True:
try:
ssh.connect(self.host, username=self.username,
password=self.password,
look_for_keys=self.look_for_keys,
key_filename=self.key_filename,
timeout=self.channel_timeout, pkey=self.pkey)
LOG.info("ssh connection to %s@%s successfuly created",
self.username, self.host)
return ssh
except (socket.error,
paramiko.SSHException) as e:
if self._is_timed_out(_start_time):
LOG.exception("Failed to establish authenticated ssh"
" connection to %s@%s after %d attempts",
self.username, self.host, attempts)
raise SSHTimeout(host=self.host,
user=self.username,
password=self.password)
bsleep += backoff
attempts += 1
LOG.warning("Failed to establish authenticated ssh"
" connection to %s@%s (%s). Number attempts: %s."
" Retry after %d seconds.",
self.username, self.host, e, attempts, bsleep)
time.sleep(bsleep)
def _is_timed_out(self, start_time):
return (time.time() - self.timeout) > start_time
def exec_command(self, cmd):
"""Execute the specified command on the server
Note that this method is reading whole command outputs to memory, thus
shouldn't be used for large outputs.
:returns: data read from standard output of the command.
:raises: SSHExecCommandFailed if command returns nonzero
status. The exception contains command status stderr content.
"""
ssh = self._get_ssh_connection()
transport = ssh.get_transport()
channel = transport.open_session()
channel.fileno() # Register event pipe
channel.exec_command(cmd)
channel.shutdown_write()
out_data = []
err_data = []
poll = select.poll()
poll.register(channel, select.POLLIN)
start_time = time.time()
while True:
ready = poll.poll(self.channel_timeout)
if not any(ready):
if not self._is_timed_out(start_time):
continue
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
cmd, self.host))
if not ready[0]: # If there is nothing to read.
continue
out_chunk = err_chunk = None
if channel.recv_ready():
out_chunk = channel.recv(self.buf_size)
out_data += out_chunk,
if channel.recv_stderr_ready():
err_chunk = channel.recv_stderr(self.buf_size)
err_data += err_chunk,
if channel.closed and not err_chunk and not out_chunk:
break
exit_status = channel.recv_exit_status()
if 0 != exit_status:
raise SSHExecCommandFailed(
command=cmd, exit_status=exit_status,
strerror=''.join(err_data))
return ''.join(out_data)
def test_connection_auth(self):
"""Raises an exception when we can not connect to server via ssh."""
connection = self._get_ssh_connection()
connection.close()
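# Editor's note: a hedged usage sketch for Client. The host address, user name
# and password below are placeholders, not values used anywhere in this
# project.
def _example_ssh_usage():
    """Connect to a (hypothetical) server and return the kernel string."""
    client = Client('198.51.100.10', 'cirros', password='secret', timeout=60)
    client.test_connection_auth()  # raises SSHTimeout if we cannot connect
    return client.exec_command('uname -a')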
| MayankGo/ec2-api | ec2api/tests/functional/ssh.py | Python | apache-2.0 | 6,284 |
#!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import argparse
import multiprocessing
import os
import sys
import tempfile
import webbrowser
import Feed
import Qtrac
def main():
limit, concurrency = handle_commandline()
Qtrac.report("starting...")
datafile = os.path.join(os.path.dirname(__file__), "whatsnew.dat")
filename = os.path.join(tempfile.gettempdir(), "whatsnew.html")
canceled = False
with open(filename, "wt", encoding="utf-8") as file:
write_header(file)
pipeline = create_pipeline(limit, concurrency, file)
try:
for i, feed in enumerate(Feed.iter(datafile)):
pipeline.send((feed, i % concurrency))
except KeyboardInterrupt:
Qtrac.report("canceling...")
canceled = True
write_footer(file, results.ok, results.todo, canceled,
concurrency)
if not canceled:
webbrowser.open(filename)
def handle_commandline():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--limit", type=int, default=0,
help="the maximum items per feed [default: unlimited]")
parser.add_argument("-c", "--concurrency", type=int,
default=multiprocessing.cpu_count() * 4,
help="specify the concurrency (for debugging and "
"timing) [default: %(default)d]")
args = parser.parse_args()
return args.limit, args.concurrency
def write_header(file):
file.write("<!doctype html>\n")
file.write("<html><head><title>What's New</title></head>\n")
file.write("<body><h1>What's New</h1>\n")
def write_footer(file, ok, todo, canceled, concurrency):
file.write("</body></html>\n")
Qtrac.report("read {}/{} feeds using {} coroutines{}".format(ok, todo,
concurrency, " [canceled]" if canceled else ""))
print()
def create_pipeline(limit, concurrency, file):
pipeline = None
sink = results(file)
for who in range(concurrency):
pipeline = reader(pipeline, sink, limit, who)
return pipeline
@Qtrac.coroutine
def reader(receiver, sink, limit, me):
while True:
feed, who = (yield)
if who == me:
ok, result = Feed.read(feed, limit)
if not ok:
Qtrac.report(result, True)
result = None
else:
Qtrac.report("read {} at {}".format(feed.title, feed.url))
sink.send(result)
elif receiver is not None:
receiver.send((feed, who))
@Qtrac.coroutine
def results(file):
while True:
result = (yield)
results.todo += 1
if result is not None:
results.ok += 1
for item in result:
file.write(item)
results.todo = results.ok = 0
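# Editor's note: Qtrac.coroutine is imported above but its source is not part
# of this file. The helper below is a hedged sketch of what such a decorator
# conventionally does -- create the generator and advance it to its first
# yield so it is ready to accept .send() -- and is an assumption about, not a
# copy of, Qtrac's implementation.
def _coroutine_sketch(function):
    def wrapper(*args, **kwargs):
        generator = function(*args, **kwargs)
        next(generator)  # prime: run up to the first `feed, who = (yield)`
        return generator
    return wrapper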
if __name__ == "__main__":
main()
| nwiizo/workspace_2017 | pipng/whatsnew-c.py | Python | mit | 3,481 |
# Copyright (c) 2010 Christopher Rebert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module parses course textbook information from the `UCSD Bookstore's website <http://bookstore.ucsd.edu/>`_.
:copyright: (c) 2010 by Christopher Rebert.
:license: MIT, see :file:`LICENSE.txt` for more details.
"""
from decimal import Decimal
from triton_scraper.fetchparse import make_tree4url
from triton_scraper.util import *
from triton_scraper import config
class BookList(object):
def __init__(self, required=None, optional=None, as_soft_reserves=False, unknown=False):
#: Required books
#:
#: :type: list of :class:`Book`-s
self.required = required or []
#: Optional books
#:
#: :type: list of :class:`Book`-s
self.optional = optional or []
#: Is a course reader from A.S. Soft Reserves required?
#:
#: :type: bool
self.as_soft_reserves = as_soft_reserves
#: Indicates whether the UCSD Bookstore has yet to receive a booklist for the associated course
#:
#: :type: bool
self.unknown = unknown
def __repr__(self):
if self.unknown:
return "The UCSD Bookstore has not yet been given a book list."
requireds = ["Required:"] + self.required
if self.as_soft_reserves:
requireds.append("Custom reader from A.S. Soft Reserves")
requireds = "\n\t".join(str(entry) for entry in requireds)
optionals = "\n\t".join(str(entry) for entry in ["Optional:"]+self.optional)
string = '\n'.join([(requireds if self.any_required else ''), (optionals if self.optional else '')])
return string
@property
def any_required(self):
"""Are any materials on this booklist required?
:type: bool
"""
return self.as_soft_reserves or self.required
def add_book(self, book, required):
"""Add the given *book* to the booklist.
:param book: textbook to add
:type book: :class:`Book`
:param required: is the textbook required?
:type required: bool
"""
(self.required if required else self.optional).append(book)
class Book(object):
"""Textbook from the UCSD Bookstore."""
__FORMAT = u'ISBN {0.isbn}: "{0.title}" by {0.author}; ${0.used_price} Used, ${0.new_price} New; '
def __init__(self, isbn, new_price=NaN, used_price=NaN, title='', author=''):
#: Author of book
#:
#: :type: string
self.author = author
#: Title of book
#:
#: :type: string
self.title = title
#: International Standard Book Number
#:
#: :type: string
self.isbn = isbn
#: Price of a new copy at the UCSD Bookstore; NaN if new copies unavailable.
#:
#: :type: :class:`decimal.Decimal`
self.new_price = new_price
#: Price of a used copy at the UCSD Bookstore; NaN if used copies unavailable.
#:
#: :type: :class:`decimal.Decimal`
self.used_price = used_price
def __repr__(self):
return self.__FORMAT.format(self).encode('utf8')
book_cells = XPath(RELATIVE_PREFIX+"/table[@border='1']/tr/td/font[not(@align='right')]")
discounted_price = XPath(RELATIVE_PREFIX+"/font[@color='#008000']")
def _availability2price(availability): # New Books, In Stock, Retail Price: $62.50
return Decimal(availability.split("$")[1]) if config.IN_STOCK in availability else NaN
def _skipping_availability_side_headers(cells):
for cell in cells:
if cell.text:
yield cell
def books_on(bookstore_url_from_tritonlink):
"""Returns book list based on the given course page at the UCSD Bookstore's website.
:param bookstore_url_from_tritonlink: UCSD Bookstore website URL for a course section
:type bookstore_url_from_tritonlink: string
:rtype: :class:`BookList`
"""
url = bookstore_url_from_tritonlink.replace("https", "http", 1)
tree, _url = make_tree4url()(url)
booklist = BookList()
for sextuple in grouper(6, _skipping_availability_side_headers(book_cells(tree))):
if config.LACK_BOOK_LIST in sextuple[0].text:# No book list
return BookList(unknown=True)
_sections, _instructor, required, author, title_comma_isbn = (cell.text for cell in sextuple[:5])
availability = sextuple[-1]
required = required == config.REQUIRED_BOOK_CODE
title, isbn = title_comma_isbn.rsplit(", ", 1) # Principles Of General Chemistry, 2 Edition, 9780077470500
if config.NO_TEXTBOOK_REQUIRED in title:
return BookList(required=[])
if config.AS_SOFT_RESERVES in title:
booklist.as_soft_reserves = True
continue
discounts = discounted_price(availability)
if discounts:
discount = discounts[0]
if config.IN_STOCK not in availability.text:
new = NaN
else:
# New Books, Not in Stock*, Retail Price: $65.70, Discounted Price: <FONT COLOR="#008000">$21.03</FONT>
new = Decimal(discount.text[1:])#remove dollar sign
used = discount.tail
else:
new, used = availability.text.split("\n")
new = _availability2price(new)
used = _availability2price(used)
booklist.add_book(Book(isbn, new, used, title, author), required)
return booklist
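# Editor's note: a hedged usage sketch. The URL below is a placeholder for the
# bookstore link TritonLink provides for a section, not a real course page.
def _example_books_on():
    """Fetch and print the book list for one (hypothetical) course section."""
    url = "https://bookstore.ucsd.edu/example-section"  # placeholder URL
    booklist = books_on(url)
    if booklist.unknown:
        print("The bookstore has no list for this section yet.")
    else:
        print(booklist)
    return booklist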
| cvrebert/TritonScraper | src/triton_scraper/bookstore.py | Python | mit | 6,532 |
# Copyright (c) 2010, Trevor Bekolay
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
# * Neither the name of the IRCL nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import Codegen, Gui, Io
app = Gui.BehaviourApp(redirect=1, filename="ErrorLog.txt")
app.MainLoop()
| tbekolay/behaviour-tool | BehaviourTool.py | Python | bsd-3-clause | 1,649 |
# -----------------------------------------------------------------------------
# @author:
# Tingwu Wang
# @brief:
# The environment wrapper
# -----------------------------------------------------------------------------
import init_path
from config.config import get_config
import os
# os.environ['DISABLE_MUJOCO_RENDERING'] = '1'
import fish_env_wrapper
# from util import dm_control_util
import numpy as np
if __name__ == '__main__':
'''
@brief: test the environments
@example:
1. test the gym environment
python test_env_wrapper.py
--task Reacher-v1
--monitor 0
--test_env 1
        2. test the dm environment
python test_env_wrapper.py
--task Reacher-v1
--monitor 0
--test_env 1
'''
# os.environ['DISABLE_MUJOCO_RENDERING'] = '1'
args = get_config()
print('Base dir: {}'.format(init_path.get_abs_base_dir()))
if not args.monitor:
os.environ['DISABLE_MUJOCO_RENDERING'] = '1'
# make the environment
env = fish_env_wrapper.dm_fish3d_wrapper(
args, 1, args.monitor
)
action_size = env.env.action_spec().shape[0]
for num_episode in range(30):
# test for one episode
ob = env.reset()
print("reset return - ob size: {}".format(ob.shape))
for _ in range(1000):
# import pdb; pdb.set_trace()
ob, reward, done, _ = \
env.step((np.random.rand(action_size) - 0.5) * 2)
print(
"action_size:{}, ob_size:{}, reward:{}, done:{}".format(
action_size, ob.shape, reward, done
)
)
print("ob: {}\n".format(ob))
if args.monitor:
env.render()
if done:
break
break
ob = env.reset()
| WilsonWangTHU/neural_graph_evolution | env/test_fish.py | Python | mit | 1,964 |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
import django.views
from OMRS import views
from OMRS import omrsfunctions
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'openMRScap.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
#url(r'^$',views.index,name='index'),
#add other projects URLS
#url(r'^OMRS/',include('OMRS.urls')),
url(r'^$','OMRS.views.index', name='home'),
#url(r'^server/',views.jobs.as_view(),name='server'),
url(r'^jobs/',views.jobs.as_view(),name='jobs'), #looks like a placeholder
url(r'^admin/', include(admin.site.urls)),
url(r'^server/$','OMRS.views.server'), #allows user to view all their servers and create new ones
url(r'^userprofile/$','OMRS.views.userProfile',name='userprofile'),
url(r'^jobserversettings/$','OMRS.views.userJobSettings'), #lists just the URLS of the servers in the system
url(r'^restricted/$', 'OMRS.views.restricted', name='restricted'), #not doing anything yet
url(r'^alerts/$', 'working.views.create_user_alert', name='alerts'), #not doing anything yet
#server details
url(r'^setup/$', 'OMRS.views.post_server_details',name='setup'),
url(r'^server_form/$', 'OMRS.views.server_form',name='server_form'),
url(r'^server_details/$', 'OMRS.views.server_details_form',name='server_details'),
#user details
url(r'^register/$', 'OMRS.views.register', name='register'),
url(r'^login/$', 'OMRS.views.user_login',name='login'),
url(r'^logout/$', 'OMRS.views.user_logout', name='logout'),
#import file
url(r'^upload/$', 'OMRS.views.upload', name='upload'),
    # testing URLs
#url(r'^serverauth/$', 'working.views.get_server_auth_details', name='serverauth'), #deleted this
url(r'^test/$', 'working.views.server_details_form', name='test'),
url(r'^test/(?P<server_url>\w{0,500})$','working.views.server_details_form'),
#http://127.0.0.1:8000/test/?server_url=http://localhost:8081/openmrs-standalone
url(r'^_test/$','working.views._test',name='_test'),
url(r'^createjob/$','working.views.createjob',name='createjob'),
url(r'^userfeed/$','working.views.create_user_feed',name='userfeeds'),
url(r'^messages/$', 'ajaxmessages.views.messages', name='ajaxmessages'),
)
if settings.DEBUG:
urlpatterns += patterns(
'django.views.static',
(r'media/(?P<path>.*)',
django.views.static.serve,
{'document_root': settings.MEDIA_ROOT}),
)
| omiltoro/testkenyacap | openMRScap/urls.py | Python | apache-2.0 | 2,620 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the pipeline options validator module."""
import logging
import unittest
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator
from hamcrest.core.base_matcher import BaseMatcher
# Mock runners to use for validations.
class MockRunners(object):
class DataflowRunner(object):
pass
class TestDataflowRunner(object):
pass
class OtherRunner(object):
pass
# Matcher that always passes for testing on_success_matcher option
class AlwaysPassMatcher(BaseMatcher):
def _matches(self, item):
return True
class SetupTest(unittest.TestCase):
def check_errors_for_arguments(self, errors, args):
"""Checks that there is exactly one error for each given argument."""
missing = []
remaining = list(errors)
for arg in args:
found = False
for error in remaining:
if arg in error:
remaining.remove(error)
found = True
break
if not found:
missing.append('Missing error for: ' + arg)
# Return missing and remaining (not matched) errors.
return missing + remaining
def test_local_runner(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertEqual(len(errors), 0)
def test_missing_required_options(self):
options = PipelineOptions([''])
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertEqual(
self.check_errors_for_arguments(
errors,
['project', 'staging_location', 'temp_location']),
[])
def test_gcs_path(self):
def get_validator(temp_location, staging_location):
options = ['--project=example:example', '--job_name=job']
if temp_location is not None:
options.append('--temp_location=' + temp_location)
if staging_location is not None:
options.append('--staging_location=' + staging_location)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'temp_location': None,
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': None,
'staging_location': None,
'errors': ['staging_location', 'temp_location']},
{'temp_location': 'gs://foo/bar',
'staging_location': None,
'errors': []},
{'temp_location': 'gs://foo/bar',
'staging_location': 'gs://ABC/bar',
'errors': ['staging_location']},
{'temp_location': 'gcs:/foo/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs:/foo/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://ABC/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://ABC/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://foo',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://foo/',
'staging_location': 'gs://foo/bar',
'errors': []},
{'temp_location': 'gs://foo/bar',
'staging_location': 'gs://foo/bar',
'errors': []},
]
for case in test_cases:
errors = get_validator(case['temp_location'],
case['staging_location']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_project(self):
def get_validator(project):
options = ['--job_name=job', '--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if project is not None:
options.append('--project=' + project)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'project': None, 'errors': ['project']},
{'project': '12345', 'errors': ['project']},
{'project': 'FOO', 'errors': ['project']},
{'project': 'foo:BAR', 'errors': ['project']},
{'project': 'fo', 'errors': ['project']},
{'project': 'foo', 'errors': []},
{'project': 'foo:bar', 'errors': []},
]
for case in test_cases:
errors = get_validator(case['project']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_job_name(self):
def get_validator(job_name):
options = ['--project=example:example', '--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if job_name is not None:
options.append('--job_name=' + job_name)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'job_name': None, 'errors': []},
{'job_name': '12345', 'errors': ['job_name']},
{'job_name': 'FOO', 'errors': ['job_name']},
{'job_name': 'foo:bar', 'errors': ['job_name']},
{'job_name': 'fo', 'errors': []},
{'job_name': 'foo', 'errors': []},
]
for case in test_cases:
errors = get_validator(case['job_name']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_num_workers(self):
def get_validator(num_workers):
options = ['--project=example:example', '--job_name=job',
'--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if num_workers is not None:
options.append('--num_workers=' + num_workers)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'num_workers': None, 'errors': []},
{'num_workers': '1', 'errors': []},
{'num_workers': '0', 'errors': ['num_workers']},
{'num_workers': '-1', 'errors': ['num_workers']},
]
for case in test_cases:
errors = get_validator(case['num_workers']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_is_service_runner(self):
test_cases = [
{
'runner': MockRunners.OtherRunner(),
'options': [],
'expected': False,
},
{
'runner': MockRunners.OtherRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
'expected': False,
},
{
'runner': MockRunners.OtherRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com/'],
'expected': False,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://another.service.com'],
'expected': False,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://another.service.com/'],
'expected': False,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
'expected': True,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com/'],
'expected': True,
},
{
'runner': MockRunners.DataflowRunner(),
'options': [],
'expected': True,
},
]
for case in test_cases:
validator = PipelineOptionsValidator(
PipelineOptions(case['options']), case['runner'])
self.assertEqual(validator.is_service_runner(), case['expected'])
def test_dataflow_job_file_and_template_location_mutually_exclusive(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([
'--template_location', 'abc',
'--dataflow_job_file', 'def'
])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertTrue(errors)
def test_validate_template_location(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([
'--template_location', 'abc',
])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertFalse(errors)
def test_validate_dataflow_job_file(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([
'--dataflow_job_file', 'abc'
])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertFalse(errors)
def test_test_matcher(self):
def get_validator(matcher):
options = ['--project=example:example',
'--job_name=job',
'--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar',]
if matcher:
options.append('--on_success_matcher=' + matcher)
pipeline_options = PipelineOptions(options)
runner = MockRunners.TestDataflowRunner()
return PipelineOptionsValidator(pipeline_options, runner)
test_case = [
{'on_success_matcher': None,
'errors': []},
{'on_success_matcher': pickler.dumps(AlwaysPassMatcher()),
'errors': []},
{'on_success_matcher': 'abc',
'errors': ['on_success_matcher']},
{'on_success_matcher': pickler.dumps(object),
'errors': ['on_success_matcher']},
]
for case in test_case:
errors = get_validator(case['on_success_matcher']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| wtanaka/beam | sdks/python/apache_beam/options/pipeline_options_validator_test.py | Python | apache-2.0 | 11,275 |
from .base_infant_scheduled_modeladmin import BaseInfantScheduleModelAdmin
from .infant_birth_admin import InfantBirthAdmin
from .infant_birth_data_admin import InfantBirthDataAdmin
from .infant_birth_arv_admin import InfantBirthArvAdmin
from .infant_death_report_admin import InfantDeathReportAdmin
from .infant_off_study_admin import InfantOffStudyAdmin
from .infant_visit_admin import InfantVisitAdmin
from .infant_birth_exam_admin import InfantBirthExamAdmin
from .infant_birth_feeding_admin import InfantBirthFeedingVaccineAdmin
from .infant_birth_feeding_admin import InfantVaccinesAdmin
from .infant_congenital_anomalies_admin import (InfantCongenitalAnomaliesAdmin, InfantCnsAdmin, InfantFacialDefectAdmin,
InfantCleftDisorderAdmin, InfantMouthUpGiAdmin,
InfantRespiratoryDefectAdmin, InfantLowerGiAdmin,
InfantFemaleGenitalAdmin, InfantRenalAdmin,
InfantMusculoskeletalAdmin, InfantSkinAdmin, InfantTrisomiesAdmin,
InfantMaleGenitalAdmin, InfantCardioDisorderAdmin)
from .infant_fu_admin import InfantFuAdmin
from .infant_fu_physical_admin import InfantFuPhysicalAdmin
from .infant_fu_dx_admin import (InfantFuDxAdmin, InfantFuDxItemsAdmin)
from .infant_fu_immunizations_admin import (InfantFuImmunizationsAdmin, VaccinesReceivedAdmin, VaccinesMissedAdmin)
from .infant_feeding_admin import InfantFeedingAdmin
from .infant_fu_new_med_admin import (InfantFuNewMedItemsAdmin, InfantFuNewMedAdmin)
from .infant_arv_proph_admin import (InfantArvProphAdmin, InfantArvProphModAdmin)
| TshepangRas/tshilo-dikotla | td_infant/admin/__init__.py | Python | gpl-2.0 | 1,727 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
def ops_used_by_graph_def(graph_def):
"""Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph.
"""
# Map function names to definitions
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
# Collect the list of op names. Since functions can reference functions, we
# need a recursive traversal.
used_ops = set() # Includes both primitive ops and functions
functions_to_process = [] # A subset of used_ops
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
for node in graph_def.node:
mark_op_as_used(node.op)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node:
mark_op_as_used(node.op)
return [op for op in used_ops if op not in name_to_function]
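# Editor's note: a hedged usage sketch for ops_used_by_graph_def(). The
# two-node GraphDef below is made up for illustration; any GraphDef (for
# example the result of Graph.as_graph_def()) is handled the same way.
def _example_ops_used_by_graph_def():
  """Build a tiny GraphDef by hand and list the op names it uses."""
  graph_def = graph_pb2.GraphDef()
  graph_def.node.add(name="x", op="Placeholder")
  graph_def.node.add(name="y", op="Square")
  # With an empty function library every node op is reported directly,
  # e.g. ["Placeholder", "Square"] (built from a set, so unordered).
  return ops_used_by_graph_def(graph_def)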
def stripped_op_list_for_graph(graph_def):
"""Collect the stripped OpDefs for ops used by a graph.
This function computes the `stripped_op_list` field of `MetaGraphDef` and
similar protos. The result can be communicated from the producer to the
consumer, which can then use the C++ function
`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
An `OpList` of ops used by the graph.
Raises:
ValueError: If an unregistered op is used.
"""
# This is the Python equivalent of StrippedOpListForGraph in C++.
# Unfortunately, since the Python op registry can differ from that in C++, we
# can't remove the duplication using swig (at least naively).
# TODO(irving): Support taking graphs directly.
used_ops = ops_used_by_graph_def(graph_def)
# Verify that all used ops are registered.
registered_ops = op_def_registry.get_registered_ops()
# These internal ops used by functions are not registered, so we need to
# whitelist them. # TODO(irving): Do something better here.
op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
for op in used_ops:
if op not in registered_ops and op not in op_whitelist:
raise ValueError("Op %s is used by the graph, but is not registered" % op)
# Build the stripped op list in sorted order
return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
if op in registered_ops])
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
class VarToSave(object):
"""Class used to describe variable slices that need to be saved."""
def __init__(self, var, slice_spec, name):
self.var = var
self.slice_spec = slice_spec
self.name = name
def __init__(self):
pass
def save_op(self, filename_tensor, vars_to_save):
"""Create an Op to save 'vars_to_save'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
vars_to_save: A list of BaseSaverBuilder.VarToSave objects.
Returns:
An Operation that save the variables.
"""
# pylint: disable=protected-access
return io_ops._save(
filename=filename_tensor,
tensor_names=[vs.name for vs in vars_to_save],
tensors=[vs.var for vs in vars_to_save],
tensor_slices=[vs.slice_spec for vs in vars_to_save])
def restore_op(self, filename_tensor, var_to_save, preferred_shard):
"""Create an Op to read the variable 'var_to_save'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
var_to_save: A BaseSaverBuilder.VarToSave object.
preferred_shard: Int. Shard to open first when loading a sharded file.
Returns:
A Tensor resulting from reading 'var_to_save' from 'filename'.
"""
# pylint: disable=protected-access
return io_ops._restore_slice(
filename_tensor,
var_to_save.name,
var_to_save.slice_spec,
var_to_save.var.dtype,
preferred_shard=preferred_shard)
def sharded_filename(self, filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
# pylint: disable=protected-access
return gen_io_ops._sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, vars_to_save):
"""Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
vars_to_save: A list of _VarToSave objects.
Returns:
A tensor with the filename used to save.
"""
save = self.save_op(filename_tensor, vars_to_save)
return control_flow_ops.with_dependencies([save], filename_tensor)
def _AddShardedSaveOps(self, filename_tensor, per_device):
"""Add ops to save the params per shard.
Args:
filename_tensor: String Tensor.
per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables.
"""
num_shards = len(per_device)
sharded_saves = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
for shard, (device, vars_to_save) in enumerate(per_device):
with ops.device(device):
sharded_filename = self.sharded_filename(
filename_tensor, shard, num_shards_tensor)
sharded_saves.append(self._AddSaveOps(sharded_filename, vars_to_save))
# Return the sharded name for the save path.
with ops.control_dependencies([x.op for x in sharded_saves]):
# pylint: disable=protected-access
return gen_io_ops._sharded_filespec(filename_tensor, num_shards_tensor)
def _AddRestoreOps(self,
filename_tensor,
vars_to_save,
restore_sequentially,
reshape,
preferred_shard=-1,
name="restore_all"):
"""Add operations to restore vars_to_save.
Args:
filename_tensor: Tensor for the path of the file to load.
vars_to_save: A list of _VarToSave objects.
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
preferred_shard: Shard to open first when loading a sharded file.
name: Name for the returned op.
Returns:
An Operation that restores the variables.
"""
assign_ops = []
for vs in vars_to_save:
v = vs.var
restore_control_inputs = assign_ops[-1:] if restore_sequentially else []
# Load and optionally reshape on the CPU, as string tensors are not
# available on the GPU.
# TODO(touts): Re-enable restore on GPU when we can support annotating
# string tensors as "HostMemory" inputs.
with ops.device(graph_util.set_cpu0(v.device) if v.device else None):
with ops.control_dependencies(restore_control_inputs):
values = self.restore_op(filename_tensor, vs, preferred_shard)
if reshape:
shape = v.get_shape()
if not shape.is_fully_defined():
shape = array_ops.shape(v)
values = array_ops.reshape(values, shape)
validate_shape = not reshape and v.get_shape().is_fully_defined()
assign_ops.append(state_ops.assign(v,
values,
validate_shape=validate_shape))
# Create a Noop that has control dependencies from all the updates.
return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to save variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, _VarToSave) pairs, as
returned by _GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, vars_to_save) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(self._AddRestoreOps(
filename_tensor,
vars_to_save,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
def _IsVariable(self, v):
return isinstance(v, ops.Tensor) and (
v.op.type == "Variable" or v.op.type == "AutoReloadVariable")
def _GroupByDevices(self, vars_to_save):
"""Group Variable tensor slices per device.
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
    It can happen if the devices are unspecified.
Args:
vars_to_save: A list of BaseSaverBuilder.VarToSave objects.
Returns:
A list of tuples: (device_name, BaseSaverBuilder.VarToSave) tuples.
The list is sorted by ascending device_name.
"""
per_device = collections.defaultdict(lambda: [])
for var_to_save in vars_to_save:
canonical_device = pydev.canonical_name(var_to_save.var.device)
per_device[canonical_device].append(var_to_save)
return sorted(per_device.items(), key=lambda t: t[0])
def _VarListToDict(self, var_list):
"""Create a dictionary of names to variable lists.
Args:
var_list: A list, tuple, or set of Variables.
Returns:
A dictionary of variable names to the variables that must be saved under
that name. Variables with save_slice_info are grouped together under the
same key in no particular order.
Raises:
TypeError: If the type of var_list or its elements is not supported.
ValueError: If at least two variables share the same name.
"""
if not isinstance(var_list, (list, tuple, set)):
raise TypeError("Variables to save should be passed in a dict or a "
"list: %s" % var_list)
var_list = set(var_list)
names_to_variables = {}
for var in var_list:
# pylint: disable=protected-access
if isinstance(var, variables.Variable) and var._save_slice_info:
name = var._save_slice_info.full_name
if name in names_to_variables:
if not isinstance(names_to_variables[name], list):
raise ValueError("Mixing slices and non-slices with the same name: "
"%s" % name)
names_to_variables[name].append(var)
else:
names_to_variables[name] = [var]
else:
var = ops.convert_to_tensor(var, as_ref=True)
if not self._IsVariable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
name = var.op.name
if name in names_to_variables:
raise ValueError("At least two variables have the same name: %s" %
name)
names_to_variables[name] = var
# pylint: enable=protected-access
return names_to_variables
def _ValidateAndSliceInputs(self, names_to_variables):
"""Returns the variables and names that will be used for a Saver.
Args:
names_to_variables: A dict (k, v) where k is the name of a variable and v
is a Variable to save or a BaseSaverBuilder.Saver.
Returns:
A list of BaseSaverBuilder.VarToSave objects.
Raises:
TypeError: If any of the keys are not strings or any of the
values are not one of Tensor or Variable.
ValueError: If the same variable is given in more than one value
(this also applies to slices of SlicedVariables).
"""
if not isinstance(names_to_variables, dict):
names_to_variables = self._VarListToDict(names_to_variables)
vars_to_save = []
seen_variables = set()
for name in sorted(names_to_variables.keys()):
if not isinstance(name, six.string_types):
raise TypeError("names_to_variables must be a dict mapping string "
"names to variable Tensors. Name is not a string: %s" %
name)
v = names_to_variables[name]
if isinstance(v, (list, tuple)):
# A set of slices.
slice_name = None
# pylint: disable=protected-access
for variable in v:
if not isinstance(variable, variables.Variable):
raise ValueError("Slices must all be Variables: %s" % variable)
if not variable._save_slice_info:
raise ValueError("Slices must all be slices: %s" % variable)
if slice_name is None:
slice_name = variable._save_slice_info.full_name
elif slice_name != variable._save_slice_info.full_name:
raise ValueError(
"Slices must all be from the same tensor: %s != %s"
% (slice_name, variable._save_slice_info.full_name))
self._AddVarToSave(vars_to_save, seen_variables,
variable, variable._save_slice_info.spec, name)
# pylint: enable=protected-access
else:
# A variable or tensor.
variable = ops.convert_to_tensor(v, as_ref=True)
if not self._IsVariable(variable):
raise TypeError("names_to_variables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
variable)
self._AddVarToSave(vars_to_save, seen_variables, variable, "", name)
return vars_to_save
def _AddVarToSave(self, vars_to_save, seen_variables, variable, slice_spec,
name):
"""Create a VarToSave and add it to the vars_to_save list.
Args:
vars_to_save: List to append the new VarToSave to.
seen_variables: Set of variables already processed. Used to check
that each variable is only saved once.
variable: Variable to save.
slice_spec: String. Slice spec for the variable.
name: Name to use to save the variable.
Raises:
ValueError: If the variable has already been processed.
"""
if variable in seen_variables:
raise ValueError("The same variable will be restored with two names: %s",
variable)
vars_to_save.append(BaseSaverBuilder.VarToSave(variable, slice_spec, name))
seen_variables.add(variable)
def build(self,
names_to_variables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False):
"""Adds save/restore nodes to the graph and creates a SaverDef proto.
Args:
names_to_variables: A dictionary mapping name to a Variable.
Each name will be associated with the
corresponding variable in the checkpoint.
      reshape: If True, allow restoring parameters from a checkpoint
        where the parameters have a different shape. This is only needed
        when you try to restore from a Dist-Belief checkpoint, and only
        sometimes.
sharded: If True, shard the checkpoints, one per device that has
Variable nodes.
max_to_keep: Maximum number of checkpoints to keep. As new checkpoints
are created, old ones are deleted. If None or 0, no checkpoints are
deleted from the filesystem but only the last one is kept in the
`checkpoint` file. Presently the number is only roughly enforced. For
example in case of restarts more than max_to_keep checkpoints may be
kept.
keep_checkpoint_every_n_hours: How often checkpoints should be kept.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A Bool, which if true, causes restore of different
variables to happen sequentially within each device.
Returns:
A SaverDef proto.
Raises:
TypeError: If 'names_to_variables' is not a dictionary mapping string
keys to variable Tensors.
ValueError: If any of the keys or values in 'names_to_variables' is not
unique.
"""
vars_to_save = self._ValidateAndSliceInputs(names_to_variables)
if max_to_keep is None:
max_to_keep = 0
with ops.op_scope([vs.var for vs in vars_to_save], name, "save") as name:
# Add the Constant string tensor for the filename.
filename_tensor = constant_op.constant("model")
# Add the save ops.
if sharded:
per_device = self._GroupByDevices(vars_to_save)
save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
restore_op = self._AddShardedRestoreOps(
filename_tensor, per_device, restore_sequentially, reshape)
else:
save_tensor = self._AddSaveOps(filename_tensor, vars_to_save)
restore_op = self._AddRestoreOps(
filename_tensor, vars_to_save, restore_sequentially, reshape)
assert restore_op.name.endswith("restore_all"), restore_op.name
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
sharded=sharded)
def _GetCheckpointFilename(save_dir, latest_filename):
"""Returns a filename for storing the CheckpointState.
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto.
"""
if latest_filename is None:
latest_filename = "checkpoint"
return os.path.join(save_dir, latest_filename)
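# Example (sketch, not part of the original module): with the default above,
# _GetCheckpointFilename("/tmp/train", None) returns "/tmp/train/checkpoint".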
def generate_checkpoint_state_proto(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None):
"""Generates a checkpoint state proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
Returns:
CheckpointState proto with model_checkpoint_path and
all_model_checkpoint_paths updated to either absolute paths or
relative paths to the current save_dir.
"""
if all_model_checkpoint_paths is None:
all_model_checkpoint_paths = []
if (not all_model_checkpoint_paths or
all_model_checkpoint_paths[-1] != model_checkpoint_path):
logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
model_checkpoint_path)
all_model_checkpoint_paths.append(model_checkpoint_path)
# Relative paths need to be rewritten to be relative to the "save_dir"
# if model_checkpoint_path already contains "save_dir".
if not os.path.isabs(save_dir):
if not os.path.isabs(model_checkpoint_path):
model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
for i in range(len(all_model_checkpoint_paths)):
p = all_model_checkpoint_paths[i]
if not os.path.isabs(p):
all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
coord_checkpoint_proto = CheckpointState(
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths)
return coord_checkpoint_proto
def update_checkpoint_state(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Raises:
RuntimeError: If the save paths conflict.
"""
# Writes the "checkpoint" file for the coordinator for later restoration.
coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
ckpt = generate_checkpoint_state_proto(
save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths)
if coord_checkpoint_filename == ckpt.model_checkpoint_path:
raise RuntimeError("Save path '%s' conflicts with path used for "
"checkpoint state. Please use a different save path." %
model_checkpoint_path)
pywrap_tensorflow.write_string_to_file(
coord_checkpoint_filename, text_format.MessageToString(ckpt))
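# Illustration (hypothetical paths, not part of the original module): after
#   update_checkpoint_state("/tmp/train", "/tmp/train/my-model-1000")
# the file "/tmp/train/checkpoint" holds a text-format CheckpointState along
# the lines of:
#   model_checkpoint_path: "/tmp/train/my-model-1000"
#   all_model_checkpoint_paths: "/tmp/train/my-model-1000"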
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
"""Returns CheckpointState proto from the "checkpoint" file.
If the "checkpoint" file contains a valid CheckpointState
proto, returns it.
Args:
checkpoint_dir: The directory of checkpoints.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Returns:
A CheckpointState if the state was available, None
otherwise.
"""
ckpt = None
coord_checkpoint_filename = _GetCheckpointFilename(
checkpoint_dir, latest_filename)
f = None
try:
# Check that the file exists before opening it to avoid
# many lines of errors from colossus in the logs.
if pywrap_tensorflow.file_exists(coord_checkpoint_filename):
file_content = pywrap_tensorflow.read_file_to_string(
coord_checkpoint_filename).decode("utf-8")
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
# For relative model_checkpoint_path and all_model_checkpoint_paths,
# prepend checkpoint_dir.
if not os.path.isabs(checkpoint_dir):
if not os.path.isabs(ckpt.model_checkpoint_path):
ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
ckpt.model_checkpoint_path)
for i in range(len(ckpt.all_model_checkpoint_paths)):
p = ckpt.all_model_checkpoint_paths[i]
if not os.path.isabs(p):
ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
except IOError:
# It's ok if the file cannot be read
return None
except text_format.ParseError as e:
logging.warning(str(e))
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
finally:
if f:
f.close()
return ckpt
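# Usage sketch (hypothetical directory, assumes an existing `saver` and `sess`):
# reading back the state written by update_checkpoint_state().
#   ckpt = get_checkpoint_state("/tmp/train")
#   if ckpt and ckpt.model_checkpoint_path:
#     saver.restore(sess, ckpt.model_checkpoint_path)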
class Saver(object):
"""Saves and restores variables.
See [Variables](../../how_tos/variables/index.md)
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.Session()
for step in xrange(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
@@__init__
@@save
@@restore
Other utility methods.
@@last_checkpoints
@@set_last_checkpoints
@@as_saver_def
"""
def __init__(self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None):
"""Creates a `Saver`.
The constructor adds ops to save and restore variables.
`var_list` specifies the variables that will be saved and restored. It can
be passed as a `dict` or a list:
* A `dict` of names to variables: The keys are the names that will be
used to save or restore the variables in the checkpoint files.
* A list of variables: The variables will be keyed with their op name in
the checkpoint files.
For example:
```python
v1 = tf.Variable(..., name='v1')
v2 = tf.Variable(..., name='v2')
# Pass the variables as a dict:
saver = tf.train.Saver({'v1': v1, 'v2': v2})
# Or pass them as a list.
saver = tf.train.Saver([v1, v2])
# Passing a list is equivalent to passing a dict with the variable op names
# as keys:
saver = tf.train.Saver({v.op.name: v for v in [v1, v2]})
```
The optional `reshape` argument, if `True`, allows restoring a variable from
a save file where the variable had a different shape, but the same number
of elements and type. This is useful if you have reshaped a variable and
want to reload it from an older checkpoint.
The optional `sharded` argument, if `True`, instructs the saver to shard
checkpoints per device.
Args:
var_list: A list of `Variable` objects or a dictionary mapping names to
variables. If `None`, defaults to the list of all variables.
reshape: If `True`, allows restoring parameters from a checkpoint
where the variables have a different shape.
sharded: If `True`, shard the checkpoints, one per device.
max_to_keep: Maximum number of recent checkpoints to keep.
Defaults to 5.
keep_checkpoint_every_n_hours: How often to keep checkpoints.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A `Bool`, which if true, causes restore of different
variables to happen sequentially within each device. This can lower
memory usage when restoring very large models.
saver_def: Optional `SaverDef` proto to use instead of running the
builder. This is only useful for specialty code that wants to recreate
a `Saver` object for a previously built `Graph` that had a `Saver`.
The `saver_def` proto should be the one returned by the
`as_saver_def()` call of the `Saver` that was created for that `Graph`.
builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
Defaults to `BaseSaverBuilder()`.
Raises:
TypeError: If `var_list` is invalid.
ValueError: If any of the keys or values in `var_list` are not unique.
"""
if not saver_def:
if builder is None:
builder = BaseSaverBuilder()
if var_list is None:
var_list = variables.all_variables()
if not var_list:
raise ValueError("No variables to save")
saver_def = builder.build(
var_list,
reshape=reshape,
sharded=sharded,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
name=name,
restore_sequentially=restore_sequentially)
if not isinstance(saver_def, saver_pb2.SaverDef):
raise ValueError("saver_def must if a saver_pb2.SaverDef: %s" % saver_def)
if not saver_def.save_tensor_name:
raise ValueError("saver_def must specify the save_tensor_name: %s"
% str(saver_def))
if not saver_def.restore_op_name:
raise ValueError("saver_def must specify the restore_op_name: %s"
% str(saver_def))
# Assigns saver_def.
self.saver_def = saver_def
# Updates next checkpoint time.
self._next_checkpoint_time = (
time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)
self._last_checkpoints = []
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _MetaGraphFilename(self, checkpoint_filename, meta_graph_suffix="meta"):
"""Returns the meta graph filename.
Args:
checkpoint_filename: Name of the checkpoint file.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
Returns:
MetaGraph file name.
"""
# If the checkpoint_filename is sharded, the checkpoint_filename could
# be of format model.ckpt-step#-?????-of-shard#. For example,
# model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.
basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
meta_graph_filename = ".".join([basename, meta_graph_suffix])
return meta_graph_filename
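  # Example (sketch): for a sharded checkpoint name such as
  # "model.ckpt-123456-?????-of-00005" the shard suffix is stripped and
  # "model.ckpt-123456.meta" is returned.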
def _MaybeDeleteOldCheckpoints(self, latest_save_path,
meta_graph_suffix="meta"):
"""Deletes old checkpoints if necessary.
Always keep the last `max_to_keep` checkpoints. If
`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
kept for every 0.5 hours of training; if `N` is 10, an additional
checkpoint is kept for every 10 hours of training.
Args:
latest_save_path: Name including path of checkpoint file to save.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
if not self.saver_def.max_to_keep:
return
# Remove first from list if the same name was used before.
for p in self._last_checkpoints:
if latest_save_path == self._CheckpointFilename(p):
self._last_checkpoints.remove(p)
# Append new path to list
self._last_checkpoints.append((latest_save_path, time.time()))
# If more than max_to_keep, remove oldest.
if len(self._last_checkpoints) > self.saver_def.max_to_keep:
p = self._last_checkpoints.pop(0)
      # Do not delete the file if keep_checkpoint_every_n_hours is set and we
# have reached N hours of training.
should_keep = p[1] > self._next_checkpoint_time
if should_keep:
self._next_checkpoint_time += (
self.saver_def.keep_checkpoint_every_n_hours * 3600)
return
# Otherwise delete the files.
for f in pywrap_tensorflow.get_matching_files(
self._CheckpointFilename(p)):
try:
pywrap_tensorflow.delete_file(f)
meta_graph_filename = self._MetaGraphFilename(
f, meta_graph_suffix=meta_graph_suffix)
if pywrap_tensorflow.file_exists(meta_graph_filename):
pywrap_tensorflow.delete_file(meta_graph_filename)
except Exception as e: # pylint: disable=broad-except
logging.warning("Ignoring: %s", str(e))
def as_saver_def(self):
"""Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
"""
return self.saver_def
def to_proto(self):
"""Converts this `Saver` to a `SaverDef` protocol buffer.
Returns:
A `SaverDef` protocol buffer.
"""
return self.saver_def
@staticmethod
def from_proto(saver_def):
"""Returns a `Saver` object created from `saver_def`."""
return Saver(saver_def=saver_def)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
"""
return list(self._CheckpointFilename(p) for p in self._last_checkpoints)
def set_last_checkpoints(self, last_checkpoints):
"""DEPRECATED: Use set_last_checkpoints_with_time.
Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If last_checkpoints is not a list.
"""
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
# deleted. This is both safe and backwards compatible to a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
def set_last_checkpoints_with_time(self, last_checkpoints_with_time):
"""Sets the list of old checkpoint filenames and timestamps.
Args:
last_checkpoints_with_time: A list of tuples of checkpoint filenames and
timestamps.
Raises:
AssertionError: If last_checkpoints_with_time is not a list.
"""
assert isinstance(last_checkpoints_with_time, list)
self._last_checkpoints = last_checkpoints_with_time
def save(self, sess, save_path, global_step=None, latest_filename=None,
meta_graph_suffix="meta", write_meta_graph=True):
"""Saves variables.
This method runs the ops added by the constructor for saving variables.
It requires a session in which the graph was launched. The variables to
save must also have been initialized.
The method returns the path of the newly created checkpoint file. This
path can be passed directly to a call to `restore()`.
Args:
sess: A Session to use to save the variables.
save_path: String. Path to the checkpoint filename. If the saver is
`sharded`, this is the prefix of the sharded checkpoint filename.
global_step: If provided the global step number is appended to
`save_path` to create the checkpoint filename. The optional argument
can be a `Tensor`, a `Tensor` name or an integer.
latest_filename: Optional name for the protocol buffer file that will
contains the list of most recent checkpoint filenames. That file,
kept in the same directory as the checkpoint files, is automatically
managed by the saver to keep track of recent checkpoints. Defaults to
'checkpoint'.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
write_meta_graph: `Boolean` indicating whether or not to write the meta
graph file.
Returns:
A string: path at which the variables were saved. If the saver is
sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
is the number of shards created.
Raises:
TypeError: If `sess` is not a `Session`.
ValueError: If `latest_filename` contains path components.
"""
if latest_filename is None:
latest_filename = "checkpoint"
if os.path.split(latest_filename)[0]:
raise ValueError("'latest_filename' must not contain path components")
if global_step is not None:
if not isinstance(global_step, compat.integral_types):
global_step = training_util.global_step(sess, global_step)
checkpoint_file = "%s-%d" % (save_path, global_step)
else:
checkpoint_file = save_path
save_path = os.path.dirname(save_path)
if not isinstance(sess, session.SessionInterface):
raise TypeError("'sess' must be a Session; %s" % sess)
model_checkpoint_path = sess.run(
self.saver_def.save_tensor_name,
{self.saver_def.filename_tensor_name: checkpoint_file})
model_checkpoint_path = compat.as_str(model_checkpoint_path)
self._MaybeDeleteOldCheckpoints(model_checkpoint_path,
meta_graph_suffix=meta_graph_suffix)
update_checkpoint_state(save_path, model_checkpoint_path,
self.last_checkpoints, latest_filename)
if write_meta_graph:
meta_graph_filename = self._MetaGraphFilename(
checkpoint_file, meta_graph_suffix=meta_graph_suffix)
with sess.graph.as_default():
self.export_meta_graph(meta_graph_filename)
return model_checkpoint_path
def export_meta_graph(self, filename=None, collection_list=None,
as_text=False):
"""Writes `MetaGraphDef` to save_path/filename.
Args:
filename: Optional meta_graph filename including the path.
collection_list: List of string keys to collect.
as_text: If `True`, writes the meta_graph as an ASCII proto.
Returns:
A `MetaGraphDef` proto.
"""
return export_meta_graph(filename=filename,
graph_def=ops.get_default_graph().as_graph_def(
add_shapes=True),
saver_def=self.saver_def,
collection_list=collection_list,
as_text=as_text)
def restore(self, sess, save_path):
"""Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
Args:
sess: A `Session` to use to restore the parameters.
save_path: Path where parameters were previously saved.
Raises:
ValueError: If the given `save_path` does not point to a file.
"""
if not pywrap_tensorflow.get_matching_files(save_path):
raise ValueError("Restore called with invalid save path %s" % save_path)
sess.run(self.saver_def.restore_op_name,
{self.saver_def.filename_tensor_name: save_path})
@staticmethod
def _add_collection_def(meta_graph_def, key):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
"""
_add_collection_def(meta_graph_def, key)
def latest_checkpoint(checkpoint_dir, latest_filename=None):
"""Finds the filename of latest saved checkpoint file.
Args:
checkpoint_dir: Directory where the variables were saved.
latest_filename: Optional name for the protocol buffer file that
contains the list of most recent checkpoint filenames.
See the corresponding argument to `Saver.save()`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was found.
"""
# Pick the latest checkpoint based on checkpoint state.
ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
if ckpt and ckpt.model_checkpoint_path:
if pywrap_tensorflow.get_matching_files(ckpt.model_checkpoint_path):
return ckpt.model_checkpoint_path
return None
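# Usage sketch (hypothetical paths, assumes a `Saver` and a `Session` already
# exist): the common restore pattern built on this helper.
#   checkpoint_path = latest_checkpoint("/tmp/train")
#   if checkpoint_path:
#     saver.restore(sess, checkpoint_path)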
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
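# Examples (sketch): _get_kind_name("queue") -> "bytes_list",
# _get_kind_name(7) -> "int64_list", _get_kind_name(0.5) -> "float_list";
# anything else (e.g. an Operation or Tensor) falls through to "node_list".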
def _add_collection_def(meta_graph_def, key):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
"""
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
logging.warning("Only collections with string type keys will be "
"serialized. This key has %s", type(key))
return
collection_list = ops.get_collection(key)
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = "bytes_list"
for x in collection_list:
# Additional type check to make sure the returned proto is indeed
# what we expect.
proto = to_proto(x)
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == "node_list":
getattr(col_def, kind).value.extend([x.name for x in collection_list])
elif kind == "bytes_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python3 distinguishes between bytes and strings.
getattr(col_def, kind).value.extend(
[compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e: # pylint: disable=broad-except
logging.warning("Error encountered when serializing %s.\n"
"Type is unsupported, or the types of the items don't "
"match field type in CollectionDef.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
def _as_meta_graph_def(meta_info_def=None, graph_def=None, saver_def=None,
collection_list=None):
"""Construct and returns a `MetaGraphDef` protocol buffer.
Args:
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
Returns:
MetaGraphDef protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# Type check.
if meta_info_def and not isinstance(meta_info_def,
meta_graph_pb2.MetaGraphDef.MetaInfoDef):
raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
type(meta_info_def))
if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be of type GraphDef, not %s",
type(graph_def))
if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
raise TypeError("saver_def must be of type SaverDef, not %s",
type(saver_def))
# Creates a MetaGraphDef proto.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Adds meta_info_def.
if meta_info_def:
meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
# Adds graph_def or the default.
if not graph_def:
meta_graph_def.graph_def.MergeFrom(
ops.get_default_graph().as_graph_def(add_shapes=True))
else:
meta_graph_def.graph_def.MergeFrom(graph_def)
# Fills in meta_info_def.stripped_op_list using the ops from graph_def.
# pylint: disable=g-explicit-length-test
if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
stripped_op_list_for_graph(meta_graph_def.graph_def))
# pylint: enable=g-explicit-length-test
# Adds saver_def.
if saver_def:
meta_graph_def.saver_def.MergeFrom(saver_def)
# Adds collection_list.
if collection_list:
clist = collection_list
else:
clist = ops.get_all_collection_keys()
for ctype in clist:
_add_collection_def(meta_graph_def, ctype)
return meta_graph_def
def read_meta_graph_file(filename):
"""Reads a file containing `MetaGraphDef` and returns the protocol buffer.
Args:
filename: `meta_graph_def` filename including the path.
Returns:
A `MetaGraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
meta_graph_def = meta_graph_pb2.MetaGraphDef()
if not pywrap_tensorflow.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = pywrap_tensorflow.read_file_to_string(filename)
try:
meta_graph_def.ParseFromString(file_content)
return meta_graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return meta_graph_def
def _import_meta_graph_def(meta_graph_def):
"""Recreates a Graph saved in a `MetaGraphDef` proto.
This function adds all the nodes from the meta graph def proto to the current
graph, recreates all the collections, and returns a saver from saver_def.
Args:
meta_graph_def: `MetaGraphDef` protocol buffer.
Returns:
A saver constructed from `saver_def` in `meta_graph_def` or None.
A None value is returned if no variables exist in the `meta_graph_def`
(i.e., no variables to restore).
"""
# Gathers the list of nodes we are interested in.
producer_op_list = None
if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
importer.import_graph_def(meta_graph_def.graph_def, name="",
producer_op_list=producer_op_list)
# Restores all the other collections.
for key, col_def in meta_graph_def.collection_def.items():
kind = col_def.WhichOneof("kind")
if kind is None:
logging.error("Cannot identify data type for collection %s. Skipping.",
key)
continue
from_proto = ops.get_from_proto_function(key)
if from_proto:
assert kind == "bytes_list"
proto_type = ops.get_collection_proto_type(key)
for value in col_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
ops.add_to_collection(key, from_proto(proto))
else:
field = getattr(col_def, kind)
if kind == "node_list":
for value in field.value:
col_op = ops.get_default_graph().as_graph_element(value)
ops.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python2 distinguishes between int and long, while Python3 has
# only int.
for value in field.value:
ops.add_to_collection(key, int(value))
else:
for value in field.value:
ops.add_to_collection(key, value)
if meta_graph_def.HasField("saver_def"):
return Saver(saver_def=meta_graph_def.saver_def)
else:
if variables.all_variables():
# Return the default saver instance for all graph variables.
return Saver()
else:
      # If no graph variables exist, then a Saver cannot be constructed.
logging.info("Saver not created because there are no variables in the"
" graph to restore")
return None
def import_meta_graph(meta_graph_or_file):
"""Recreates a Graph saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
```Python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Remember the training_op we want to run by adding it to a collection.
tf.add_to_collection('train_op', train_op)
sess = tf.Session()
for step in xrange(1000000):
sess.run(train_op)
if step % 1000 == 0:
# Saves checkpoint, which by default also exports a meta_graph
# named 'my-model-global_step.meta'.
saver.save(sess, 'my-model', global_step=step)
```
Later we can continue training from this saved `meta_graph` without building
the model from scratch.
```Python
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')
new_saver.restore(sess, 'my-save-dir/my-model-10000')
# tf.get_collection() returns a list. In this example we only want the
# first one.
train_op = tf.get_collection('train_op')[0]
for step in xrange(1000000):
sess.run(train_op)
```
NOTE: Restarting training from saved `meta_graph` only works if the
device assignments have not changed.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
Returns:
A saver constructed from `saver_def` in `MetaGraphDef` or None.
A None value is returned if no variables exist in the `MetaGraphDef`
(i.e., there are no variables to restore).
"""
if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
return _import_meta_graph_def(meta_graph_or_file)
else:
return _import_meta_graph_def(read_meta_graph_file(meta_graph_or_file))
def export_meta_graph(filename=None, meta_info_def=None, graph_def=None,
saver_def=None, collection_list=None, as_text=False):
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
Returns:
A `MetaGraphDef` proto.
"""
meta_graph_def = _as_meta_graph_def(meta_info_def=meta_info_def,
graph_def=graph_def,
saver_def=saver_def,
collection_list=collection_list)
if filename:
training_util.write_graph(meta_graph_def, os.path.dirname(filename),
os.path.basename(filename), as_text=as_text)
return meta_graph_def
ops.register_proto_function(ops.GraphKeys.SAVERS,
proto_type=saver_pb2.SaverDef,
to_proto=Saver.to_proto,
from_proto=Saver.from_proto)
| dhalleine/tensorflow | tensorflow/python/training/saver.py | Python | apache-2.0 | 56,216 |
#============================
# Python Interface
#============================
from __future__ import absolute_import, division, print_function
from os.path import join, realpath, dirname
import utool as ut
import cv2
import random
import numpy as np
import ctypes as C
import detecttools.ctypes_interface as ctypes_interface
def _cast_list_to_c(py_list, dtype):
"""
Converts a python list of strings into a c array of strings
adapted from "http://stackoverflow.com/questions/3494598/passing-a-list-of
-strings-to-from-python-ctypes-to-c-function-expecting-char"
Avi's code
"""
c_arr = (dtype * len(py_list))()
c_arr[:] = py_list
return c_arr
def _arrptr_to_np(c_arrptr, shape, arr_t, dtype):
"""
Casts an array pointer from C to numpy
Input:
c_arrpt - an array pointer returned from C
shape - shape of that array pointer
arr_t - the ctypes datatype of c_arrptr
Avi's code
"""
arr_t_size = C.POINTER(C.c_char * dtype().itemsize) # size of each item
c_arr = C.cast(c_arrptr.astype(int), arr_t_size) # cast to ctypes
np_arr = np.ctypeslib.as_array(c_arr, shape) # cast to numpy
np_arr.dtype = dtype # fix numpy dtype
np_arr = np.require(np_arr, dtype=dtype, requirements=['O']) # prevent memory leaks
return np_arr
def _extract_np_array(size_list, ptr_list, arr_t, arr_dtype,
arr_dim):
"""
size_list - contains the size of each output 2d array
ptr_list - an array of pointers to the head of each output 2d
array (which was allocated in C)
arr_t - the C pointer type
arr_dtype - the numpy array type
arr_dim - the number of columns in each output 2d array
"""
arr_list = [_arrptr_to_np(arr_ptr, (size, arr_dim), arr_t, arr_dtype)
for (arr_ptr, size) in zip(ptr_list, size_list)]
return arr_list
def _load_c_shared_library(METHODS):
''' Loads the pybing dynamic library and defines its functions '''
if ut.VERBOSE:
print('_load_c_shared_library')
root_dir = realpath(join('..', dirname(__file__)))
libname = 'pybing'
rf_clib, def_cfunc = ctypes_interface.load_clib(libname, root_dir)
# Load and expose methods from lib
for method in METHODS.keys():
def_cfunc(METHODS[method][1], method, METHODS[method][0])
return rf_clib
def _cache_data(src_path_list, dst_path, format_str='data_%07d.JPEG', **kwargs):
    '''
    src_path_list (required)
    dst_path (required)
    chips_norm_width (required)
    chips_norm_height (required)
    chips_prob_flip_horizontally (required)
    chips_prob_flip_vertically (required)
    patch_width (required)
    patch_height (required)
    verbose (required)
    '''
if kwargs['chips_norm_width'] is not None:
kwargs['chips_norm_width'] = int(kwargs['chips_norm_width'])
if kwargs['chips_norm_height'] is not None:
kwargs['chips_norm_height'] = int(kwargs['chips_norm_height'])
chip_filename_list = []
counter = 0
for src_path in src_path_list:
if kwargs['verbose']:
print("Processing %r" % (src_path, ))
        # Load the image
        image = cv2.imread(src_path)
        # Get the shape of the image
height_, width_, channels_ = image.shape
# Determine new image size
if kwargs['chips_norm_width'] is not None and kwargs['chips_norm_height'] is None:
# Normalizing width (with respect to aspect ratio)
width = kwargs['chips_norm_width']
height = int( ( width / width_ ) * height_ )
elif kwargs['chips_norm_height'] is not None and kwargs['chips_norm_width'] is None:
# Normalizing height (with respect to aspect ratio)
height = kwargs['chips_norm_height']
width = int( ( height / height_ ) * width_ )
elif kwargs['chips_norm_width'] is not None and kwargs['chips_norm_height'] is not None:
# Normalizing width and height (ignoring aspect ratio)
width = kwargs['chips_norm_width']
height = kwargs['chips_norm_height']
else:
width = width_
height = height_
# Check for patch size limitation
if width < kwargs['patch_width'] or height < kwargs['patch_height']:
print('\t[WARNING] Image size is too small for the patch size, skipping image ')
continue
# Resize the image
image_ = cv2.resize(image, (width, height), interpolation=cv2.INTER_LANCZOS4)
        # Flip the image (if necessary)
if kwargs['chips_prob_flip_horizontally'] is not None and random.uniform(0.0, 1.0) <= kwargs['chips_prob_flip_horizontally']:
image_ = cv2.flip(image_, 1)
if kwargs['chips_prob_flip_vertically'] is not None and random.uniform(0.0, 1.0) <= kwargs['chips_prob_flip_vertically']:
image_ = cv2.flip(image_, 0)
# Get the images destination filename
chip_filename = format_str % (counter, )
        # Write the image
        cv2.imwrite(join(dst_path, chip_filename), image_)
        # Append the image's destination filename to the return list
chip_filename_list.append(chip_filename)
# Increment the counter
counter += 1
return chip_filename_list
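# Usage sketch (hypothetical paths and values, not part of the original module):
# all of these keyword arguments are read unconditionally, so each must be
# supplied explicitly.
#   chips = _cache_data(['/data/raw/img0.jpg'], '/data/chips',
#                       chips_norm_width=320, chips_norm_height=None,
#                       chips_prob_flip_horizontally=0.5,
#                       chips_prob_flip_vertically=None,
#                       patch_width=32, patch_height=32, verbose=True)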
| bluemellophone/pybing | pybing/pybing_helpers.py | Python | bsd-3-clause | 5,425 |
from django.core.management.base import BaseCommand
from optparse import make_option
# Python 2.3 doesn't have sorted()
try:
sorted
except NameError:
from django.utils.itercompat import sorted
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--option_a','-a', action='store', dest='option_a', default='1'),
make_option('--option_b','-b', action='store', dest='option_b', default='2'),
make_option('--option_c','-c', action='store', dest='option_c', default='3'),
)
help = 'Test basic commands'
requires_model_validation = False
args = '[labels ...]'
def handle(self, *labels, **options):
print 'EXECUTE:BaseCommand labels=%s, options=%s' % (labels, sorted(options.items()))
| weigj/django-multidb | tests/regressiontests/admin_scripts/management/commands/base_command.py | Python | bsd-3-clause | 774 |
from classes.Feature import *
from classes.Extension import *
import bisect
def translateType(t, name):
if name in [ "GL_TRUE", "GL_FALSE" ]:
return "GLboolean"
return { "u" : "GLuint", "ull" : "GLuint64" }.get(t, "GLenum")
# https://docs.python.org/2/library/bisect.html
def find_le(a, x):
'Find rightmost value less than or equal to x'
i = bisect.bisect_right(a, x)
if i:
return a[i-1]
return None
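# Example (sketch): with a sorted list, find_le([1, 4, 9], 5) returns 4 and
# find_le([1, 4, 9], 0) returns None, matching the bisect recipe linked above.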
class Enum:
def __init__(self, xml, features, extensions, groupString, groupType, api):
self.api = api
self.name = xml.attrib["name"]
self.value = xml.attrib["value"]
self.type = "GLenum"
self.aliasString = ""
self.alias = None
# an enum group is, if defined, defined specifically for an enum
# but the enum itself might be reused by other groups as well.
self.groups = set()
self.groupString = None # ToDo: only supported for GLbitfield for now
self.aliasString = xml.attrib.get("alias", None)
if groupString == "SpecialNumbers":
self.type = translateType(xml.attrib.get("type", ""), self.name)
elif groupType == "bitmask":
self.type = "GLbitfield"
self.groupString = groupString
self.reqFeatures = []
self.remFeatures = [] # len(remF) should always be < 2
self.reqExtensions = []
for feature in features:
if feature.api == api and self.name in feature.reqEnumStrings:
self.reqFeatures.append(feature)
for feature in features:
if feature.api == api and self.name in feature.remEnumStrings:
self.remFeatures.append(feature)
for extension in extensions:
if extension.api == api and self.name in extension.reqEnumStrings:
self.reqExtensions.append(extensions)
def __str__(self):
return "Enum(%s, %s)" % (self.name, self.value)
def __lt__(self, other):
return self.value < other.value or (self.value == other.value and self.name < other.name)
# this compares the given feature with the lowest requiring feature
def supported(self, feature, core):
if feature is None:
return True
        # ToDo: this might create a cyclic recursion if OpenGL is erroneous
        aliasSupported = self.alias.supported(feature, core) if self.alias else False
        # Note: design decision:
# every featured functions include should not contain enums from extensions.
#if len(self.reqFeatures) == 0 and len(self.reqExtensions) > 0:
# return True
if len(self.reqFeatures) == 0:
return aliasSupported
if core:
req = find_le(self.reqFeatures, feature)
rem = find_le(self.remFeatures, feature)
if req is not None and rem is not None:
return req > rem or aliasSupported
return req <= feature or aliasSupported
else:
sSelf = min(self.reqFeatures) <= feature
return sSelf or aliasSupported
class Group:
def __init__(self, xml):
self.enums = set()
self.enumStrings = []
if isinstance(xml, str):
self.name = xml
return
self.name = xml.attrib["name"]
for enum in xml.iter("enum"):
self.enumStrings.append(enum.attrib["name"])
def __str__(self):
return "Group(%s, %s)" % (self.name, str(len(self.enumStrings)))
def __lt__(self, other):
return self.name < other.name
def parseGroups(xml, enums):
groups = []
groupsByName = dict()
for G in xml.iter("groups"):
for g in G.iter("group"):
group = Group(g)
groups.append(group)
groupsByName[group.name] = group
for e in g.iter("enum"):
group.enumStrings.append(e.attrib["name"])
# if groups are not listed in groups section
# they can be implicitly specified by enums
for enum in enums:
createGroup_ifImplicit(groups, groupsByName, enum)
return sorted(groups)
def createGroup_ifImplicit(groups, groupsByName, enum):
name = enum.groupString
if name is None:
return
if name not in groupsByName:
group = Group(name)
groups.append(group)
groupsByName[name] = group
groupsByName[name].enumStrings.append(enum.name)
def resolveGroups(groups, enumsByName):
for group in groups:
group.enums = set([ enumsByName[e] for e
in group.enumStrings if e in enumsByName ])
for enum in group.enums:
enum.groups.add(group)
def verifyGroups(groups, enums):
    # all non-verified enums/groups should be patched
unreferenced = set()
# (1) check that every referenced group exists (resolveEnums)
groupsByName = dict([(group.name, group) for group in groups])
for enum in enums:
if enum.groupString is not None and enum.groupString not in groupsByName:
unreferenced.add(enum)
if len(unreferenced) > 0:
print(" WARNING: " + str(len(unreferenced)) + " unreferenced groups:")
for enum in unreferenced:
print(" %s (in %s)" % (enum.groupString, enum.name))
# (2) check that every enum referencing a group,
# is actually referenced in that group
# ToDo
# (3) check that every enum of type GLbitfield
# has only one group (important for namespace import)
# Note: (3) is deprecated since glbinding supports groups
#overflows = set()
#for enum in enums:
# if enum.type == "GLbitfield" and len(enum.groups) > 1:
# overflows.add(enum)
#if len(overflows) > 0:
# print " WARNING: " + str(len(overflows)) + " enums are in multiple groups:"
# for enum in overflows:
# print (" %s groups for %s (%s)" % (str(len(enum.groups)), enum.name, ", ".join([g.name for g in enum.groups])))
def parseEnums(xml, features, extensions, commands, api):
# create utility string sets to simplify application of constraints
groupsUsed = set()
for command in (command for command in commands if len(command.params) > 0):
for param in (param for param in command.params if param.groupString is not None):
groupsUsed.add(param.groupString)
enumsRequired = set()
for feature in features:
if len(feature.reqEnumStrings) > 0:
enumsRequired |= set(feature.reqEnumStrings)
for extension in extensions:
if len(extension.reqEnumStrings) > 0:
enumsRequired |= set(extension.reqEnumStrings)
enums = set()
for E in xml.iter("enums"):
groupString = E.attrib.get("group", None)
groupType = E.attrib.get("type", None)
# only parse enum if
# (1) no comment attribute exists for <enum> starting with "Not an API enum. ..."
# (2) at least one feature or extension of the requested api requires the enum of requested api
# (3) if the enum has a group and at least one command has a parameter of that group
for enum in E.findall("enum"):
            # enforce constraint (1)
if "comment" in enum.attrib and enum.attrib["comment"].startswith("Not an API enum."):
continue
name = enum.attrib["name"]
            # enforce constraint (2) and (3)
if name not in enumsRequired and groupString not in groupsUsed:
continue
if "api" in enum.attrib and enum.attrib["api"] != api:
continue
enums.add(Enum(enum, features, extensions, groupString, groupType, api))
return sorted(enums)
def resolveEnums(enums, enumsByName, groupsByName):
aliases = dict()
groups = dict()
for enum in enums:
# aliases might be from other api, but are not added
# since enums by name only includes api enums
if enum.aliasString is not None:
if enum.aliasString in enumsByName:
enum.alias = enumsByName[enum.aliasString]
else:
aliases[enum.aliasString] = enum
if enum.groupString is not None:
if enum.groupString in groupsByName:
group = groupsByName[enum.groupString]
enum.groups.add(group)
group.enums.add(enum)
else:
groups[enum.groupString] = enum
if len(aliases) > 0:
print(" WARNING: " + str(len(aliases)) + " unresolved aliases:")
for alias, enum in aliases.items():
print(" %s (of %s)" % (alias, enum.name))
if len(groups) > 0:
print(" WARNING: " + str(len(groups)) + " unresolved groups:")
for group, enum in groups.items():
print(" %s (in %s)" % (group, enum.name))
def patchEnums(enums, patches, groups):
enumsByName = dict([(enum.name, enum) for enum in enums])
groupsByName = dict([(group.name, group) for group in groups])
for patch in patches:
if patch.name not in enumsByName:
createGroup_ifImplicit(groups, groupsByName, patch)
enums.append(patch)
elif len(patch.aliasString) > 0:
enumsByName[patch.name].aliasString = patch.aliasString
enumsByName[patch.name].alias = enumsByName[patch.aliasString]
# ToDo: probably more fixes might be appropriate
def patchGroups(groups, patches):
groupsByName = dict([(group.name, group) for group in groups])
for patch in patches:
if patch.name not in groupsByName:
groups.append(patch)
else:
g = groupsByName[patch.name]
for e in patch.enumStrings:
g.enumStrings.append(e)
def groupEnumsByType(enums):
d = dict()
for e in enums:
if not e.type in d:
d[e.type] = []
d[e.type].append(e)
return d
def groupEnumsByGroup(enums):
d = dict()
ungroupedName = "__UNGROUPED__"
for e in enums:
if len(e.groups)==0:
if not ungroupedName in d:
d[ungroupedName] = []
d[ungroupedName].append(e)
continue
for g in e.groups:
if not g.name in d:
d[g.name] = []
d[g.name].append(e)
for key in d.keys():
d[key] = sorted(d[key], key = lambda e: e.value)
return d
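# Illustration (sketch): groupEnumsByType(enums) yields something like
# {"GLenum": [...], "GLbitfield": [...], "GLboolean": [...]}, while
# groupEnumsByGroup(enums) keys enums by group name and collects enums that
# belong to no group under "__UNGROUPED__".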
| j-o/glbinding | source/codegeneration/scripts/classes/Enum.py | Python | mit | 10,589 |
# -*- coding: utf-8 -*-
import os
import logging
import sys
from django.utils import timezone
from website import settings
def format_now():
return timezone.now().isoformat()
def add_file_logger(logger, script_name, suffix=None):
_, name = os.path.split(script_name)
name = name.rstrip('c')
if suffix is not None:
name = '{0}-{1}'.format(name, suffix)
file_handler = logging.FileHandler(
os.path.join(
settings.LOG_PATH,
'.'.join([name, format_now(), 'log'])
)
)
logger.addHandler(file_handler)
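# Usage sketch (assumed logger setup, not part of the original module): scripts
# typically pass their own module path so the log file is named after them.
#   logger = logging.getLogger(__name__)
#   add_file_logger(logger, __file__)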
class Progress(object):
def __init__(self, bar_len=50, precision=1):
self.bar_len = bar_len
self.precision = precision
self.bar_format = '{}[{}] {:0.' + str(self.precision) + 'f}% ... {}\r'
def start(self, total, prefix):
self.total = total
self.count = 0
self.prefix = prefix
self.last_percents = None
def increment(self, inc=1):
self.count = self.count + inc
percents = round(100.0 * self.count / float(self.total), self.precision)
if self.last_percents == percents:
return
self.last_percents = percents
filled_len = int(round(self.bar_len * self.count / float(self.total)))
bar = '=' * filled_len + '-' * (self.bar_len - filled_len)
sys.stdout.flush()
sys.stdout.write(self.bar_format.format(self.prefix, bar, percents, str(self.total)))
def stop(self):
# To preserve line, there is probably a better way to do this
print('')
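# Usage sketch (hypothetical totals): Progress renders a simple in-place bar.
#   progress = Progress()
#   progress.start(total=200, prefix='migrating ')
#   for record in range(200):
#       progress.increment()
#   progress.stop()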
| TomBaxter/osf.io | scripts/utils.py | Python | apache-2.0 | 1,576 |
import math
def add(a, b):
return (a + b)
add(3, 4)
def sub(a, b):
return (a - b)
sub(5, 3)
def mul(a, b):
return (a * b)
mul(4, 4)
def div(a, b):
return (a / b)
div(2, 3)
def hours_from_second(a):
return (a/3600)
hours_from_second(86400)
def circle_area(a):
return (math.pi*(a**2))
circle_area(5)
def sphere_volume(a):
    # Use a float literal so Python 2 does not truncate 4/3 to 1.
    return ((4.0/3)*(math.pi)*(a**3))
sphere_volume(5)
def avg_volume(a, b):
    # Halve the diameters to get radii (as floats), then average the two
    # sphere volumes; the outer parentheses make /2 apply to the whole sum.
    c = a / 2.0
    d = b / 2.0
    return (((4.0/3)*math.pi*(c**3)) + ((4.0/3)*math.pi*(d**3))) / 2
avg_volume(10, 20)
def area(a, b, c):
    # Heron's formula: s is the semi-perimeter of the triangle.
    s = (a + b + c) / 2.0
    return math.sqrt(s*(s-a)*(s-b)*(s-c))
print area(1, 2, 2.5)
def right_align(word):
return str ((80-len(word))*" " + word)
print right_align("Hello")
def center(term):
return str ((40-len(term))*" " + term)
print center ("Hello")
def msg_box(word):
return "+" + ((len(word)+4)*"-") + "+" + "\n" + "|" + (2*" ") + (word) + (2*" ") + "|" + "\n" + "+" + ((len(word)+4)*"-") + "+"
print msg_box("Hello")
print msg_box("I eat cats!")
a = add(3, 4)
b = sub(5, 3)
c = mul(4, 4)
d = div(2, 3)
e = hours_from_second(86400)
f = circle_area(5)
g = sphere_volume(5)
h = avg_volume(10, 20)
i = area(1, 2, 2.5)
j = right_align("Hello")
k = center("Hello")
print msg_box(str(a))
print msg_box(str(b))
print msg_box(str(c))
print msg_box(str(d))
print msg_box(str(e))
print msg_box(str(f))
print msg_box(str(g))
print msg_box(str(h))
print msg_box(str(i))
print msg_box(str(j))
print msg_box(str(k))
| Naja2445-cmis/naja2445-cmis-cs2 | functions.py | Python | cc0-1.0 | 1,478 |
# -*- coding: utf-8 -*-
import unittest
import sys
sys.path.append(u'../ftplugin')
from datetime import date
from datetime import datetime
from orgmode.plugins.Date import Date
class DateTestCase(unittest.TestCase):
u"""Tests all the functionality of the Date plugin.
Also see:
http://orgmode.org/manual/The-date_002ftime-prompt.html#The-date_002ftime-prompt
"""
def setUp(self):
self.d = date(2011, 5, 22)
def test_modify_time_with_None(self):
# no modification should happen
res = Date._modify_time(self.d, None)
self.assertEquals(self.d, res)
def test_modify_time_with_dot(self):
# no modification should happen
res = Date._modify_time(self.d, u'.')
self.assertEquals(self.d, res)
def test_modify_time_with_given_relative_days(self):
# modifier and expected result
test_data = [(u'+0d', self.d),
(u'+1d', date(2011, 5, 23)),
(u'+2d', date(2011, 5, 24)),
(u'+7d', date(2011, 5, 29)),
(u'+9d', date(2011, 5, 31)),
(u'+10d', date(2011, 6, 1)),
(u'7d', self.d)] # wrong format: plus is missing
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(self.d, modifier))
def test_modify_time_with_given_relative_days_without_d(self):
# modifier and expected result
test_data = [(u'+0', self.d),
(u'+1', date(2011, 5, 23)),
(u'+2', date(2011, 5, 24)),
(u'+7', date(2011, 5, 29)),
(u'+9', date(2011, 5, 31)),
(u'+10', date(2011, 6, 1))]
for modifier, expected in test_data:
result = Date._modify_time(self.d, modifier)
self.assertEquals(expected, result)
def test_modify_time_with_given_relative_weeks(self):
# modifier and expected result
test_data = [(u'+1w', date(2011, 5, 29)),
(u'+2w', date(2011, 6, 5)),
(u'+3w', date(2011, 6, 12)),
(u'+3w', date(2011, 6, 12)),
(u'+0w', self.d),
(u'3w', self.d), # wrong format
(u'+w', self.d)] # wrong format
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(self.d, modifier))
def test_modify_time_with_given_relative_months(self):
test_data = [(u'+0m', self.d),
(u'+1m', date(2011, 6, 22)),
(u'+2m', date(2011, 7, 22))]
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(self.d, modifier))
def test_modify_time_with_given_relative_years(self):
test_data = [(u'+1y', date(2012, 5, 22)),
(u'+10y', date(2021, 5, 22)),
(u'+0y', self.d)]
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(self.d, modifier))
def test_modify_time_with_given_weekday(self):
# use custom day instead of self.d to ease testing
cust_day = date(2011, 5, 25) # it's a Wednesday
#print cust_day.weekday() # 2
test_data = [(u'Thu', date(2011, 5, 26)),
(u'thu', date(2011, 5, 26)),
(u'tHU', date(2011, 5, 26)),
(u'THU', date(2011, 5, 26)),
(u'Fri', date(2011, 5, 27)),
(u'sat', date(2011, 5, 28)),
(u'sun', date(2011, 5, 29)),
(u'mon', date(2011, 5, 30)),
(u'tue', date(2011, 5, 31)),
(u'wed', date(2011, 6, 1))]
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(cust_day, modifier))
def test_modify_time_with_month_and_day(self):
cust_date = date(2006, 6, 13)
test_data = [(u'sep 15', date(2006, 9, 15)),
(u'Sep 15', date(2006, 9, 15)),
(u'SEP 15', date(2006, 9, 15)),
(u'feb 15', date(2007, 2, 15)),
(u'jan 1', date(2007, 1, 1)),
(u'7/5', date(2006, 07, 05)),
(u'2/5', date(2007, 02, 05)),]
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(cust_date, modifier))
def test_modify_time_with_time(self):
cust_date = date(2006, 6, 13)
test_data = [(u'12:45', datetime(2006, 06, 13, 12, 45)),
(u'1:45', datetime(2006, 06, 13, 1, 45)),
(u'1:05', datetime(2006, 06, 13, 1, 5)),]
for modifier, expected in test_data:
res = Date._modify_time(cust_date, modifier)
self.assertTrue(isinstance(res, datetime))
self.assertEquals(expected, res)
def test_modify_time_with_full_dates(self):
result = Date._modify_time(self.d, u'2011-01-12')
expected = date(2011, 1, 12)
self.assertEquals(expected, result)
reults = Date._modify_time(self.d, u'2015-03-12')
expected = date(2015, 3, 12)
self.assertEquals(expected, reults)
cust_date = date(2006, 6, 13)
test_data = [(u'3-2-5', date(2003, 2, 05)),
(u'12-2-28', date(2012, 2, 28)),
(u'2/5/3', date(2003, 02, 05)),
(u'sep 12 9', date(2009, 9, 12)),
(u'jan 2 99', date(2099, 1, 2)),]
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(cust_date, modifier))
def test_modify_time_with_only_days(self):
cust_date = date(2006, 6, 13)
test_data = [(u'14', date(2006, 06, 14)),
(u'12', date(2006, 07, 12)),
(u'1', date(2006, 07, 1)),
(u'29', date(2006, 06, 29)),]
for modifier, expected in test_data:
self.assertEquals(expected, Date._modify_time(cust_date, modifier))
def test_modify_time_with_day_and_time(self):
cust_date = date(2006, 6, 13)
test_data = [(u'+1 10:20', datetime(2006, 06, 14, 10, 20)),
(u'+1w 10:20', datetime(2006, 06, 20, 10, 20)),
(u'+2 10:30', datetime(2006, 06, 15, 10, 30)),
(u'+2d 10:30', datetime(2006, 06, 15, 10, 30))]
for modifier, expected in test_data:
result = Date._modify_time(cust_date, modifier)
self.assertEquals(expected, result)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(DateTestCase)
# vi: noexpandtab
| j-a-m-l/.dot | vim/bundle/vim-orgmode/tests/test_plugin_date.py | Python | mit | 5,504 |
# -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from nikola.plugin_categories import Command
from nikola import __version__
class CommandVersion(Command):
"""Print the version."""
name = "version"
doc_usage = ""
needs_config = False
doc_purpose = "print the Nikola version number"
def _execute(self, options={}, args=None):
"""Print the version number."""
print("Nikola version " + __version__)
| kotnik/nikola | nikola/plugins/command_version.py | Python | mit | 1,573 |
import test_config
import pyautogui
#saves a savestate
def save(components):
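    """Save the emulator state to a numbered savestate slot.
    The slot is parsed from the digits in components['arguments'], the key
    presses are only issued when the sender is listed in test_config.owner,
    and a status message string is returned (the shape of `components` is
    inferred from how it is used below).
    """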
response = 'There was a problem saving @' + components['sender'] + ". Either you aren't allowed to do that or you gave invalid arguments."
slot = int(''.join(i for i in components['arguments'] if i.isdigit()))
if slot not in test_config.valid_savestates:
pass #todo(metro) fix this shit
elif components['sender'].lower() in test_config.owner:
print('gonna press shut to save now')
pyautogui.press(str(slot))
pyautogui.press(str(test_config.buttons['save']))
pyautogui.press(str(test_config.default_savestate))
response = 'saved successfully in state ' + str(slot)
return response | Metruption/IRC-Bot | src/cmds/save.py | Python | gpl-3.0 | 676 |
# Copyright 2013 - Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from wsme import types as wtypes
from solum.api.controllers import common_types
from solum.api.controllers.v1.datamodel import operation
from solum.api.controllers.v1.datamodel import sensor
from solum.api.controllers.v1.datamodel import service
from solum.api.controllers.v1.datamodel import types as api_types
class Component(api_types.Base):
"""The Component resource represents one part of an Assembly.
For example, an instance of a database service may be a
Component. A Component resource may also represent a static artifact, such
as an archive file that contains data for initializing your application.
An Assembly may have different components that represent different
processes that run. For example, you may have one Component that represents
an API service process, and another that represents a web UI process that
consumes that API service. The simplest case is when an Assembly has only
one component. For example, your component may be named "PHP" and refers to
the PHP Service offered by the platform for running a PHP application.
"""
assembly_uuid = wtypes.text
""""The uuid of the assembly that this component belongs in."""
services = [service.Service]
"""Services that belong to the component."""
operations = [operation.Operation]
"""Operations that belong to the component."""
sensors = [sensor.Sensor]
"""Sensors that belong to the component."""
abbreviated = bool
"""Boolean value indicating if this components has nested components at
more than one level of depth."""
components_ids = [wtypes.text]
"""IDs of nested component of the component."""
resource_uri = common_types.Uri
"""Remote resource URI of the component."""
plan_uri = common_types.Uri
"""URI of Plan of which the component is a part."""
component_type = wtypes.text
"""Type of component e.g. heat_stack."""
heat_stack_id = wtypes.text
"""Unique identifier of the Heat Stack."""
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/components/php-web-app',
name='php-web-app',
type='component',
component_type='heat_stack',
description='A php web application component',
tags=['group_xyz'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
assembly_id='b3e0d79c698ea7b1561075bcfbbd2206a23d19b9',
heat_stack_id='4c712026-dcd5-4664-90b8-0915494c1332',
abbreviated=True,
components_ids=[],
services=[],
operations=[],
sensors=[])
| openstack/solum | solum/api/controllers/v1/datamodel/component.py | Python | apache-2.0 | 3,377 |
import codecs
import json
import os
import random
import re
from cloudbot import hook
@hook.on_start()
def shuffle_deck(bot):
global gnomecards
with codecs.open(os.path.join(bot.data_dir, "gnomecards.json"), encoding="utf-8") as f:
gnomecards = json.load(f)
@hook.command('cah')
def CAHwhitecard(text):
"""<text> - Submit text to be used as a CAH whitecard"""
return random.choice(gnomecards['black']).format(text)
@hook.command('cahb')
def CAHblackcard(text):
"""<text> - Submit text with _ for the bot to fill in the rest. You can submit text with multiple _"""
CardText = text.strip()
# noinspection PyUnusedLocal
def blankfiller(matchobj):
return random.choice(gnomecards['white'])
out = re.sub(r'\b_\b', blankfiller, CardText)
return out
| weylin/CloudBot | plugins/gnomeagainsthumanity.py | Python | gpl-3.0 | 808 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import libs.boxes.cython_bbox as cython_bbox
import libs.configs.config_v1 as cfg
from libs.boxes.bbox_transform import bbox_transform, bbox_transform_inv, clip_boxes
from libs.logs.log import LOG
# FLAGS = tf.app.flags.FLAGS
_DEBUG = False
def encode(gt_boxes, rois, num_classes):
"""Matching and Encoding groundtruth boxes (gt_boxes) into learning targets to boxes
Sampling
Parameters
---------
gt_boxes an array of shape (G x 5), [x1, y1, x2, y2, class]
rois an array of shape (R x 4), [x1, y1, x2, y2]
num_classes: scalar, number of classes
Returns
--------
labels: Nx1 array in [0, num_classes)
bbox_targets: of shape (N, Kx4) regression targets
bbox_inside_weights: of shape (N, Kx4), in {0, 1} indicating which class is assigned.
"""
all_rois = rois
num_rois = rois.shape[0]
if gt_boxes.size > 0:
# R x G matrix
overlaps = cython_bbox.bbox_overlaps(
np.ascontiguousarray(all_rois[:, 0:4], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1) # R
# max_overlaps = overlaps.max(axis=1) # R
max_overlaps = overlaps[np.arange(rois.shape[0]), gt_assignment]
# note: this will assign every rois with a positive label
# labels = gt_boxes[gt_assignment, 4]
labels = np.zeros([num_rois], dtype=np.float32)
labels[:] = -1
# if _DEBUG:
# print ('gt_assignment')
# print (gt_assignment)
# sample rois as to 1:3
fg_inds = np.where(max_overlaps >= cfg.FLAGS.fg_threshold)[0]
fg_rois = int(min(fg_inds.size, cfg.FLAGS.rois_per_image * cfg.FLAGS.fg_roi_fraction))
if fg_inds.size > 0 and fg_rois < fg_inds.size:
fg_inds = np.random.choice(fg_inds, size=fg_rois, replace=False)
labels[fg_inds] = gt_boxes[gt_assignment[fg_inds], 4]
# TODO: sampling strategy
bg_inds = np.where((max_overlaps < cfg.FLAGS.bg_threshold))[0]
bg_rois = max(min(cfg.FLAGS.rois_per_image - fg_rois, fg_rois * 3), 64)
if bg_inds.size > 0 and bg_rois < bg_inds.size:
bg_inds = np.random.choice(bg_inds, size=bg_rois, replace=False)
labels[bg_inds] = 0
# ignore rois with overlaps between fg_threshold and bg_threshold
ignore_inds = np.where(((max_overlaps > cfg.FLAGS.bg_threshold) &\
(max_overlaps < cfg.FLAGS.fg_threshold)))[0]
labels[ignore_inds] = -1
keep_inds = np.append(fg_inds, bg_inds)
if _DEBUG:
print ('keep_inds')
print (keep_inds)
print ('fg_inds')
print (fg_inds)
print ('bg_inds')
print (bg_inds)
print ('bg_rois:', bg_rois)
print ('cfg.FLAGS.bg_threshold:', cfg.FLAGS.bg_threshold)
# print (max_overlaps)
LOG('ROIEncoder: %d positive rois, %d negative rois' % (len(fg_inds), len(bg_inds)))
bbox_targets, bbox_inside_weights = _compute_targets(
rois[keep_inds, 0:4], gt_boxes[gt_assignment[keep_inds], :4], labels[keep_inds], num_classes)
bbox_targets = _unmap(bbox_targets, num_rois, keep_inds, 0)
bbox_inside_weights = _unmap(bbox_inside_weights, num_rois, keep_inds, 0)
else:
# there is no gt
labels = np.zeros((num_rois, ), np.float32)
bbox_targets = np.zeros((num_rois, 4 * num_classes), np.float32)
bbox_inside_weights = np.zeros((num_rois, 4 * num_classes), np.float32)
bg_rois = min(int(cfg.FLAGS.rois_per_image * (1 - cfg.FLAGS.fg_roi_fraction)), 64)
if bg_rois < num_rois:
bg_inds = np.arange(num_rois)
ignore_inds = np.random.choice(bg_inds, size=num_rois - bg_rois, replace=False)
labels[ignore_inds] = -1
return labels, bbox_targets, bbox_inside_weights
def decode(boxes, scores, rois, ih, iw):
"""Decode prediction targets into boxes and only keep only one boxes of greatest possibility for each rois
Parameters
---------
boxes: an array of shape (R, Kx4), [x1, y1, x2, y2, x1, x2, y1, y2]
scores: an array of shape (R, K),
rois: an array of shape (R, 4), [x1, y1, x2, y2]
Returns
--------
final_boxes: of shape (R x 4)
classes: of shape (R) in {0,1,2,3... K-1}
scores: of shape (R) in [0 ~ 1]
"""
boxes = bbox_transform_inv(rois, deltas=boxes)
classes = np.argmax(scores, axis=1)
classes = classes.astype(np.int32)
scores = np.max(scores, axis=1)
final_boxes = np.zeros((boxes.shape[0], 4), dtype=np.float32)
for i in np.arange(0, boxes.shape[0]):
ind = classes[i]*4
final_boxes[i, 0:4] = boxes[i, ind:ind+4]
final_boxes = clip_boxes(final_boxes, (ih, iw))
return final_boxes, classes, scores
def _compute_targets(ex_rois, gt_rois, labels, num_classes):
"""
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
clss = labels
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = targets[ind, 0:4]
bbox_inside_weights[ind, start:end] = 1
return bbox_targets, bbox_inside_weights
def _unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if len(data.shape) == 1:
ret = np.empty((count,), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
if __name__ == '__main__':
cfg.FLAGS.fg_threshold = 0.1
classes = np.random.randint(0, 3, (10, 1))
boxes = np.random.randint(10, 50, (10, 2))
s = np.random.randint(10, 20, (10, 2))
s = boxes + s
boxes = np.concatenate((boxes, s), axis=1)
gt_boxes = np.hstack((boxes, classes))
noise = np.random.randint(-3, 3, (10, 4))
rois = gt_boxes[:, :4] + noise
  labels, bbox_targets, bbox_inside_weights = encode(gt_boxes, rois, num_classes=3)
print (labels)
print (bbox_inside_weights)
ls = np.zeros((labels.shape[0], 3))
for i in range(labels.shape[0]):
    ls[i, int(labels[i])] = 1  # labels is a float array, so cast before indexing
final_boxes, classes, scores = decode(bbox_targets, ls, rois, 100, 100)
print('gt_boxes:\n', gt_boxes)
print ('final boxes:\n', np.hstack((final_boxes, np.expand_dims(classes, axis=1))).astype(np.int32))
# print (final_boxes.astype(np.int32))
| CharlesShang/FastMaskRCNN | libs/layers/roi.py | Python | apache-2.0 | 6,976 |
from vk_app.utils.utils import *
| lycantropos/VKApp | vk_app/utils/__init__.py | Python | gpl-3.0 | 33 |
# coding: utf-8
# IPython (or Jupyter) notebooks are good for illustrative purposes. They allow you to document the process and save results in a form that is easy to publish.
#
# ### Hello World Example
# In[1]:
print "Hello World!"
| shameeriqbal/pandas-tutorial | notebooks/1.hello_world.py | Python | mit | 231 |
default_app_config = 'gro_api.resources.apps.ResourcesConfig'
| OpenAgInitiative/gro-api | gro_api/resources/__init__.py | Python | gpl-2.0 | 62 |
# Copyright 2015 - Yahoo! Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Magnum Service Layer"""
from oslo_log import log
from oslo_service import periodic_task
from magnum import objects
from magnum.service import periodic
LOG = log.getLogger(__name__)
class MagnumServicePeriodicTasks(periodic_task.PeriodicTasks):
'''Magnum periodic Task class
    Any periodic task job needs to be added into this class
'''
def __init__(self, conf, binary):
self.magnum_service_ref = None
self.host = conf.host
self.binary = binary
super(MagnumServicePeriodicTasks, self).__init__(conf)
@periodic_task.periodic_task(run_immediately=True)
@periodic.set_context
def update_magnum_service(self, ctx):
LOG.debug('Update magnum_service')
if self.magnum_service_ref is None:
self.magnum_service_ref = \
objects.MagnumService.get_by_host_and_binary(
ctx, self.host, self.binary)
if self.magnum_service_ref is None:
magnum_service_dict = {
'host': self.host,
'binary': self.binary
}
self.magnum_service_ref = objects.MagnumService(
ctx, **magnum_service_dict)
self.magnum_service_ref.create()
self.magnum_service_ref.report_state_up()
def setup(conf, binary, tg):
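    """Create the periodic task handler and register it as a dynamic timer on the thread group."""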
pt = MagnumServicePeriodicTasks(conf, binary)
tg.add_dynamic_timer(
pt.run_periodic_tasks,
periodic_interval_max=conf.periodic_interval_max,
context=None)
| jay-lau/magnum | magnum/servicegroup/magnum_service_periodic.py | Python | apache-2.0 | 2,139 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetDirectMessages
# Retrieves the 20 most recent direct messages sent to the authenticating user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetDirectMessages(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetDirectMessages Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetDirectMessages, self).__init__(temboo_session, '/Library/Twitter/DirectMessages/GetDirectMessages')
def new_input_set(self):
return GetDirectMessagesInputSet()
def _make_result_set(self, result, path):
return GetDirectMessagesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetDirectMessagesChoreographyExecution(session, exec_id, path)
class GetDirectMessagesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetDirectMessages
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
"""
super(GetDirectMessagesInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
"""
super(GetDirectMessagesInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
"""
super(GetDirectMessagesInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
"""
super(GetDirectMessagesInputSet, self)._set_input('ConsumerSecret', value)
def set_Count(self, value):
"""
Set the value of the Count input for this Choreo. ((optional, integer) Specifies the number of records to retrieve up to a maximum of 200.)
"""
super(GetDirectMessagesInputSet, self)._set_input('Count', value)
def set_IncludeEntities(self, value):
"""
Set the value of the IncludeEntities input for this Choreo. ((optional, boolean) The "entities" node containing extra metadata will not be included when set to false.)
"""
super(GetDirectMessagesInputSet, self)._set_input('IncludeEntities', value)
def set_MaxID(self, value):
"""
Set the value of the MaxID input for this Choreo. ((optional, string) Returns results with an ID less than (older than) or equal to the specified ID.)
"""
super(GetDirectMessagesInputSet, self)._set_input('MaxID', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) Specifies the page of results to retrieve.)
"""
super(GetDirectMessagesInputSet, self)._set_input('Page', value)
def set_SinceID(self, value):
"""
Set the value of the SinceID input for this Choreo. ((optional, string) Returns results with an ID greater than (more recent than) the specified ID.)
"""
super(GetDirectMessagesInputSet, self)._set_input('SinceID', value)
def set_SkipStatus(self, value):
"""
Set the value of the SkipStatus input for this Choreo. ((optional, boolean) When set to true, statuses will not be included in the returned user objects.)
"""
super(GetDirectMessagesInputSet, self)._set_input('SkipStatus', value)
class GetDirectMessagesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetDirectMessages Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
"""
return self._output.get('Response', None)
def get_Limit(self):
"""
Retrieve the value for the "Limit" output from this Choreo execution. ((integer) The rate limit ceiling for this particular request.)
"""
return self._output.get('Limit', None)
def get_Remaining(self):
"""
Retrieve the value for the "Remaining" output from this Choreo execution. ((integer) The number of requests left for the 15 minute window.)
"""
return self._output.get('Remaining', None)
def get_Reset(self):
"""
Retrieve the value for the "Reset" output from this Choreo execution. ((date) The remaining window before the rate limit resets in UTC epoch seconds.)
"""
return self._output.get('Reset', None)
class GetDirectMessagesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetDirectMessagesResultSet(response, path)
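# Illustrative usage sketch (not part of the original module; the session import
# path and placeholder credentials are assumptions based on the Temboo Python SDK):
#
# from temboo.core.session import TembooSession
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = GetDirectMessages(session)
# inputs = choreo.new_input_set()
# inputs.set_AccessToken('...')
# inputs.set_AccessTokenSecret('...')
# inputs.set_ConsumerKey('...')
# inputs.set_ConsumerSecret('...')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())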
| jordanemedlock/psychtruths | temboo/core/Library/Twitter/DirectMessages/GetDirectMessages.py | Python | apache-2.0 | 6,443 |
from typing import List
from .genomic_interval import GenomicInterval
from lhc.binf.sequence.reverse_complement import reverse_complement
class NestedGenomicInterval(GenomicInterval):
def __init__(self, start, stop, *, chromosome=None, strand='+', data=None):
super().__init__(start, stop, chromosome=chromosome, strand=strand, data=data)
self.parent = None
self.children = [] # type: List['NestedGenomicInterval']
def __str__(self):
if self.chromosome is None:
return '{}-{}'.format(self.start.position, self.stop.position)
return '{}:{}-{}'.format(self.chromosome, self.start.position, self.stop.position)
def __repr__(self):
return 'NestedGenomicInterval({})'.format(self)
def __len__(self):
if len(self.children) == 0:
return self.stop - self.start
return sum(len(child) for child in self.children)
def add_child(self, child: 'NestedGenomicInterval'):
child.parent = self
self.children.append(child)
if child.start < self.start:
self.start = child.start
if self.parent:
self.parent.start = child.start
if child.stop > self.stop:
self.stop = child.stop
if self.parent:
self.parent.stop = child.stop
def switch_strand(self):
super().switch_strand()
for child in self.children:
child.switch_strand()
# Position functions
def get_abs_pos(self, pos):
intervals = self.children if self.strand == '+' else reversed(self.children)
fr = 0
for interval in intervals:
length = len(interval)
if fr <= pos < fr + length:
return interval.get_abs_pos(pos - fr)
fr += length
raise IndexError('relative position {} not contained within {}'.format(pos, self))
def get_rel_pos(self, pos, types=None):
if len(self.children) == 0:
if types is None or self.data['type'] in types:
return pos - self.start.position
else:
raise ValueError('Position in interval but not of right type.')
rel_pos = 0
intervals = iter(self.children) if self.strand == '+' else reversed(self.children)
for interval in intervals:
if interval.start.position <= pos < interval.stop.position:
return rel_pos + interval.get_rel_pos(pos, types=types)
if types is None or interval.data['type'] in types:
rel_pos += len(interval)
raise IndexError('absolute position {} not contained within {}'.format(pos, self))
# Sequence functions
def get_sub_seq(self, sequence_set, *, types=None):
res = ''
if len(self.children) > 0:
res = ''.join(child.get_sub_seq(sequence_set, types=types) for child in self.children)
if self.strand == '-':
res = reverse_complement(res)
elif types is None or self.data['type'] in types:
res = super().get_sub_seq(sequence_set)
return res
def get_5p(self):
return self.children[0].get_5p() if self.strand == '+' else\
self.children[-1].get_5p()
def get_3p(self):
return self.children[-1].get_3p() if self.strand == '+' else\
self.children[0].get_3p()
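# Illustrative sketch (not part of the original module; assumes the GenomicInterval
# base class accepts plain integer coordinates):
#
# exon1 = NestedGenomicInterval(10, 20, chromosome='chr1', strand='+')
# exon2 = NestedGenomicInterval(40, 60, chromosome='chr1', strand='+')
# transcript = NestedGenomicInterval(10, 60, chromosome='chr1', strand='+')
# transcript.add_child(exon1)
# transcript.add_child(exon2)
# len(transcript)  # 30: the sum of the child lengths, not the enclosing span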
| childsish/lhc-python | lhc/binf/genomic_coordinate/nested_genomic_interval.py | Python | gpl-2.0 | 3,398 |
# -*- coding: utf-8 -*-
""" GUI definition.
Copyright (c) Karol Będkowski, 2013
This file is part of wxGTD
Licence: GPLv2+
"""
| KarolBedkowski/wxgtd | wxgtd/gui/__init__.py | Python | gpl-2.0 | 130 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.cli.yamlutils import * # noqa: F401,F403
from rally.cli import yamlutils as _new
# import it as last item to be sure that we use the right module
from rally.common import logging
logging.log_deprecated_module(
target=__name__, new_module=_new.__name__, release="3.0.0"
)
| openstack/rally | rally/common/yamlutils.py | Python | apache-2.0 | 888 |
import pandas as pd
import re
import nltk
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
def review_to_words(raw_review):
# 1. Remove HTML
    review_text = BeautifulSoup(raw_review, "html.parser").get_text()
# 2. Remove non-letters
letters_only = re.sub("[^a-zA-Z]", " ", review_text)
# 3. Convert to lower case, split into individual words
words = letters_only.lower().split()
# 4. In Python, searching a set is much faster than searching a list, so convert the stop words to a set
stops = set(stopwords.words("english"))
# 5. Remove stop words
meaningful_words = [w for w in words if not w in stops]
# 6. Join the words back into one string separated by space, and return the result
return(" ".join(meaningful_words))
train = pd.read_csv("/home/hkh/sources/kagglepy/popcorn/data/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
# train = pd.read_csv("../data/labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
# clean_review = review_to_words(train["review"][0])
# Get the number of reviews based on the dataframe column size
num_reviews = train["review"].size
# Initialize an empty list to hold the clean reviews
clean_train_reviews = []
# Loop over each review; create an index i that goes from 0 to the length of the movie review list
for i in xrange(0, num_reviews):
if ((i+1) % 1000 == 0):
print "processing %d of %d\n" % (i+1, num_reviews)
clean_train_reviews.append(review_to_words(train["review"][i]))
print "Creating the bag of words...\n"
# Initialize the "CountVectorizer" object, which is scikit-learn's bag of words tool.
vectorizer = CountVectorizer(analyzer = "word",
tokenizer = None,
preprocessor = None,
stop_words = None,
max_features = 5000)
# fit_transform() does two function: First it fits the model and learns the vocabulary;
# second, it transforms our training data into feature vectors.
# The input to fit_transform should be a list of strings.
train_data_features = vectorizer.fit_transform(clean_train_reviews)
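# At this point train_data_features is a scipy sparse matrix of shape
# (num_reviews, vocabulary_size), with the vocabulary capped at 5000 by max_features.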
# Numpy arrays are easy to work with, so convert the result to an array
train_data_features = train_data_features.toarray()
## Train classifier
print "Training the random forest..."
# Initialize a Random Forest classifier with 100 trees
forest = RandomForestClassifier(n_estimators = 100)
# Fit the forest to the training set, using the bag of words as features
# and the sentiment labels as the response variable
#
# This may take a few minutes to run
forest = forest.fit(train_data_features, train["sentiment"])
# Read the test data
test = pd.read_csv("/home/hkh/sources/kagglepy/popcorn/data/testData.tsv", header=0, delimiter="\t", quoting=3)
# Verify that there are 25,000 rows and 2 columns
print "printing test data shape"
print test.shape
# Create an empty list and append the clean reviews one by one
num_reviews = len(test["review"])
clean_test_reviews = []
print "Cleaning and parsing the test set movie reviews...\n"
for i in xrange(0, num_reviews):
if ((i+1) % 1000 == 0):
print "Review %d of %d\n" % (i+1, num_reviews)
clean_review = review_to_words(test["review"][i])
clean_test_reviews.append(clean_review)
# Get a bag of words for the test set, and convert to numpy array
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
# Use the random forest to make sentiment label predictions
result = forest.predict(test_data_features)
# Copy the results to a pandas dataframe with an "id" column and a "sentiment" column
output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
# Use pandas to write the comma-separated output file
output.to_csv("../output/Bag_of_Words_model.csv", index=False, quoting=3)
print "Done." | hkhpub/kagglepy | popcorn/src/bagofwords.py | Python | mit | 3,858 |
"""Support for Homekit motion sensors."""
from aiohomekit.model.characteristics import CharacteristicsTypes
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_GAS,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
class HomeKitMotionSensor(HomeKitEntity, BinarySensorEntity):
"""Representation of a Homekit motion sensor."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [CharacteristicsTypes.MOTION_DETECTED]
@property
def device_class(self):
"""Define this binary_sensor as a motion sensor."""
return DEVICE_CLASS_MOTION
@property
def is_on(self):
"""Has motion been detected."""
return self.service.value(CharacteristicsTypes.MOTION_DETECTED)
class HomeKitContactSensor(HomeKitEntity, BinarySensorEntity):
"""Representation of a Homekit contact sensor."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [CharacteristicsTypes.CONTACT_STATE]
@property
def device_class(self):
"""Define this binary_sensor as a opening sensor."""
return DEVICE_CLASS_OPENING
@property
def is_on(self):
"""Return true if the binary sensor is on/open."""
return self.service.value(CharacteristicsTypes.CONTACT_STATE) == 1
class HomeKitSmokeSensor(HomeKitEntity, BinarySensorEntity):
"""Representation of a Homekit smoke sensor."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_SMOKE
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [CharacteristicsTypes.SMOKE_DETECTED]
@property
def is_on(self):
"""Return true if smoke is currently detected."""
return self.service.value(CharacteristicsTypes.SMOKE_DETECTED) == 1
class HomeKitCarbonMonoxideSensor(HomeKitEntity, BinarySensorEntity):
"""Representation of a Homekit BO sensor."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_GAS
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [CharacteristicsTypes.CARBON_MONOXIDE_DETECTED]
@property
def is_on(self):
"""Return true if CO is currently detected."""
return self.service.value(CharacteristicsTypes.CARBON_MONOXIDE_DETECTED) == 1
class HomeKitOccupancySensor(HomeKitEntity, BinarySensorEntity):
"""Representation of a Homekit occupancy sensor."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_OCCUPANCY
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [CharacteristicsTypes.OCCUPANCY_DETECTED]
@property
def is_on(self):
"""Return true if occupancy is currently detected."""
return self.service.value(CharacteristicsTypes.OCCUPANCY_DETECTED) == 1
class HomeKitLeakSensor(HomeKitEntity, BinarySensorEntity):
"""Representation of a Homekit leak sensor."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return [CharacteristicsTypes.LEAK_DETECTED]
@property
def device_class(self):
"""Define this binary_sensor as a leak sensor."""
return DEVICE_CLASS_MOISTURE
@property
def is_on(self):
"""Return true if a leak is detected from the binary sensor."""
return self.service.value(CharacteristicsTypes.LEAK_DETECTED) == 1
ENTITY_TYPES = {
"motion": HomeKitMotionSensor,
"contact": HomeKitContactSensor,
"smoke": HomeKitSmokeSensor,
"carbon-monoxide": HomeKitCarbonMonoxideSensor,
"occupancy": HomeKitOccupancySensor,
"leak": HomeKitLeakSensor,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit lighting."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(aid, service):
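        """Map a newly discovered HomeKit service to its entity class and add it; return True when handled."""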
entity_class = ENTITY_TYPES.get(service["stype"])
if not entity_class:
return False
info = {"aid": aid, "iid": service["iid"]}
async_add_entities([entity_class(conn, info)], True)
return True
conn.add_listener(async_add_service)
| sdague/home-assistant | homeassistant/components/homekit_controller/binary_sensor.py | Python | apache-2.0 | 4,775 |
from collections import namedtuple
from ...constants import MODEL_SCHEMA, MODEL_NAMESPACE
ProjectInfo = namedtuple("ProjectInfo", ["id"])
SolutionInfo = namedtuple("SolutionInfo", ["id", "projects"])
class SolutionInfoLoader:
def __init__(self, xmlroot):
self.__xmlroot = xmlroot
if not MODEL_SCHEMA.validate(self.__xmlroot):
raise Exception("Cannot load project: {0}".format(MODEL_SCHEMA.error_log.last_error))
def load(self):
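        """Build a SolutionInfo containing a ProjectInfo for every Project child element."""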
ret = []
for node in self.__xmlroot:
if node.tag == "{{{0}}}Project".format(MODEL_NAMESPACE):
ret.append(ProjectInfo(node.attrib["id"]))
return SolutionInfo(self.__xmlroot.attrib["id"], ret)
| umlfri/umlfri2 | umlfri2/datalayer/loaders/solution/solutioninfoloader.py | Python | gpl-3.0 | 734 |
import string,nltk
from nltk.corpus import stopwords
from nltk.stem.porter import *
def tokenizeAndStemStrings(text):
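    """Tokenize the text, drop English stop words, and Porter-stem the remaining tokens."""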
# turn text to tokens
tokens = nltk.word_tokenize(text)
# remove stop words
tokens_no_sw = [word for word in tokens if not word in stopwords.words('english')]
# stem words
stemmed = []
stemmer = PorterStemmer()
for item in tokens_no_sw:
# this line converts strings to unicode, so here I do it explicitly
#try:
stemmed.append(stemmer.stem(item))
#except:
# stemmed.append(unicode(item)) # for example, stemmer can't stem aed because it expects a letter before a
# print("stemmer skipped word: " + str(unicode(item)))
return stemmed
def cleanString(x):
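    """Strip punctuation and replace newline/carriage-return characters with spaces (Python 3 str API)."""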
return x.translate(str.maketrans('','',string.punctuation)).replace('\n', ' ').replace('\r', ' ')
#return x.encode('ascii','ignore').translate(None, string.punctuation).replace('\n', ' ').replace('\r', ' ')
| ryanmdavis/BioTechTopics | BTT_functions.py | Python | mit | 997 |
from setuptools import find_packages, setup
DESCRIPTION = 'App Engine backends for Django-nonrel'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='djangoappengine',
version='1.4.0',
packages=find_packages(exclude=['docs']),
install_requires=['djangotoolbox'],
author='Waldemar Kornewald',
author_email='[email protected]',
url='https://github.com/django-nonrel/djangoappengine',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license='3-clause BSD',
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
| jerod2000/djangoappengine | setup.py | Python | bsd-3-clause | 1,119 |
import pytest
from cryptotik import Hitbtc
from decimal import Decimal
from cryptotik.exceptions import APIError
private = pytest.mark.skipif(
not pytest.config.getoption("--apikey"),
reason="needs --apikey option to run."
)
hit = Hitbtc(pytest.config.getoption("--apikey"),
pytest.config.getoption("--secret"))
def test_format_pair():
    '''test string formatting to match API expectations'''
assert hit.format_pair("ppc-usd") == "PPCUSD"
def test_get_markets():
'''test get_markets'''
assert isinstance(hit.get_markets(), list)
assert "ppcusd" in hit.get_markets()
def test_get_market_ticker():
'''test get_market_ticker'''
ticker = hit.get_market_ticker("PPC-USD")
assert isinstance(ticker, dict)
assert sorted(ticker.keys()) == ['ask', 'bid', 'high', 'last', 'low', 'open', 'symbol', 'timestamp', 'volume', 'volumeQuote']
def test_get_market_orders():
    '''test get_market_orders (order book)'''
market_orders = hit.get_market_orders("ppc-usd")
assert isinstance(market_orders, dict)
assert isinstance(market_orders["ask"], list)
assert isinstance(market_orders["bid"], list)
def test_get_market_trade_history():
'''test get_market_trade_history'''
trade_history = hit.get_market_trade_history("ppc-usd", 10)
assert isinstance(trade_history, list)
assert len(trade_history) == 10
assert sorted(trade_history[0].keys()) == sorted(['id', 'price', 'quantity', 'side', 'timestamp'])
@private
def test_get_balances(apikey, secret):
balances = hit.get_balances()
assert isinstance(balances, list)
@private
def test_get_deposit_address(apikey, secret):
assert isinstance(hit.get_deposit_address("ppc"), dict)
@private
def test_get_withdraw_history(apikey, secret):
assert isinstance(hit.get_withdraw_history("ppc"), list)
@private
def test_withdraw(apikey, secret):
with pytest.raises(APIError):
hit.withdraw("ppc", 1, 'PpcEaT3Rd0NTsendftMKDAKr331DXgHe3L')
@private
def test_buy_limit(apikey, secret):
with pytest.raises(APIError):
hit.buy_limit("ppc-btc", 0.05, 1)
@private
def test_sell_limit(apikey, secret):
with pytest.raises(APIError):
hit.sell_limit("ltc_btc", 1, 0.25)
@private
def test_cancel_order(apikey, secret):
with pytest.raises(APIError):
hit.cancel_order('invalid') | peerchemist/cryptotik | test/hitbtc_test.py | Python | bsd-3-clause | 2,363 |
from conch.analysis.formants import FormantTrackFunction
import librosa
from conch.analysis.segments import FileSegment, SignalSegment
def test_formants_praat(base_filenames):
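    """Compute formant tracks both from a file path and from an in-memory signal for each test file."""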
for f in base_filenames:
wavpath = f + '.wav'
func = FormantTrackFunction(time_step=0.01,
window_length=0.025, num_formants=5, max_frequency=5500)
formants = func(wavpath)
sig, sr = librosa.load(wavpath)
formants2 = func(SignalSegment(sig, sr))
# Things are not exact...
# assert formants == formants2
| mmcauliffe/python-acoustic-similarity | tests/test_analysis_formants.py | Python | mit | 580 |
"""Utilities for with-statement contexts. See PEP 343."""
import abc
import sys
import _collections_abc
from collections import deque
from functools import wraps
__all__ = ["asynccontextmanager", "contextmanager", "closing", "nullcontext",
"AbstractContextManager", "AbstractAsyncContextManager",
"AsyncExitStack", "ContextDecorator", "ExitStack",
"redirect_stdout", "redirect_stderr", "suppress"]
class AbstractContextManager(abc.ABC):
"""An abstract base class for context managers."""
def __enter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractContextManager:
return _collections_abc._check_methods(C, "__enter__", "__exit__")
return NotImplemented
class AbstractAsyncContextManager(abc.ABC):
"""An abstract base class for asynchronous context managers."""
async def __aenter__(self):
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
async def __aexit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractAsyncContextManager:
return _collections_abc._check_methods(C, "__aenter__",
"__aexit__")
return NotImplemented
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManagerBase:
"""Shared functionality for @contextmanager and @asynccontextmanager."""
def __init__(self, func, args, kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
class _GeneratorContextManager(_GeneratorContextManagerBase,
AbstractContextManager,
ContextDecorator):
"""Helper for @contextmanager decorator."""
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, self.args, self.kwds)
def __enter__(self):
# do not keep args and kwds alive unnecessarily
# they are only needed for recreation, which is not possible anymore
del self.args, self.kwds, self.func
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return False
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
except StopIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed.
return exc is not value
except RuntimeError as exc:
# Don't re-raise the passed in exception. (issue27122)
if exc is value:
return False
# Likewise, avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479).
if type is StopIteration and exc.__cause__ is value:
return False
raise
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
# This cannot use 'except BaseException as exc' (as in the
# async implementation) to maintain compatibility with
# Python 2, where old-style class exceptions are not caught
# by 'except BaseException'.
if sys.exc_info()[1] is value:
return False
raise
raise RuntimeError("generator didn't stop after throw()")
class _AsyncGeneratorContextManager(_GeneratorContextManagerBase,
AbstractAsyncContextManager):
"""Helper for @asynccontextmanager."""
async def __aenter__(self):
try:
return await self.gen.__anext__()
except StopAsyncIteration:
raise RuntimeError("generator didn't yield") from None
async def __aexit__(self, typ, value, traceback):
if typ is None:
try:
await self.gen.__anext__()
except StopAsyncIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
value = typ()
# See _GeneratorContextManager.__exit__ for comments on subtleties
# in this implementation
try:
await self.gen.athrow(typ, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopAsyncIteration as exc:
return exc is not value
except RuntimeError as exc:
if exc is value:
return False
# Avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479 for sync generators; async generators also
# have this behavior). But do this only if the exception wrapped
# by the RuntimeError is actully Stop(Async)Iteration (see
# issue29692).
if isinstance(value, (StopIteration, StopAsyncIteration)):
if exc.__cause__ is value:
return False
raise
except BaseException as exc:
if exc is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, args, kwds)
return helper
def asynccontextmanager(func):
"""@asynccontextmanager decorator.
Typical usage:
@asynccontextmanager
async def some_async_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
async with some_async_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _AsyncGeneratorContextManager(func, args, kwds)
return helper
class closing(AbstractContextManager):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class _RedirectStream(AbstractContextManager):
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
"""Context manager for temporarily redirecting stdout to another file.
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
_stream = "stdout"
class redirect_stderr(_RedirectStream):
"""Context manager for temporarily redirecting stderr to another file."""
_stream = "stderr"
class suppress(AbstractContextManager):
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
class _BaseExitStack:
"""A base class for ExitStack and AsyncExitStack."""
@staticmethod
def _create_exit_wrapper(cm, cm_exit):
def _exit_wrapper(exc_type, exc, tb):
return cm_exit(cm, exc_type, exc, tb)
return _exit_wrapper
@staticmethod
def _create_cb_wrapper(callback, *args, **kwds):
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
return _exit_wrapper
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance."""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature.
Can suppress exceptions the same way __exit__ method can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself).
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods.
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume it's a callable.
self._push_exit_callback(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator.
def enter_context(self, cm):
"""Enters the supplied context manager.
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with
# statement.
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper)
return callback # Allow use as a decorator
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods."""
_exit_wrapper = self._create_exit_wrapper(cm, cm_exit)
_exit_wrapper.__self__ = cm
self._push_exit_callback(_exit_wrapper, True)
def _push_exit_callback(self, callback, is_sync=True):
self._exit_callbacks.append((is_sync, callback))
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(_BaseExitStack, AbstractContextManager):
"""Context manager for dynamic management of a stack of exit callbacks.
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception.
"""
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
is_sync, cb = self._exit_callbacks.pop()
assert is_sync
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
def close(self):
"""Immediately unwind the context stack."""
self.__exit__(None, None, None)
# Inspired by discussions on https://bugs.python.org/issue29302
class AsyncExitStack(_BaseExitStack, AbstractAsyncContextManager):
"""Async context manager for dynamic management of a stack of exit
callbacks.
For example:
async with AsyncExitStack() as stack:
connections = [await stack.enter_async_context(get_connection())
for i in range(5)]
# All opened connections will automatically be released at the
# end of the async with statement, even if attempts to open a
# connection later in the list raise an exception.
"""
@staticmethod
def _create_async_exit_wrapper(cm, cm_exit):
async def _exit_wrapper(exc_type, exc, tb):
return await cm_exit(cm, exc_type, exc, tb)
return _exit_wrapper
@staticmethod
def _create_async_cb_wrapper(callback, *args, **kwds):
async def _exit_wrapper(exc_type, exc, tb):
await callback(*args, **kwds)
return _exit_wrapper
async def enter_async_context(self, cm):
"""Enters the supplied async context manager.
If successful, also pushes its __aexit__ method as a callback and
returns the result of the __aenter__ method.
"""
_cm_type = type(cm)
_exit = _cm_type.__aexit__
result = await _cm_type.__aenter__(cm)
self._push_async_cm_exit(cm, _exit)
return result
def push_async_exit(self, exit):
"""Registers a coroutine function with the standard __aexit__ method
signature.
Can suppress exceptions the same way __aexit__ method can.
Also accepts any object with an __aexit__ method (registering a call
to the method instead of the object itself).
"""
_cb_type = type(exit)
try:
exit_method = _cb_type.__aexit__
except AttributeError:
# Not an async context manager, so assume it's a coroutine function
self._push_exit_callback(exit, False)
else:
self._push_async_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def push_async_callback(self, callback, *args, **kwds):
"""Registers an arbitrary coroutine function and arguments.
Cannot suppress exceptions.
"""
_exit_wrapper = self._create_async_cb_wrapper(callback, *args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection.
_exit_wrapper.__wrapped__ = callback
self._push_exit_callback(_exit_wrapper, False)
return callback # Allow use as a decorator
async def aclose(self):
"""Immediately unwind the context stack."""
await self.__aexit__(None, None, None)
def _push_async_cm_exit(self, cm, cm_exit):
"""Helper to correctly register coroutine function to __aexit__
method."""
_exit_wrapper = self._create_async_exit_wrapper(cm, cm_exit)
_exit_wrapper.__self__ = cm
self._push_exit_callback(_exit_wrapper, False)
async def __aenter__(self):
return self
async def __aexit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
is_sync, cb = self._exit_callbacks.pop()
try:
if is_sync:
cb_suppress = cb(*exc_details)
else:
cb_suppress = await cb(*exc_details)
if cb_suppress:
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
class nullcontext(AbstractContextManager):
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/contextlib.py | Python | gpl-2.0 | 23,774 |
from Const import *
from DownloadFile import DownloadFile
from threading import Lock
class DownloadFileList:
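    """Holds the queueing/downloading files and the completed files, guarded by two locks for thread-safe access."""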
def __init__(self):
self.downloadFileList = []
self.completedFileList = []
self.downloadFileListLock = None
self.completedFileListLock = None
def setLock(self):
self.downloadFileListLock = Lock()
self.completedFileListLock = Lock()
def unsetLock(self):
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
downloadFile.removeDownloadParts()
self.downloadFileListLock.release()
self.completedFileListLock.acquire()
for downloadFile in self.completedFileList:
downloadFile.removeDownloadParts()
self.completedFileListLock.release()
self.downloadFileListLock = None
self.completedFileListLock = None
def getList(self, getCompletedList = None):
if (getCompletedList == None):
return self.downloadFileList
else:
return self.completedFileList
def addQueueingFile(self, downloadFile):
self.downloadFileListLock.acquire()
self.downloadFileList.append(downloadFile)
self.downloadFileListLock.release()
def getQueueingFile(self):
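        # Return the first queued file that is still retriable, or None if nothing is waiting.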
#print ' size of self.downloadFileList is ', len (self.downloadFileList)
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
#if (downloadFile.getStatus() == STAT_Q or (downloadFile.getStatus() == STAT_E and downloadFile.isRetryPossible())):
#print 'Checking ', downloadFile.getFileURL(), ' status ', downloadFile.getStatus(), ' retryposible ', downloadFile.isRetryPossible()
if (downloadFile.getStatus() == STAT_Q and downloadFile.isRetryPossible()):
self.downloadFileListLock.release()
return downloadFile
#print 'NO Queueing file retriable found, return NONE'
self.downloadFileListLock.release()
return None
def deleteDownloadFileFromDownloadList(self, id, moveToCompleted = None):
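        # Remove the file with the given id from the download list; when moveToCompleted is set,
        # move it to the completed list instead of discarding it.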
i = 0
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
if (downloadFile.getId() == id):
if (moveToCompleted == None):
del self.downloadFileList[i]
else:
downloadFile = self.downloadFileList.pop(i)
self.completedFileListLock.acquire()
self.completedFileList.append(downloadFile)
self.completedFileListLock.release()
break
i += 1
self.downloadFileListLock.release()
def deleteDownloadFileFromCompletedList(self, id, moveToDownload = None):
i = 0
self.completedFileListLock.acquire()
for downloadFile in self.completedFileList:
if downloadFile.getId() == id:
if moveToDownload is None:
del self.completedFileList[i]
self.completedFileListLock.release()
return
else:
downloadFile = self.completedFileList.pop(i)
self.downloadFileListLock.acquire()
self.downloadFileList.append(downloadFile)
self.downloadFileListLock.release()
self.completedFileListLock.release()
return
i += 1
self.completedFileListLock.release()
def getNumberOfDownloadingFile(self):
i = 0
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
if (downloadFile.getStatus() == STAT_D):
i += 1
self.downloadFileListLock.release()
return i
def getNumberOfQueueingFile(self):
i = 0
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
if (downloadFile.getStatus() == STAT_Q and downloadFile.isRetryPossible()):
i += 1
self.downloadFileListLock.release()
return i
def resetStatus(self, status):
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
if (downloadFile.getStatus() != STAT_D):
#print 'Reseting', downloadFile.getFileURL(), ' to ', status
downloadFile.setStatus(status)
downloadFile.setErrorStr('')
self.downloadFileListLock.release()
def getDownloadFileById(self, id, fromQueueingList = True):
if fromQueueingList:
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
if (downloadFile.getId() == id):
self.downloadFileListLock.release()
return downloadFile
self.downloadFileListLock.release()
else:
self.completedFileListLock.acquire()
for downloadFile in self.completedFileList:
if (downloadFile.getId() == id):
self.completedFileListLock.release()
return downloadFile
self.completedFileListLock.release()
def getDownloadFileByFileURL(self, fileURL, fromQueueingList = True):
if fromQueueingList:
self.downloadFileListLock.acquire()
for downloadFile in self.downloadFileList:
if (downloadFile.getFileURL() == fileURL):
self.downloadFileListLock.release()
return downloadFile
self.downloadFileListLock.release()
def changeStatus(self, id, status):
self.getDownloadFileById(id).setStatus(status)
def emptyList(self):
self.downloadFileListLock.acquire()
self.downloadFileList = []
self.downloadFileListLock.release()
| phamngochai/lrg | DownloadFileList.py | Python | gpl-3.0 | 4,928 |
import operator
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
allows_group_by_pk = True
related_fields_match_type = True
# MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
allow_sliced_subqueries_with_in = False
has_select_for_update = True
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_autofield = True
can_introspect_binary_field = False
can_introspect_duration_field = False
can_introspect_small_integer_field = True
can_introspect_positive_integer_field = True
introspected_boolean_field_type = 'IntegerField'
supports_index_column_ordering = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
can_release_savepoints = True
atomic_transactions = False
can_clone_databases = True
supports_temporal_subtraction = True
supports_select_intersection = False
supports_select_difference = False
supports_slicing_ordering_in_compound = True
supports_index_on_text_field = False
has_case_insensitive_like = False
create_test_procedure_without_params_sql = """
CREATE PROCEDURE test_procedure ()
BEGIN
DECLARE V_I INTEGER;
SET V_I = 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE test_procedure (P_I INTEGER)
BEGIN
DECLARE V_I INTEGER;
SET V_I = P_I;
END;
"""
db_functions_convert_bytes_to_str = True
# Neither MySQL nor MariaDB support partial indexes.
supports_partial_indexes = False
supports_order_by_nulls_modifier = False
order_by_nulls_first = True
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
with self.connection.cursor() as cursor:
cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'")
result = cursor.fetchone()
return result[0]
@cached_property
def update_can_self_select(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 3, 2)
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def can_return_columns_from_insert(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 5, 0)
can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert'))
@cached_property
def has_zoneinfo_database(self):
# Test if the time zone definitions are installed. CONVERT_TZ returns
# NULL if 'UTC' timezone isn't loaded into the mysql.time_zone.
with self.connection.cursor() as cursor:
cursor.execute("SELECT CONVERT_TZ('2001-01-01 01:00:00', 'UTC', 'UTC')")
return cursor.fetchone()[0] is not None
@cached_property
def is_sql_auto_is_null_enabled(self):
with self.connection.cursor() as cursor:
cursor.execute('SELECT @@SQL_AUTO_IS_NULL')
result = cursor.fetchone()
return result and result[0] == 1
@cached_property
def supports_over_clause(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 2)
supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause'))
@cached_property
def supports_column_check_constraints(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 2, 1)
return self.connection.mysql_version >= (8, 0, 16)
supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints'))
@cached_property
def can_introspect_check_constraints(self):
if self.connection.mysql_is_mariadb:
version = self.connection.mysql_version
return (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10)
return self.connection.mysql_version >= (8, 0, 16)
@cached_property
def has_select_for_update_skip_locked(self):
return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_nowait(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 3, 0)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def supports_explain_analyze(self):
return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (8, 0, 18)
@cached_property
def supported_explain_formats(self):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other
# backends.
formats = {'JSON', 'TEXT', 'TRADITIONAL'}
if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 16):
formats.add('TREE')
return formats
@cached_property
def supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def ignores_table_name_case(self):
with self.connection.cursor() as cursor:
cursor.execute('SELECT @@LOWER_CASE_TABLE_NAMES')
result = cursor.fetchone()
return result and result[0] != 0
@cached_property
def supports_default_in_lead_lag(self):
# To be added in https://jira.mariadb.org/browse/MDEV-12981.
return not self.connection.mysql_is_mariadb
@cached_property
def supports_json_field(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 2, 7)
return self.connection.mysql_version >= (5, 7, 8)
@cached_property
def can_introspect_json_field(self):
if self.connection.mysql_is_mariadb:
return self.supports_json_field and self.can_introspect_check_constraints
return self.supports_json_field
| theo-l/django | django/db/backends/mysql/features.py | Python | bsd-3-clause | 6,495 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SecurityGroupNetworkInterface(Model):
"""Network interface and all its associated security rules.
:param id: ID of the network interface.
:type id: str
:param security_rule_associations:
:type security_rule_associations: :class:`SecurityRuleAssociations
<azure.mgmt.network.v2016_09_01.models.SecurityRuleAssociations>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'security_rule_associations': {'key': 'securityRuleAssociations', 'type': 'SecurityRuleAssociations'},
}
def __init__(self, id=None, security_rule_associations=None):
self.id = id
self.security_rule_associations = security_rule_associations
| v-iam/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/security_group_network_interface.py | Python | mit | 1,217 |
##import ConfigParser
##NOTE: Files must have an empty line at the end
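# KVUP converts Warcraft III object data exported as "key=value" sections (units.txt)
# into Dota 2 npc_units KeyValues blocks. The tables below map WC3 armor, attack and
# movement types onto their closest Dota 2 equivalents.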
endline = '\t}\n\n'
#Armor Types: - "CombatClassDefend"
#------------
armortypes = {}
armortypes['Flesh'] = 'DOTA_COMBAT_CLASS_DEFEND_SOFT'
armortypes['small'] = 'DOTA_COMBAT_CLASS_DEFEND_WEAK'
armortypes['medium'] = 'DOTA_COMBAT_CLASS_DEFEND_BASIC'
armortypes['large'] = 'DOTA_COMBAT_CLASS_DEFEND_STRONG'
armortypes['fort'] = 'DOTA_COMBAT_CLASS_DEFEND_STRUCTURE'
armortypes['hero'] = 'DOTA_COMBAT_CLASS_DEFEND_HERO'
armortypes['none'] = 'NONE' # Ignored
armortypes['divine'] = 'DIVINE' # Needs custom mechanic
#Attack Types: - "CombatClassAttack"
#--------------
attacktypes = {}
attacktypes['normal'] = 'DOTA_COMBAT_CLASS_ATTACK_BASIC'
attacktypes['pierce'] = 'DOTA_COMBAT_CLASS_ATTACK_PIERCE'
attacktypes['siege'] = 'DOTA_COMBAT_CLASS_ATTACK_SIEGE'
attacktypes['chaos'] = 'DOTA_COMBAT_CLASS_ATTACK_LIGHT'
attacktypes['hero'] = 'DOTA_COMBAT_CLASS_ATTACK_HERO'
attacktypes['magic'] = 'MAGIC' # Needs custom mechanic
attacktypes['spells'] = 'SPELLS' # Needs custom mechanic
attacktypes['unknown'] = 'U W0T M8'
#Movement Types: - ""
#--------------
movementtypes = {}
movementtypes['foot'] = 'DOTA_UNIT_CAP_MOVE_GROUND'
movementtypes['fly'] = 'DOTA_UNIT_CAP_MOVE_FLY'
movementtypes['float'] = 'DOTA_UNIT_CAP_MOVE_GROUND'
movementtypes['hover'] = 'DOTA_UNIT_CAP_MOVE_GROUND'
movementtypes['_'] = 'DOTA_UNIT_CAP_MOVE_NONE'
movementtypes[''] = 'DOTA_UNIT_CAP_MOVE_NONE'
movementtypes['amph'] = 'DOTA_UNIT_CAP_MOVE_GROUND'
movementtypes['horse'] = 'DOTA_UNIT_CAP_MOVE_GROUND'
#Attribute Primary
attributeprimary = {}
attributeprimary['STR'] = 'DOTA_ATTRIBUTE_STRENGTH'
attributeprimary['INT'] = 'DOTA_ATTRIBUTE_INTELLECT'
attributeprimary['AGI'] = 'DOTA_ATTRIBUTE_AGILITY'
class wc3pars:
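    """Holds one parsed WC3 unit section and knows how to write it out as a Dota 2 KV block."""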
def __init__(self, section):
self.npc_name = ''
if 'Name' in section:
self.name = section['Name']
else:
self.name = "ERROR, NO 'Name'"
if 'race' in section:
self.race = section['race']
# BaseClass
self.baseclass = 'npc_dota_creature'
self.level = 0
if 'level' in section:
            if section['level'] != '-':
self.level = section['level']
# Abilities
self.abilitycounter = 1
self.abilitylist = None
if 'abilList' in section:
            if section['abilList'] != '_':
self.abilitylist = section['abilList']
self.abilitylist = self.abilitylist.split(',')
self.heroabilitylist = None
if 'heroAbilList' in section:
            if section['heroAbilList'] != '':
self.heroabilitylist = section['heroAbilList']
self.heroabilitylist = self.heroabilitylist.split(',')
self.combatclassdefend = 'DOTA_COMBAT_CLASS_DEFEND_BASIC'
if 'defType' in section:
self.combatclassdefend = armortypes[section['defType']]
self.armorphys = None
if 'def' in section:
self.armorphys = section['def']
self.armormagic = 0
self.attackcapabilities = 'DOTA_UNIT_CAP_MELEE_ATTACK'
self.attackdamagemin = None
self.attackdamagemax = None
        if 'dice1' in section and 'sides1' in section:
            if section['dice1'] != '-' and section['sides1'] != '-':
self.attackdamagemin = str(float(section['dice1']) + float(section['dmgplus1']))
self.attackdamagemax = str(float(section['dice1']) * float(section['sides1']) + float(section['dmgplus1']))
self.attackdamagetype = 'DAMAGE_TYPE_ArmorPhysical'
self.attackrate = None
if 'cool1' in section:
self.attackrate = section['cool1']
self.attackanimationpoint = None
if 'dmgpt1' in section:
self.attackanimationpoint = section['dmgpt1']
self.attackacqurange = None
if 'acquire' in section:
self.attackacqurange = section['acquire']
self.attackrange = None
if 'rangeN1' in section:
self.attackrange = section['rangeN1']
self.projectilemodel = None
self.projectilespeed = None
if 'Missilespeed' in section:
            if section['Missilespeed'] != '':
self.projectilemodel = ''
self.projectilespeed = section['Missilespeed']
self.attackcapabilities = 'DOTA_UNIT_CAP_RANGED_ATTACK'
self.combatclassattack = 'DOTA_COMBAT_CLASS_ATTACK_BASIC'
if 'atkType1' in section:
            if section['atkType1'] != 'none':
self.combatclassattack = attacktypes[section['atkType1']]
else:
self.combatclassattack = None
self.attackcapabilities = 'DOTA_UNIT_CAP_NO_ATTACK'
# Add Hero Attributes
self.attributeprimary = None
if 'Primary' in section:
            if section['Primary'] != '_':
self.attributeprimary = attributeprimary[section['Primary']]
self.attributebasestrength = section['STR']
self.attributestrengthgain = section['STRplus']
self.attributebaseintelligence = section['INT']
self.attributeintelligencegain = section['INTplus']
self.attributebaseagility = section['AGI']
self.attributeagilitygain = section['AGIplus']
# Add Custom Gold and Lumber Cost
self.goldcost = 0
if 'goldcost' in section:
self.goldcost = section['goldcost']
self.lumbercost = 0
if 'lumbercost' in section:
self.lumbercost = section['lumbercost']
self.foodcost = 0
if 'fused' in section:
            if section['fused'] != '-':
self.foodcost = section['fused']
self.bountygoldmin = None
self.bountygoldmax = None
if 'bountydice' in section:
self.bountygoldmin = str(float(section['bountydice']) + float(section['bountyplus']))
self.bountygoldmax = str(float(section['bountydice']) * float(section['bountysides']) + float(section['bountyplus']))
self.statushealth= '1'
if 'HP' in section:
self.statushealth = section['HP']
self.statushealthregen = '0'
if 'regenHP' in section:
            if section['regenHP'] != '-':
self.statushealthregen = section['regenHP']
self.statusmana = '0'
if 'manaN' in section:
if section['manaN'] not in '-':
self.statusmana = section['manaN']
self.statusmanaregen = '0'
if 'regenMana' in section:
            if section['regenMana'] != ' - ' and section['regenMana'] != '-':
self.statusmanaregen = section['regenMana']
self.statusstartingmana = '0'
if 'mana0' in section:
            if section['mana0'] != ' - ' and section['mana0'] != '-':
self.statusstartingmana = section['mana0']
self.visiondaytimerange = 10
if 'sight' in section:
self.visiondaytimerange = section['sight']
self.visionnighttimerange = 10
if 'nsight' in section:
self.visionnighttimerange = section['nsight']
self.movementcapabilities = 'DOTA_UNIT_CAP_MOVE_NONE'
self.movementspeed = '100'
if 'spd' in section:
self.movementspeed = section['spd']
if 'movetp' in section:
self.movementcapabilities = movementtypes[section['movetp']]
self.movementturnrate = '0.5'
if 'turnRate' in section:
self.movementturnrate = section['turnRate']
# Defaults, no wc3 equivalent
self.boundshullname = 'DOTA_HULL_SIZE_HERO'
self.healthbaroffset = 140
self.team = 'DOTA_TEAM_NEUTRALS'
self.unitrelationshipclass = 'DOTA_NPC_UNIT_RELATIONSHIP_TYPE_DEFAULT'
self.comments = ''
self.description = None
if 'Ubertip' in section:
self.description = section['Ubertip']
def check(self):
print(self.name)
print(self.statushealthregen)
print(self.statusmanaregen)
def writetofile(self, nfile, write): ##if you need to edit the format or spelling or whatever, do it here
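        # Build the unit's KV block line by line, then append it to the output file in one write.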
newfile = open(nfile, write)
lines = []
section = ''
lines.append('\n')
if self.name is not None:
lines.append(self.unitcomment(self.name))
lines.append(self.kline(self.name.replace(' ', '_').lower()))
lines.append(self.kvcomment(' General'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('BaseClass', self.baseclass,None))
lines.append(self.kvline('Model', '', 'Add model'))
lines.append(self.kvline('ModelScale', '1', None))
lines.append(self.kvline('Level', self.level, None))
lines.append(self.kvline('BoundsHullName', self.boundshullname, None))
lines.append(self.kvline('HealthBarOffset', self.healthbaroffset, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Abilities'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
if self.abilitylist is not None:
for abil in self.abilitylist:
lines.append(self.kvline('Ability' + str(self.abilitycounter), '', 'Reference: ' + abil))
self.abilitycounter += 1
if self.heroabilitylist is not None:
for abil in self.heroabilitylist:
lines.append(self.kvline('Ability' + str(self.abilitycounter), '', 'Reference: ' + abil))
self.abilitycounter += 1
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Armor'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('ArmorPhysical', self.armorphys, None))
lines.append(self.kvline('MagicalResistance', self.armormagic, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Attack'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('AttackCapabilities',self.attackcapabilities, None))
lines.append(self.kvline('AttackDamageMin', self.attackdamagemin, None))
lines.append(self.kvline('AttackDamageMax', self.attackdamagemax, None))
lines.append(self.kvline('AttackDamageType', self.attackdamagetype, None))
        if self.attackrate is not None and self.attackrate.find('-') == -1:
            lines.append(self.kvline('AttackRate', self.attackrate, None))
        if self.attackanimationpoint is not None and self.attackanimationpoint.find('-') == -1:
            lines.append(self.kvline('AttackAnimationPoint', self.attackanimationpoint, None))
        if self.attackacqurange is not None and self.attackacqurange.find('-') == -1:
            lines.append(self.kvline('AttackAcquisitionRange', self.attackacqurange, None))
        if self.attackrange is not None and self.attackrange.find('-') == -1:
            lines.append(self.kvline('AttackRange', self.attackrange, None))
lines.append(self.kvline('ProjectileModel', self.projectilemodel, 'Add projectile'))
lines.append(self.kvline('ProjectileSpeed', self.projectilespeed, None))
lines.append(self.kvcomment(None))
# Only for heroes
if self.attributeprimary is not None:
lines.append(self.kvcomment(' Attributes'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('AttributePrimary',self.attributeprimary, None))
lines.append(self.kvline('AttributeBaseStrength', self.attributebasestrength, None))
lines.append(self.kvline('AttributeStrengthGain', self.attributestrengthgain, None))
lines.append(self.kvline('AttributeBaseIntelligence', self.attributebaseintelligence, None))
lines.append(self.kvline('AttributeIntelligenceGain', self.attributeintelligencegain, None))
lines.append(self.kvline('AttributeBaseAgility',self.attributebaseagility, None))
lines.append(self.kvline('AttributeAgilityGain', self.attributeagilitygain, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Bounty'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('BountyGoldMin', self.bountygoldmin, None))
lines.append(self.kvline('BountyGoldMax', self.bountygoldmax, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Gold and Lumber'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('GoldCost', self.goldcost, None))
        lines.append(self.kvline('LumberCost', self.lumbercost, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Movement'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('MovementCapabilities', self.movementcapabilities, None))
        if self.movementspeed != '-' and self.movementturnrate != '-':
lines.append(self.kvline('MovementSpeed', self.movementspeed, None))
lines.append(self.kvline('MovementTurnRate', self.movementturnrate, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Status'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('StatusHealth', self.statushealth, None))
        if self.statushealth.find('-') == -1:
            lines.append(self.kvline('StatusHealthRegen', self.statushealthregen, None))
        else:
            lines.append(self.kvline('StatusHealthRegen', self.statushealthregen, "Negative regen doesn't decrease HP ingame"))
        # Careful with negative mana regen
        if self.statusmana.find('-') == -1:
            lines.append(self.kvline('StatusMana', self.statusmana, None))
        if self.statusmanaregen.find('-') == -1:
            lines.append(self.kvline('StatusManaRegen', self.statusmanaregen, None))
        if self.statusstartingmana.find('-') == -1:
            lines.append(self.kvline('StatusStartingMana', self.statusstartingmana, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Vision'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('VisionDaytimeRange', self.visiondaytimerange, None))
lines.append(self.kvline('VisionNighttimeRange', self.visionnighttimerange, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Team'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(self.kvline('TeamName', self.team, None))
        if self.combatclassattack == 'MAGIC':
            self.combatclassattack = 'DOTA_COMBAT_CLASS_ATTACK_HERO'
            lines.append(self.kvline('CombatClassAttack', self.combatclassattack, "MAGIC - Attacks deal magic damage, ignores physical armor"))
        elif self.combatclassattack == 'SPELLS':
            self.combatclassattack = 'DOTA_COMBAT_CLASS_ATTACK_HERO'
            lines.append(self.kvline('CombatClassAttack', self.combatclassattack, "SPELLS - Attacks only through spells"))
        else:
            lines.append(self.kvline('CombatClassAttack', self.combatclassattack, None))
        if self.combatclassdefend == 'DIVINE':
            self.combatclassdefend = 'DOTA_COMBAT_CLASS_DEFEND_HERO'
            lines.append(self.kvline('CombatClassDefend', self.combatclassdefend, "DIVINE - Takes only 1/10 dmg from all types of attacks."))
        elif self.combatclassdefend != 'NONE':
            lines.append(self.kvline('CombatClassDefend', self.combatclassdefend, None))
lines.append(self.kvline('UnitRelationShipClass', self.unitrelationshipclass, None))
lines.append(self.kvcomment(None))
lines.append(self.kvcomment(' Creature Data'))
lines.append(self.kvcomment('----------------------------------------------------------------'))
lines.append(endline)
for line in lines:
section += line
newfile.write(section)
def kvline(self, key, val, comment):
line = ''
if val is not None:
key = str(key)
val = str(val)
line = '\t\t"' + key + '"\t'
# At least 1 tab, desired is align to the equivalent of 5 tabs
# Need to account for the extra 2 "" characters
if len(key) < 2:
line += '\t'
if len(key) < 6:
line += '\t'
if len(key) < 10:
line += '\t'
if len(key) < 14:
line += '\t'
if len(key) < 18:
line += '\t'
if len(key) < 22:
line += '\t'
line += '"' + val +'"'
if comment is not None:
line += '\t //' + comment
line += '\n'
return line
def kvcomment(self, comment):
line = '\t\t'
if comment is not None:
line += '//' + comment
line += '\n'
return line
def unitcomment(self, comment):
line = '\t//=================================================================================\n'
line += '\t// Creature: ' + comment +'\n'
if self.description is not None:
line += '\t// Description: ' + self.description + '\n'
line += '\t//=================================================================================\n'
return line
def kline(self, unit_name):
line = '\t"'+ unit_name +'"\n' + '\t{\n'
return line
def parse_file_section(textsec):
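    # Read "key=value" lines from a file into a dict; lines without '=' are ignored.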
lines = {}
counter = 0
with open(textsec) as f:
for line in f:
pair = line[0:-1].split('=')
if len(pair) == 2:
lines[pair[0]] = pair[1]
return lines
def parse_text_section(textsec):
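    # Same as parse_file_section, but works on an in-memory text section instead of a file.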
textsec = textsec.split('\n')
lines = {}
counter = 0
for line in textsec:
pair = line.split('=')
if len(pair) == 2:
lines[pair[0]] = pair[1]
return lines
def sectionoff(textfile):
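    # Split an ini-style file into {section name: raw text}; a line starting with '[' begins a new section.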
sections = {}
with open(textfile, 'r') as f:
secname = ''
sec = ''
for line in f:
if line[0:1] == '[':
sections[secname] = sec
secname = line
secname = secname[1:-2]
sec = ''
else:
sec += line
return sections
if __name__ == '__main__':
fullfile = sectionoff('units.txt')
print(fullfile[''])
f = open('kv_units.txt','w')
f.write('')
for key in fullfile:
        if key != '':
afile = parse_text_section(fullfile[key])
work = wc3pars(afile)
work.writetofile('kv_units.txt', 'a')
print('Finished Unit Parsing')
| nob8/SDMD-Renewed | content/dota_addons/sdmd/panorama/images/spellicons/KVUP-master/KVUP.py | Python | apache-2.0 | 19,425 |
from django.dispatch import Signal
resource_limit_update_succeeded = Signal(providing_args=['order_item'])
resource_limit_update_failed = Signal(providing_args=['order_item', 'error_message'])
resource_plan_switch_succeeded = Signal(providing_args=['instance'])
resource_creation_succeeded = Signal(providing_args=['instance'])
resource_deletion_succeeded = Signal(providing_args=['instance'])
| opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/marketplace/signals.py | Python | mit | 395 |
#encoding:utf-8
subreddit = 'Awwducational'
t_channel = '@Awwducational'
def send_post(submission, r2t):
return r2t.send_simple(submission,
gif='{title}\n\n{self_text}',
img='{title}\n\n{self_text}',
album=False,
text=False,
other=False
)
| Fillll/reddit2telegram | reddit2telegram/channels/awwducational/app.py | Python | mit | 287 |
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import base
from nova.virt import firewall
class FakeDriver(base.NodeDriver):
def cache_images(self, context, node, instance, **kwargs):
pass
def destroy_images(self, context, node, instance):
pass
def activate_bootloader(self, context, node, instance, **kwargs):
pass
def deactivate_bootloader(self, context, node, instance):
pass
def activate_node(self, context, node, instance):
"""For operations after power on."""
pass
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
pass
def get_console_output(self, node, instance):
return 'fake\nconsole\noutput for instance %s' % instance.id
class FakePowerManager(base.PowerManager):
def __init__(self, **kwargs):
super(FakePowerManager, self).__init__(**kwargs)
class FakeFirewallDriver(firewall.NoopFirewallDriver):
def __init__(self):
super(FakeFirewallDriver, self).__init__()
class FakeVifDriver(object):
def __init__(self):
super(FakeVifDriver, self).__init__()
def plug(self, instance, vif):
pass
def unplug(self, instance, vif):
pass
class FakeVolumeDriver(object):
def __init__(self, virtapi):
super(FakeVolumeDriver, self).__init__()
self.virtapi = virtapi
self._initiator = "fake_initiator"
def attach_volume(self, connection_info, instance, mountpoint):
pass
def detach_volume(self, connection_info, instance, mountpoint):
pass
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/virt/baremetal/fake.py | Python | gpl-2.0 | 2,285 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from . import ircnotice
class InvalidCTCPNotice(Exception): pass
class CTCPNotice(ircnotice.IRCNotice):
def __init__(self, message):
super().__init__(message)
self.ctcpify()
def ctcpify(self):
"""Special initialisation for CTCPNotice
It's useless /and/ boring - isn't that just great?"""
if not (self.message.startswith("\x01") and self.message.endswith("\x01")):
raise InvalidCTCPNotice("not a CTCP notice")
oldMessage = self.message
self.rawMessage = oldMessage
self.fullMessage = self.rawMessage[1:-1]
msgPart = self.fullMessage.partition(" ")
self.ctcpCommand = msgPart[0].lower()
self.message = msgPart[2]
@property
def isCTCPNotice(self):
return True
| IjonTichy/TichyBot | src/objects/ctcpnotice.py | Python | bsd-3-clause | 868 |
from ethereum.tools import tester
from ethereum.tools.tester import TransactionFailed
from pytest import fixture, raises, mark
from utils import longToHexString, EtherDelta, TokenDelta, PrintGasUsed
from reporting_utils import generateFees, proceedToNextRound, proceedToFork, finalizeFork, getExpectedFees
def test_redeem_participation_tokens(kitchenSinkFixture, universe, market, cash):
reputationToken = kitchenSinkFixture.applySignature("ReputationToken", universe.getReputationToken())
# proceed to the next round and buy some more fee window tokens
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
feeWindow = kitchenSinkFixture.applySignature('FeeWindow', market.getFeeWindow())
# We'll make the window active then purchase some participation tokens
kitchenSinkFixture.contracts["Time"].setTimestamp(feeWindow.getStartTime() + 1)
feeWindowAmount = 100
# Distribute REP
for testAccount in [tester.a1, tester.a2, tester.a3]:
reputationToken.transfer(testAccount, 1 * 10**6 * 10**18)
assert feeWindow.buy(feeWindowAmount, sender=tester.k1)
assert feeWindow.buy(feeWindowAmount, sender=tester.k2)
assert feeWindow.buy(feeWindowAmount, sender=tester.k3)
# proceed to the next round and buy some more fee window tokens
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
newFeeWindow = kitchenSinkFixture.applySignature('FeeWindow', market.getFeeWindow())
assert newFeeWindow.buy(feeWindowAmount, sender=tester.k1)
assert newFeeWindow.buy(feeWindowAmount, sender=tester.k2)
assert newFeeWindow.buy(feeWindowAmount, sender=tester.k3)
# Now end the window
kitchenSinkFixture.contracts["Time"].setTimestamp(newFeeWindow.getEndTime() + 1)
reporterFees = 1000 * market.getNumTicks() / universe.getOrCacheReportingFeeDivisor()
totalStake = feeWindow.getTotalFeeStake() + newFeeWindow.getTotalFeeStake()
assert cash.balanceOf(feeWindow.address) == reporterFees
assert cash.balanceOf(newFeeWindow.address) == reporterFees
expectedParticipationFees = reporterFees * feeWindowAmount * 2 / totalStake
# Cashing out Participation tokens will awards fees proportional to the total winning stake in the window
with TokenDelta(reputationToken, feeWindowAmount * 2, tester.a3, "Redeeming participation tokens didn't refund REP"):
with TokenDelta(feeWindow, -feeWindowAmount, tester.a3, "Redeeming participation tokens didn't decrease participation token balance correctly"):
with EtherDelta(expectedParticipationFees, tester.a3, kitchenSinkFixture.chain, "Redeeming participation tokens didn't increase ETH correctly"):
with PrintGasUsed(kitchenSinkFixture, "Universe Redeem:", 0):
assert universe.redeemStake([], [feeWindow.address, newFeeWindow.address], sender = tester.k3)
with TokenDelta(reputationToken, feeWindowAmount * 2, tester.a1, "Redeeming participation tokens didn't refund REP"):
with TokenDelta(feeWindow, -feeWindowAmount, tester.a1, "Redeeming participation tokens didn't decrease participation token balance correctly"):
with EtherDelta(expectedParticipationFees, tester.a1, kitchenSinkFixture.chain, "Redeeming participation tokens didn't increase ETH correctly"):
assert universe.redeemStake([], [feeWindow.address, newFeeWindow.address], sender = tester.k1)
with TokenDelta(reputationToken, feeWindowAmount * 2, tester.a2, "Redeeming participation tokens didn't refund REP"):
with TokenDelta(feeWindow, -feeWindowAmount, tester.a2, "Redeeming participation tokens didn't decrease participation token balance correctly"):
with EtherDelta(expectedParticipationFees, tester.a2, kitchenSinkFixture.chain, "Redeeming participation tokens didn't increase ETH correctly"):
assert universe.redeemStake([], [feeWindow.address, newFeeWindow.address], sender = tester.k2)
def test_redeem_reporting_participants(kitchenSinkFixture, market, categoricalMarket, scalarMarket, universe, cash):
reputationToken = kitchenSinkFixture.applySignature("ReputationToken", universe.getReputationToken())
constants = kitchenSinkFixture.contracts["Constants"]
# Initial Report
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
# Initial Report Losing
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
# Initial Report Winning
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
# Initial Report Losing
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
# Initial Report Winning
proceedToNextRound(kitchenSinkFixture, market, doGenerateFees = True)
# Get the winning reporting participants
initialReporter = kitchenSinkFixture.applySignature('InitialReporter', market.getReportingParticipant(0))
winningDisputeCrowdsourcer1 = kitchenSinkFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(2))
winningDisputeCrowdsourcer2 = kitchenSinkFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(4))
# Fast forward time until the new fee window is over and we can redeem
feeWindow = kitchenSinkFixture.applySignature("FeeWindow", market.getFeeWindow())
kitchenSinkFixture.contracts["Time"].setTimestamp(feeWindow.getEndTime() + 1)
assert market.finalize()
expectedFees = getExpectedFees(kitchenSinkFixture, cash, winningDisputeCrowdsourcer1, 4)
expectedFees += getExpectedFees(kitchenSinkFixture, cash, winningDisputeCrowdsourcer2, 2)
expectedFees += getExpectedFees(kitchenSinkFixture, cash, initialReporter, 5)
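    # Winning reporting participants redeem their staked REP plus a 50% bonus (1.5x their stake).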
expectedRep = long(winningDisputeCrowdsourcer2.getStake() + winningDisputeCrowdsourcer1.getStake())
expectedRep = long(expectedRep + expectedRep / 2)
expectedRep += long(initialReporter.getStake() + initialReporter.getStake() / 2)
with TokenDelta(reputationToken, expectedRep, tester.a0, "Redeeming didn't refund REP"):
with PrintGasUsed(kitchenSinkFixture, "Universe Redeem:", 0):
assert universe.redeemStake([initialReporter.address, winningDisputeCrowdsourcer1.address, winningDisputeCrowdsourcer2.address], [])
| AugurProject/augur-core | tests/reporting/test_universe_redeem.py | Python | gpl-3.0 | 6,263 |
from __future__ import division
import datetime
import os
import time
import threading
from pulsar.client.util import filter_destination_params
from pulsar.managers import ManagerProxy
from pulsar.managers import status
from pulsar.managers.util.retry import RetryActionExecutor
from .staging import preprocess
from .staging import postprocess
import logging
log = logging.getLogger(__name__)
DEFAULT_DO_MONITOR = False
DECACTIVATE_FAILED_MESSAGE = "Failed to deactivate job with job id %s. There may be problems when starting Pulsar next."
ACTIVATE_FAILED_MESSAGE = "Failed to activate job with job id %s. This job may not recover properly upon Pulsar restart."
JOB_FILE_FINAL_STATUS = "final_status"
JOB_FILE_POSTPROCESSED = "postprocessed"
JOB_FILE_PREPROCESSED = "preprocessed"
JOB_METADATA_RUNNING = "running"
DEFAULT_MIN_POLLING_INTERVAL = 0.5
class StatefulManagerProxy(ManagerProxy):
"""
"""
def __init__(self, manager, **manager_options):
super(StatefulManagerProxy, self).__init__(manager)
min_polling_interval = float(manager_options.get("min_polling_interval", DEFAULT_MIN_POLLING_INTERVAL))
preprocess_retry_action_kwds = filter_destination_params(manager_options, "preprocess_action_")
postprocess_retry_action_kwds = filter_destination_params(manager_options, "postprocess_action_")
self.__preprocess_action_executor = RetryActionExecutor(**preprocess_retry_action_kwds)
self.__postprocess_action_executor = RetryActionExecutor(**postprocess_retry_action_kwds)
self.min_polling_interval = datetime.timedelta(0, min_polling_interval)
self.active_jobs = ActiveJobs.from_manager(manager)
self.__state_change_callback = self._default_status_change_callback
self.__monitor = None
def set_state_change_callback(self, state_change_callback):
self.__state_change_callback = state_change_callback
self.__monitor = ManagerMonitor(self)
def _default_status_change_callback(self, status, job_id):
log.info("Status of job [%s] changed to [%s]. No callbacks enabled." % (status, job_id))
@property
def name(self):
return self._proxied_manager.name
def setup_job(self, *args, **kwargs):
job_id = self._proxied_manager.setup_job(*args, **kwargs)
return job_id
def handle_remote_staging(self, job_id, staging_config):
job_directory = self._proxied_manager.job_directory(job_id)
job_directory.store_metadata("staging_config", staging_config)
def launch(self, job_id, *args, **kwargs):
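        # Staging/preprocessing and the proxied launch run on a background thread;
        # the job is only marked active once preprocessing and the launch succeed.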
job_directory = self._proxied_manager.job_directory(job_id)
def do_preprocess():
try:
staging_config = job_directory.load_metadata("staging_config", {})
# TODO: swap out for a generic "job_extra_params"
if 'action_mapper' in staging_config and \
'ssh_key' in staging_config['action_mapper'] and \
'setup' in staging_config:
for action in staging_config['setup']:
action['action'].update(ssh_key=staging_config['action_mapper']['ssh_key'])
preprocess(job_directory, staging_config.get("setup", []), self.__preprocess_action_executor)
self._proxied_manager.launch(job_id, *args, **kwargs)
with job_directory.lock("status"):
job_directory.store_metadata(JOB_FILE_PREPROCESSED, True)
self.active_jobs.activate_job(job_id)
except Exception:
log.exception("Failed job preprocess for %s:", job_id)
self.__state_change_callback(status.FAILED, job_id)
new_thread_for_job(self, "preprocess", job_id, do_preprocess, daemon=False)
def get_status(self, job_id):
""" Compute status used proxied manager and handle state transitions
and track additional state information needed.
"""
job_directory = self._proxied_manager.job_directory(job_id)
with job_directory.lock("status"):
proxy_status, state_change = self.__proxy_status(job_directory, job_id)
if state_change == "to_complete":
self.__deactivate(job_id, proxy_status)
elif state_change == "to_running":
self.__state_change_callback(status.RUNNING, job_id)
return self.__status(job_directory, proxy_status)
def __proxy_status(self, job_directory, job_id):
""" Determine state with proxied job manager and if this job needs
to be marked as deactivated (this occurs when job first returns a
complete status from proxy.
"""
state_change = None
if not job_directory.has_metadata(JOB_FILE_PREPROCESSED):
proxy_status = status.PREPROCESSING
elif job_directory.has_metadata(JOB_FILE_FINAL_STATUS):
proxy_status = job_directory.load_metadata(JOB_FILE_FINAL_STATUS)
else:
proxy_status = self._proxied_manager.get_status(job_id)
if proxy_status == status.RUNNING:
if not job_directory.has_metadata(JOB_METADATA_RUNNING):
job_directory.store_metadata(JOB_METADATA_RUNNING, True)
state_change = "to_running"
elif proxy_status in [status.COMPLETE, status.CANCELLED]:
job_directory.store_metadata(JOB_FILE_FINAL_STATUS, proxy_status)
state_change = "to_complete"
return proxy_status, state_change
def __status(self, job_directory, proxy_status):
""" Use proxied manager's status to compute the real
(stateful) status of job.
"""
if proxy_status == status.COMPLETE:
if not job_directory.has_metadata(JOB_FILE_POSTPROCESSED):
job_status = status.POSTPROCESSING
else:
job_status = status.COMPLETE
else:
job_status = proxy_status
return job_status
def __deactivate(self, job_id, proxy_status):
self.active_jobs.deactivate_job(job_id)
deactivate_method = getattr(self._proxied_manager, "_deactivate_job", None)
if deactivate_method:
try:
deactivate_method(job_id)
except Exception:
log.exception("Failed to deactivate via proxied manager job %s" % job_id)
if proxy_status == status.COMPLETE:
self.__handle_postprocessing(job_id)
def __handle_postprocessing(self, job_id):
def do_postprocess():
postprocess_success = False
try:
postprocess_success = postprocess(self._proxied_manager.job_directory(job_id), self.__postprocess_action_executor)
except Exception:
log.exception("Failed to postprocess results for job id %s" % job_id)
final_status = status.COMPLETE if postprocess_success else status.FAILED
self.__state_change_callback(final_status, job_id)
new_thread_for_job(self, "postprocess", job_id, do_postprocess, daemon=False)
def shutdown(self, timeout=None):
if self.__monitor:
try:
self.__monitor.shutdown(timeout)
except Exception:
log.exception("Failed to shutdown job monitor for manager %s" % self.name)
super(StatefulManagerProxy, self).shutdown(timeout)
def recover_active_jobs(self):
recover_method = getattr(self._proxied_manager, "_recover_active_job", None)
if recover_method is None:
return
for job_id in self.active_jobs.active_job_ids():
try:
recover_method(job_id)
except Exception:
log.exception("Failed to recover active job %s" % job_id)
self.__handle_recovery_problem(job_id)
def __handle_recovery_problem(self, job_id):
# Make sure we tell the client we have lost this job.
self.active_jobs.deactivate_job(job_id)
self.__state_change_callback(status.LOST, job_id)
class ActiveJobs(object):
""" Keeps track of active jobs (those that are not yet "complete").
Current implementation is file based, but could easily be made
database-based instead.
TODO: Keep active jobs in memory after initial load so don't need to repeatedly
hit disk to recover this information.
"""
@staticmethod
def from_manager(manager):
persistence_directory = manager.persistence_directory
manager_name = manager.name
return ActiveJobs(manager_name, persistence_directory)
def __init__(self, manager_name, persistence_directory):
if persistence_directory:
active_job_directory = os.path.join(persistence_directory, "%s-active-jobs" % manager_name)
if not os.path.exists(active_job_directory):
os.makedirs(active_job_directory)
else:
active_job_directory = None
self.active_job_directory = active_job_directory
def active_job_ids(self):
job_ids = []
if self.active_job_directory:
job_ids = os.listdir(self.active_job_directory)
return job_ids
def activate_job(self, job_id):
if self.active_job_directory:
path = self._active_job_file(job_id)
try:
open(path, "w").close()
except Exception:
log.warn(ACTIVATE_FAILED_MESSAGE % job_id)
def deactivate_job(self, job_id):
if self.active_job_directory:
path = self._active_job_file(job_id)
if os.path.exists(path):
try:
os.remove(path)
except Exception:
log.warn(DECACTIVATE_FAILED_MESSAGE % job_id)
def _active_job_file(self, job_id):
return os.path.join(self.active_job_directory, job_id)
class ManagerMonitor(object):
""" Monitors active jobs of a StatefulManagerProxy.
"""
def __init__(self, stateful_manager):
self.stateful_manager = stateful_manager
self.active = True
thread = new_thread_for_manager(self.stateful_manager, "[action=monitor]", self._run, True)
self.thread = thread
def shutdown(self, timeout=None):
self.active = False
self.thread.join(timeout)
if self.thread.isAlive():
log.warn("Failed to join monitor thread [%s]" % self.thread)
def _run(self):
""" Main loop, repeatedly checking active jobs of stateful manager.
"""
while self.active:
try:
self._monitor_active_jobs()
except Exception:
log.exception("Failure in stateful manager monitor step.")
def _monitor_active_jobs(self):
active_job_ids = self.stateful_manager.active_jobs.active_job_ids()
iteration_start = datetime.datetime.now()
for active_job_id in active_job_ids:
try:
self._check_active_job_status(active_job_id)
except Exception:
log.exception("Failed checking active job status for job_id %s" % active_job_id)
iteration_end = datetime.datetime.now()
iteration_length = iteration_end - iteration_start
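        # Throttle so that one pass over the active jobs takes at least min_polling_interval.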
if iteration_length < self.stateful_manager.min_polling_interval:
to_sleep = (self.stateful_manager.min_polling_interval - iteration_length)
microseconds = to_sleep.microseconds + (to_sleep.seconds + to_sleep.days * 24 * 3600) * (10 ** 6)
total_seconds = microseconds / (10 ** 6)
time.sleep(total_seconds)
def _check_active_job_status(self, active_job_id):
# Manager itself will handle state transitions when status changes,
# just need to poll get_status
self.stateful_manager.get_status(active_job_id)
def new_thread_for_job(manager, action, job_id, target, daemon):
name = "[action=%s]-[job=%s]" % (action, job_id)
return new_thread_for_manager(manager, name, target, daemon)
def new_thread_for_manager(manager, name, target, daemon):
thread_name = "[manager=%s]-%s" % (manager.name, name)
thread = threading.Thread(name=thread_name, target=target)
thread.daemon = daemon
thread.start()
return thread
__all__ = ['StatefulManagerProxy']
| ssorgatem/pulsar | pulsar/managers/stateful.py | Python | apache-2.0 | 12,343 |
# -*- coding: utf-8 -*-
"""
Test levels
"""
import datetime
from django import test
from factories import ProgramFactory, LevelFactory, LevelTierFactory
class TestLevelProperties(test.TestCase):
"""
Test the get_level_depth method on the Level model to assure that the depth calculation is correct.
"""
def setUp(self):
self.program = ProgramFactory(
reporting_period_start=datetime.date(2018, 1, 1),
reporting_period_end=datetime.date(2019, 1, 1),
)
self.level1 = LevelFactory(program=self.program, customsort=1)
self.level1_1 = LevelFactory(parent=self.level1, customsort=1, program=self.program)
self.level1_1_1 = LevelFactory(parent=self.level1_1, customsort=1, program=self.program)
self.level1_1_2 = LevelFactory(parent=self.level1_1, customsort=2, program=self.program)
self.level1_2 = LevelFactory(parent=self.level1, customsort=2, program=self.program)
self.level1_2_1 = LevelFactory(parent=self.level1_2, customsort=1, program=self.program)
self.level1_2_2 = LevelFactory(parent=self.level1_2, customsort=2, program=self.program)
self.level1_2_3 = LevelFactory(parent=self.level1_2, customsort=3, program=self.program)
self.level1_2_3_1 = LevelFactory(parent=self.level1_2_3, customsort=1, program=self.program)
self.level1_2_3_1_1 = LevelFactory(parent=self.level1_2_3_1, customsort=1, program=self.program)
self.tier1 = LevelTierFactory(program=self.program, name='Tier1', tier_depth=1)
self.tier2 = LevelTierFactory(program=self.program, name='Tier2', tier_depth=2)
self.tier3 = LevelTierFactory(program=self.program, name='Tier3', tier_depth=3)
self.tier4 = LevelTierFactory(program=self.program, name='Tier4', tier_depth=4)
def test_level_depth(self):
self.assertEqual(self.level1.get_level_depth(), 1)
self.assertEqual(self.level1_1.get_level_depth(), 2)
self.assertEqual(self.level1_1_1.get_level_depth(), 3)
self.assertEqual(self.level1_1_2.get_level_depth(), 3)
def test_leveltier_method(self):
self.assertEqual(self.level1.leveltier.tier_depth, 1)
self.assertEqual(self.level1_1.leveltier.tier_depth, 2)
self.assertEqual(self.level1_2_3_1_1.leveltier, None)
# check if the property is still correct after a reordering and if the numbers don't start with 1
self.tier1.tier_depth = 10
self.tier1.save()
self.tier2.tier_depth = 9
self.tier2.save()
self.assertEqual(self.level1.leveltier.name, 'Tier3')
self.assertEqual(self.level1_1.leveltier.name, 'Tier4')
def test_ontology_method(self):
self.assertEqual(self.level1.ontology, '1.0.0.0')
self.assertEqual(self.level1_1.ontology, '1.1.0.0')
self.assertEqual(self.level1_1_1.ontology, '1.1.1.0')
self.assertEqual(self.level1_1_2.ontology, '1.1.2.0')
self.assertEqual(self.level1_2.ontology, '1.2.0.0')
self.assertEqual(self.level1_2_1.ontology, '1.2.1.0')
self.assertEqual(self.level1_2_2.ontology, '1.2.2.0')
self.assertEqual(self.level1_2_3.ontology, '1.2.3.0')
self.assertEqual(self.level1_2_3_1.ontology, '1.2.3.1')
| mercycorps/TolaActivity | indicators/tests/test_levels.py | Python | apache-2.0 | 3,265 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from google.appengine.ext import ndb
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import alert
from dashboard.models import anomaly
class AlertTest(testing_common.TestCase):
"""Test case for some functions in anomaly."""
def testGetBotNamesFromAlerts_EmptyList_ReturnsEmptySet(self):
self.assertEqual(set(), alert.GetBotNamesFromAlerts([]))
def testGetBotNamesFromAlerts_RemovesDuplicates(self):
testing_common.AddTests(
['SuperGPU'], ['Bot1'], {'foo': {'bar': {}}})
anomaly.Anomaly(test=utils.TestKey('SuperGPU/Bot1/foo/bar')).put()
anomaly.Anomaly(test=utils.TestKey('SuperGPU/Bot1/foo/bar')).put()
anomalies = anomaly.Anomaly.query().fetch()
bot_names = alert.GetBotNamesFromAlerts(anomalies)
self.assertEqual(2, len(anomalies))
self.assertEqual(1, len(bot_names))
def testGetBotNamesFromAlerts_ReturnsBotNames(self):
testing_common.AddTests(
['SuperGPU'], ['Bot1', 'Bot2', 'Bot3'], {'foo': {'bar': {}}})
anomaly.Anomaly(test=utils.TestKey('SuperGPU/Bot1/foo/bar')).put()
anomaly.Anomaly(test=utils.TestKey('SuperGPU/Bot2/foo/bar')).put()
anomaly.Anomaly(test=utils.TestKey('SuperGPU/Bot3/foo/bar')).put()
anomalies = anomaly.Anomaly.query().fetch()
bot_names = alert.GetBotNamesFromAlerts(anomalies)
self.assertEqual({'Bot1', 'Bot2', 'Bot3'}, bot_names)
def testGetTestMetadataKey_Test(self):
a = anomaly.Anomaly(
test=ndb.Key('Master', 'm', 'Bot', 'b', 'Test', 't', 'Test', 't'))
k = a.GetTestMetadataKey()
self.assertEqual('TestMetadata', k.kind())
self.assertEqual('m/b/t/t', k.id())
self.assertEqual('m/b/t/t', utils.TestPath(k))
def testGetTestMetadataKey_TestMetadata(self):
a = anomaly.Anomaly(test=utils.TestKey('a/b/c/d'))
k = a.GetTestMetadataKey()
self.assertEqual('TestMetadata', k.kind())
self.assertEqual('a/b/c/d', k.id())
self.assertEqual('a/b/c/d', utils.TestPath(k))
def testGetTestMetadataKey_None(self):
a = anomaly.Anomaly()
k = a.GetTestMetadataKey()
self.assertIsNone(k)
def testGetAlertsForTest(self):
old_style_key1 = utils.OldStyleTestKey('master/bot/test1/metric')
new_style_key1 = utils.TestMetadataKey('master/bot/test1/metric')
old_style_key2 = utils.OldStyleTestKey('master/bot/test2/metric')
new_style_key2 = utils.TestMetadataKey('master/bot/test2/metric')
anomaly.Anomaly(id="old_1", test=old_style_key1).put()
anomaly.Anomaly(id="old_1a", test=old_style_key1).put()
anomaly.Anomaly(id="old_2", test=old_style_key2).put()
anomaly.Anomaly(id="new_1", test=new_style_key1).put()
anomaly.Anomaly(id="new_2", test=new_style_key2).put()
anomaly.Anomaly(id="new_2a", test=new_style_key2).put()
key1_alerts = anomaly.Anomaly.GetAlertsForTest(new_style_key1)
self.assertEqual(
['new_1', 'old_1', 'old_1a'], [a.key.id() for a in key1_alerts])
key2_alerts = anomaly.Anomaly.GetAlertsForTest(old_style_key2)
self.assertEqual(
['new_2', 'new_2a', 'old_2'], [a.key.id() for a in key2_alerts])
key2_alerts_limit = anomaly.Anomaly.GetAlertsForTest(
old_style_key2, limit=2)
self.assertEqual(
['new_2', 'new_2a'], [a.key.id() for a in key2_alerts_limit])
def testComputedTestProperties(self):
anomaly.Anomaly(
id="foo",
test=utils.TestKey('master/bot/benchmark/metric/page')).put()
a = ndb.Key('Anomaly', 'foo').get()
self.assertEqual(a.master_name, 'master')
self.assertEqual(a.bot_name, 'bot')
self.assertEqual(a.benchmark_name, 'benchmark')
if __name__ == '__main__':
unittest.main()
| sahiljain/catapult | dashboard/dashboard/models/alert_test.py | Python | bsd-3-clause | 3,846 |
#!/usr/bin/python
#coding=utf-8
"""****************************************************************************
Copyright (c) 2013 cocos2d-x.org
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************"""
import platform
import os, os.path
import shutil
import threading
import time
#import head files by python version.
if int(platform.python_version().split('.')[0])>=3:
from tkinter import *
from tkinter.filedialog import *
from tkinter.messagebox import *
from queue import *
else:
from Tkinter import *
from tkFileDialog import *
from tkMessageBox import *
from Queue import *
class ThreadedTask(threading.Thread):
"""Create cocos project thread.
"""
def __init__(self, queue, projectName, packageName, language, projectPath):
threading.Thread.__init__(self)
self.queue = queue
self.projectName = projectName
self.packageName = packageName
self.language = language
self.projectPath = projectPath
def run(self):
"""Create cocos project.
custom message rules to notify ui
        As follows:
begin@%d@%d@%s --- create before
doing@%d@%d@%s --- creating
end@%d@%d@%s --- create after
"""
#delete exist project.
if os.path.exists(os.path.join(self.projectPath, self.projectName)):
print ("###begin remove: " + self.projectName)
try:
shutil.rmtree(os.path.join(self.projectPath, self.projectName))
print ("###remove finish: " + self.projectName)
except:
print ("###remove folder failure %s" %self.projectName)
putMsg = "end@%d@%d@%s" %(100, 100, "create failure")
self.queue.put(putMsg)
putMsg = "begin@%d@%d@%s" %(0, 100, "begin create")
self.queue.put(putMsg)
from core import create_platform_projects
breturn = create_platform_projects(
self.language,
self.projectName,
self.projectPath,
self.packageName,
has_native = True,
callbackfun = self.newProjectCallBack
)
if breturn:
putMsg = "end@%d@%d@%s" %(100, 100, "Projected created successfully")
else:
putMsg = "end@%d@%d@%s" %(100, 100, "Failed to create project")
self.queue.put(putMsg)
def newProjectCallBack(self, step, totalStep, showMsg):
"""Creating cocos project callback.
"""
putMsg = "doing@%d@%d@%s" %(step, totalStep, showMsg)
self.queue.put(putMsg)
class StdoutRedirector(object):
"""Redirect output.
"""
def __init__(self, text_area):
self.text_area = text_area
def write(self, str):
self.text_area.insert(END, str)
self.text_area.see(END)
class TkCocosDialog(Frame):
def __init__(self, parent):
Frame.__init__(self,parent)
self.projectName = ""
self.packageName = ""
self.language = ""
self.parent = parent
self.columnconfigure(3, weight=1)
self.rowconfigure(5, weight=1)
# project name frame
self.labName = Label(self, text="Project Name:")
self.strName = StringVar()
self.strName.set("MyGame")
self.editName = Entry(self, textvariable=self.strName)
self.labName.grid(sticky=W, pady=4, padx=5)
self.editName.grid(row=0, column=1, columnspan=4,padx=5, pady=2,sticky=E+W)
# package name frame
self.labPackage = Label(self, text="Package Name:")
self.strPackage=StringVar()
self.strPackage.set("com.MyCompany.AwesomeGame")
self.editPackage = Entry(self, textvariable=self.strPackage)
self.labPackage.grid(row=1, column=0,sticky=W, padx=5)
self.editPackage.grid(row=1, column=1, columnspan=4,padx=5, pady=2,sticky=E+W)
# project path frame
self.labPath = Label(self, text="Project Path:")
self.editPath = Entry(self)
self.btnPath = Button(self, text="...", width = 6, command = self.pathCallback)
self.labPath.grid(row=2, column=0,sticky=W, pady=4, padx=5)
self.editPath.grid(row=2, column=1, columnspan=3,padx=5, pady=2, sticky=E+W)
self.btnPath.grid(row=2, column=4,)
# language frame
self.labLanguage = Label(self, text="Language:")
self.var=IntVar()
self.var.set(1)
self.checkcpp = Radiobutton(self, text="C++", variable=self.var, value=1)
self.checklua = Radiobutton(self, text="Lua", variable=self.var, value=2)
self.checkjs = Radiobutton(self, text="JavaScript", variable=self.var, value=3)
self.labLanguage.grid(row=3, column=0,sticky=W, padx=5)
self.checkcpp.grid(row=3, column=1,sticky=N+W)
self.checklua.grid(row=3, column=2,padx=5,sticky=N+W)
self.checkjs.grid(row=3, column=3,padx=5,sticky=N+W)
# show progress
self.progress = Scale(self, state= DISABLED, from_=0, to=100, orient=HORIZONTAL)
self.progress.set(0)
self.progress.grid(row=4, column=0, columnspan=5,padx=5, pady=2,sticky=E+W+S+N)
# msg text frame
self.text=Text(self,background = '#d9efff')
self.text.bind("<KeyPress>", lambda e : "break")
self.text.grid(row=5, column=0, columnspan=5, rowspan=1, padx=5, sticky=E+W+S+N)
# new project button
self.btnCreate = Button(self, text="create", command = self.createBtnCallback)
self.btnCreate.grid(row=7, column=3, columnspan=1, rowspan=1,pady=2,ipadx=15,ipady =10, sticky=W)
#center window on desktop
curWidth = 500
curHeight = 450
scnWidth = self.parent.winfo_screenwidth()
scnHeight = self.parent.winfo_screenheight()
tmpcnf = '%dx%d+%d+%d'%(curWidth, curHeight, int((scnWidth-curWidth)/2), int((scnHeight-curHeight)/2))
self.parent.geometry(tmpcnf)
self.parent.title("Cocos2d Project Creator")
#fix size
#self.parent.maxsize(curWidth, curHeight)
#self.parent.minsize(curWidth, curHeight)
#redirect out to text
self.pack(fill=BOTH, expand=1)
sys.stdout = StdoutRedirector(self.text)
def process_queue(self):
"""
"""
#message is empty
if self.queue.empty():
self.parent.after(100, self.process_queue)
return
#parse message
msg = self.queue.get(0)
msglist = msg.split("@")
if len(msglist) < 4:
return
#begin
if msglist[0] == "begin":
self.progress['state'] = NORMAL
#doing
elif msglist[0] == "doing":
            self.progress.set(int(int(msglist[1])*100/int(msglist[2])))
#end
if msglist[0] == "end":
showwarning("create", msglist[3])
self.progress.set(0)
self.text.insert(END,"=================END==============\n")
self.progress['state'] = DISABLED
self.btnCreate['state'] = NORMAL
return
self.parent.after(100, self.process_queue)
def createBtnCallback(self):
"""Create button event.
"""
#Check project name
projectName = self.editName.get()
if projectName == "":
showwarning("warning", "projectName is empty")
return
#Check the package name is effective
packageName = self.editPackage.get()
packageList = packageName.split(".")
if len(packageList) < 2:
showwarning("warning", "packageName format error!")
return
for index in range(len(packageList)):
if (packageList[index] == "") or (packageList[index][0].isdigit()):
showwarning("warning", "packageName format error!")
return
# get select language type
language = "cpp"
if self.var.get() == 1:
language = "cpp"
elif self.var.get() == 2:
language = "lua"
elif self.var.get() == 3:
language = "js"
projectPath = self.editPath.get()
if projectPath == "":
showwarning("warning", "projectPath is empty")
return
        # if the project already exists, confirm before recreating it
if os.path.exists(os.path.join(projectPath, projectName)):
            if not askyesno("warning", "%s already exists, do you want to recreate it?" %projectName ):
return
#create a new thread to deal with create new project.
self.btnCreate['state'] = DISABLED
self.queue = Queue()
ThreadedTask(self.queue, projectName, packageName, language, projectPath).start()
self.parent.after(100, self.process_queue)
    def pathCallback(self):
        """Path button event.
"""
filepath = askdirectory()
if filepath:
self.editPath.delete(0, END)
self.editPath.insert(0, filepath)
def createTkCocosDialog():
old_stdout = sys.stdout
root = Tk()
app = TkCocosDialog(root)
root.mainloop()
sys.stdout = old_stdout
if __name__ =='__main__':
createTkCocosDialog()
| Gamesjiazhi/rwproject | undeadClient2D/frameworks/cocos2d-x/tools/cocos2d-console/plugins/project_new/ui.py | Python | apache-2.0 | 10,269 |
import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
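    # Fits a new weight vector r by minimising, over every recorded game and
    # board state, the squared gap between f(x, r) and the TD(lambda) target
    # f(x, rt) + sum_s lf**(s-k) * delta_s built from the current weights rt
    # (see optimized_func and compute_next_rt below).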
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
        at the mth game and kth board state
        """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
        The temporal difference value for state s to state (s+1) in the mth game
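        Standard TD error form:
            delta_s = reward(s -> s+1) + f(x_{s+1}, rt) - f(x_s, rt)
        where f is the linear evaluation function and rt the current weights.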
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
options={'maxiter': self.max_iter, 'disp': True},
callback=self.callback)
return res.x
| ndt93/tetris | scripts/agent3.py | Python | mit | 5,234 |
from twisted.internet import reactor
from stratum.event_handler import GenericEventHandler
from jobs import Job
import utils
import version as _version
import stratum_listener
import stratum.logger
log = stratum.logger.get_logger('proxy')
class ClientMiningService(GenericEventHandler):
job_registry = None # Reference to JobRegistry instance
timeout = None # Reference to IReactorTime object
@classmethod
def reset_timeout(cls):
if cls.timeout != None:
if not cls.timeout.called:
cls.timeout.cancel()
cls.timeout = None
cls.timeout = reactor.callLater(2*60, cls.on_timeout)
@classmethod
def on_timeout(cls):
'''
Try to reconnect to the pool after two minutes of no activity on the connection.
It will also drop all Stratum connections to sub-miners
to indicate connection issues.
'''
log.error("Connection to upstream pool timed out")
cls.reset_timeout()
cls.job_registry.f.reconnect()
def handle_event(self, method, params, connection_ref):
'''Handle RPC calls and notifications from the pool'''
# Yay, we received something from the pool,
# let's restart the timeout.
self.reset_timeout()
if method == 'mining.notify':
'''Proxy just received information about new mining job'''
(job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs) = params[:9]
#print len(str(params)), len(merkle_branch)
'''
log.debug("Received new job #%s" % job_id)
log.debug("prevhash = %s" % prevhash)
log.debug("version = %s" % version)
log.debug("nbits = %s" % nbits)
log.debug("ntime = %s" % ntime)
log.debug("clean_jobs = %s" % clean_jobs)
log.debug("coinb1 = %s" % coinb1)
log.debug("coinb2 = %s" % coinb2)
log.debug("merkle_branch = %s" % merkle_branch)
'''
# Broadcast to Stratum clients
stratum_listener.MiningSubscription.on_template(
job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs)
# Broadcast to getwork clients
job = Job.build_from_broadcast(job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime)
log.info("New job %s for prevhash %s, clean_jobs=%s" % \
(job.job_id, utils.format_hash(job.prevhash), clean_jobs))
self.job_registry.add_template(job, clean_jobs)
elif method == 'mining.set_difficulty':
difficulty = params[0]
log.info("Setting new difficulty: %s" % difficulty)
stratum_listener.DifficultySubscription.on_new_difficulty(difficulty)
self.job_registry.set_difficulty(difficulty)
elif method == 'client.reconnect':
(hostname, port, wait) = params[:3]
new = list(self.job_registry.f.main_host[::])
if hostname: new[0] = hostname
if port: new[1] = port
log.info("Server asked us to reconnect to %s:%d" % tuple(new))
self.job_registry.f.reconnect(new[0], new[1], wait)
elif method == 'client.add_peers':
'''New peers which can be used on connection failure'''
return False
'''
peerlist = params[0] # TODO
for peer in peerlist:
self.job_registry.f.add_peer(peer)
return True
'''
elif method == 'client.get_version':
return "stratum-proxy/%s" % _version.VERSION
elif method == 'client.show_message':
# Displays message from the server to the terminal
utils.show_message(params[0])
return True
elif method == 'mining.get_hashrate':
return {} # TODO
elif method == 'mining.get_temperature':
return {} # TODO
else:
'''Pool just asked us for something which we don't support...'''
log.error("Unhandled method %s with params %s" % (method, params))
| oxagast/hashnet | stratum-mining-proxy/mining_libs/client_service.py | Python | gpl-2.0 | 4,418 |
#!/usr/bin/env python
"""Earth Engine OAuth2 helper functions for generating client tokens.
Typical use-case consists of:
1. Calling 'get_authorization_url'
2. Using a browser to access the output URL and copy the generated OAuth2 code
3. Calling 'request_token' to request a token using that code and the OAuth API
4. Calling 'write_token' to save the token at the path given by
'get_credentials_path'
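A minimal interactive sketch of that flow, using only the helpers in this
module (the prompt text is illustrative):
    auth_url = get_authorization_url()
    # open auth_url in a browser, authorize, and copy the generated code
    refresh_token = request_token(raw_input('Auth code: '))
    write_token(refresh_token)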
"""
import errno
import json
import os
import urllib
import urllib2
CLIENT_ID = ('517222506229-vsmmajv00ul0bs7p89v5m89qs8eb9359.'
'apps.googleusercontent.com')
CLIENT_SECRET = 'RUP0RZ6e0pPhDzsqIJ7KlNd1'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob' # Prompts user to copy-paste code
SCOPE = ('https://www.googleapis.com/auth/earthengine'
' https://www.googleapis.com/auth/devstorage.read_write')
def get_credentials_path():
return os.path.expanduser('~/.config/earthengine/credentials')
def get_authorization_url():
"""Returns a URL to generate an auth code."""
return 'https://accounts.google.com/o/oauth2/auth?' + urllib.urlencode({
'client_id': CLIENT_ID,
'scope': SCOPE,
'redirect_uri': REDIRECT_URI,
'response_type': 'code',
})
def request_token(auth_code):
"""Uses authorization code to request tokens."""
request_args = {
'code': auth_code,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'redirect_uri': REDIRECT_URI,
'grant_type': 'authorization_code',
}
refresh_token = None
try:
response = urllib2.urlopen('https://accounts.google.com/o/oauth2/token',
urllib.urlencode(request_args)).read()
refresh_token = json.loads(response)['refresh_token']
except urllib2.HTTPError as e:
raise Exception('Problem requesting tokens. Please try again. %s %s' %
(e, e.read()))
return refresh_token
def write_token(refresh_token):
"""Attempts to write the passed token to the given user directory."""
credentials_path = get_credentials_path()
dirname = os.path.dirname(credentials_path)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise Exception('Error creating directory %s: %s' % (dirname, e))
json.dump({'refresh_token': refresh_token}, open(credentials_path, 'w'))
| Servir-Mekong/ecodash | lib/ee/oauth.py | Python | gpl-3.0 | 2,314 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CardGenerator',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Level', models.IntegerField(default=1)),
('Name', models.CharField(max_length=100)),
('Formula', models.TextField()),
('Question', models.TextField()),
],
),
]
| jillson/chrononaut | combat/migrations/0001_initial.py | Python | gpl-3.0 | 647 |
from quark_runtime import *
def factorial(n):
if ((n) == (0)):
return 1
else:
return (n) * (factorial((n) - (1)))
def call_main(): import sys; main(_List(sys.argv[1:]))
def main(args):
_println(factorial(1));
_println(factorial(2));
_println(factorial(3));
_println(factorial(4));
_println(factorial(5));
_println(factorial(6));
_println(factorial(7));
_println(factorial(8));
_println(factorial(9));
_println(factorial(10));
| bozzzzo/quark | quarkc/test/emit/expected/py/factorial/factorial/__init__.py | Python | apache-2.0 | 495 |
""" Project Euler, problem 11
Largest product in a grid
https://projecteuler.net/problem=11
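Approach: for every cell of the grid, take the product of the four numbers
running down, right and along both diagonals from it, and keep the largest
product seen.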
"""
# Store all the inputs in a 20 by 20 matrix
mat = []
# Read the 20x20 grid from the input file
file = open('Problem11Input.txt', 'r').readlines()
# We get a 20 by 20 grid as input, so use split to store input into the array
for i in range(0, len(file)):
row = file[i].split()
int_row = []
# Go through the row to remove leading zeroes
for j in range(0, len(row)):
if row[j][0] == '0':
int_row.append(int(row[j][1]))
else:
int_row.append(int(row[j]))
mat.append(int_row)
# Check the product of length-4 downwards column of numbers
def check_down(r, c):
if r <= 16:
product = 1
for i in range(0, 4):
product *= mat[r + i][c]
return product
return 0
# Check the product of the length-4 right row of numbers
def check_right(r, c):
if c <= 16:
product = 1
for i in range(0, 4):
product *= mat[r][c + i]
return product
return 0
# Check the length-4 major diagonal
def check_rdiag(r, c):
if r <= 16 and c <= 16:
product = 1
for i in range(0, 4):
product *= mat[r + i][c + i]
return product
return 0
# Check the length-4 minor diagonal
def check_ldiag(r, c):
if r >= 3 and c <= 16:
product = 1
for i in range(0, 4):
product *= mat[r - i][c + i]
return product
return 0
# Run tests on every cell in the matrix and store the max
def run_checks():
maxi = 0
r = 0
while r < 20:
c = 0
while c < 20:
temp = max(check_down(r, c), check_right(r, c),
check_rdiag(r, c), check_ldiag(r, c))
if temp > maxi:
maxi = temp
c += 1
r += 1
return maxi
print(run_checks())
| ruar18/competitive-programming | project-euler/p11.py | Python | mit | 1,896 |
# -*- coding: utf-8 -*-
#
# This file is part of pypuppetdbquery.
# Copyright © 2016 Chris Boot <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Obtain selected structured fact values from nodes matching a query.
"""
import pypuppetdb
import pypuppetdbquery
pdb = pypuppetdb.connect()
node_facts = pypuppetdbquery.query_fact_contents(
pdb,
'(processorcount=4 or processorcount=8) and kernel=Linux',
['system_uptime.days', 'os.lsb.~"dist.*"'])
for node in node_facts:
facts = node_facts[node]
print(node, facts)
| bootc/pypuppetdbquery | examples/fact_contents.py | Python | apache-2.0 | 1,056 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
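Example invocations (assuming a local text file named alice.txt):
  ./wordcount.py --count alice.txt
  ./wordcount.py --topcount alice.txt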
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
def split_words(filename):
  """Returns a dict mapping each lowercased word in the file to its count."""
  f = open(filename, "r")
  returnDict = {}
  for line in f:
    for word in line.split():
      word = word.lower()  # 'The' and 'the' count as the same word
      if word in returnDict:
        returnDict[word] += 1
      else:
        returnDict[word] = 1
  f.close()
  return returnDict
def sortedByWordCount(filename):
return sorted(split_words(filename).items(),key=lambda wordcount: wordcount[1], reverse=True)
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def print_words(filename):
  # --count: print every word and its count, sorted by word
  for word, count in sorted(split_words(filename).items()):
    print word, count
def print_top(filename):
  # --topcount: print the 20 most common words, most common first
  for word, count in sortedByWordCount(filename)[:20]:
    print word, count
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| superpolock/googleclass | basic/wordcount.py | Python | apache-2.0 | 2,619 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import re
import sys
from test._common import unittest
pkgpath = os.path.dirname(__file__) or '.'
sys.path.append(pkgpath)
os.chdir(pkgpath)
# Make sure we use local version of beetsplug and not system namespaced version
# for tests
try:
del sys.modules["beetsplug"]
except KeyError:
pass
def suite():
s = unittest.TestSuite()
# Get the suite() of every module in this directory beginning with
# "test_".
for fname in os.listdir(pkgpath):
match = re.match(r'(test_\S+)\.py$', fname)
if match:
modname = match.group(1)
s.addTest(__import__(modname).suite())
return s
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| parapente/beets | test/testall.py | Python | mit | 1,521 |
#! /usr/bin/env python
"""
This script can be used to produce a standalone executable from
arbitrary Python code. You supply the name of the starting Python
file to import, and this script attempts to generate an executable
that will produce the same results as "python startfile.py".
This script is actually a wrapper around Panda's FreezeTool.py, which
is itself a tool to use Python's built-in "freeze" utility to compile
Python code into a standalone executable. It also uses Python's
built-in modulefinder module, which it uses to find all of the modules
imported directly or indirectly by the original startfile.py.
Usage:
pfreeze.py [opts] startfile
Options:
-o output
Specifies the name of the resulting executable file to produce.
If this ends in ".mf", a multifile is written instead of a frozen
binary. If it ends in ".dll", ".pyd", or ".so", a shared library
is written.
-x module[,module...]
Specifies a comma-separated list of Python modules to exclude from
the resulting file, even if they appear to be referenced. You
may also repeat the -x command for each module.
-i module[,module...]
Specifies a comma-separated list of Python modules to include in
the resulting file, even if they do not appear to be referenced.
You may also repeat the -i command for each module.
-p module[,module...]
Specifies a list of Python modules that do run-time manipulation
of the __path__ variable, and thus must be actually imported to
determine the true value of __path__.
-s
Adds the standard set of modules that are necessary for embedding
the Python interpreter. Implicitly set if an executable is
generated.
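Example invocations (hypothetical file names, using only the options above):
  pfreeze.py -o myapp.exe startfile.py
  pfreeze.py -o mymodule.pyd -x Tkinter startfile.py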
"""
import getopt
import sys
import os
from direct.showutil import FreezeTool
def usage(code, msg = ''):
print >> sys.stderr, __doc__
print >> sys.stderr, msg
sys.exit(code)
# We're not protecting the next part under a __name__ == __main__
# check, just so we can import this file directly in ppython.cxx.
freezer = FreezeTool.Freezer()
basename = None
addStartupModules = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'o:i:x:p:sh')
except getopt.error, msg:
usage(1, msg)
for opt, arg in opts:
if opt == '-o':
basename = arg
elif opt == '-i':
for module in arg.split(','):
freezer.addModule(module)
elif opt == '-x':
for module in arg.split(','):
freezer.excludeModule(module)
elif opt == '-p':
for module in arg.split(','):
freezer.handleCustomPath(module)
elif opt == '-s':
addStartupModules = True
elif opt == '-h':
usage(0)
else:
        print 'illegal option: ' + opt
sys.exit(1)
if not basename:
usage(1, 'You did not specify an output file.')
if len(args) > 1:
usage(1, 'Only one main file may be specified.')
outputType = 'exe'
bl = basename.lower()
if bl.endswith('.mf'):
outputType = 'mf'
elif bl.endswith('.c'):
outputType = 'c'
elif bl.endswith('.dll') or bl.endswith('.pyd') or bl.endswith('.so'):
basename = os.path.splitext(basename)[0]
outputType = 'dll'
elif bl.endswith('.exe'):
basename = os.path.splitext(basename)[0]
compileToExe = False
if args:
startfile = args[0]
startmod = startfile
if startfile.endswith('.py') or startfile.endswith('.pyw') or \
startfile.endswith('.pyc') or startfile.endswith('.pyo'):
startmod = os.path.splitext(startfile)[0]
if outputType == 'dll' or outputType == 'c':
freezer.addModule(startmod, filename = startfile)
else:
freezer.addModule('__main__', filename = startfile)
compileToExe = True
addStartupModules = True
elif outputType == 'exe':
# We must have a main module when making an executable.
usage(0)
freezer.done(addStartupModules = addStartupModules)
if outputType == 'mf':
freezer.writeMultifile(basename)
elif outputType == 'c':
freezer.writeCode(basename)
else:
freezer.generateCode(basename, compileToExe = compileToExe)
| Wilee999/panda3d | direct/src/showutil/pfreeze.py | Python | bsd-3-clause | 4,090 |
pyg = 'ay'
| vpstudios/Codecademy-Exercise-Answers | Language Skills/Python/Unit 3/2-PygLatin/PygLatin PART2/7-Ay B C.py | Python | mit | 11 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from autocomplete_light.forms import modelform_factory
from geonode.maps.models import Map, MapLayer, MapSnapshot
from geonode.base.admin import MediaTranslationAdmin, ResourceBaseAdminForm
from geonode.base.admin import metadata_batch_edit
from django.contrib import admin
class MapLayerInline(admin.TabularInline):
model = MapLayer
class MapAdminForm(ResourceBaseAdminForm):
class Meta:
model = Map
fields = '__all__'
class MapAdmin(MediaTranslationAdmin):
inlines = [MapLayerInline, ]
list_display_links = ('title',)
list_display = ('id', 'title', 'owner', 'category', 'group', 'is_approved', 'is_published', 'featured',)
list_editable = ('owner', 'category', 'group', 'is_approved', 'is_published', 'featured',)
list_filter = ('owner', 'category', 'group', 'featured',
'is_approved', 'is_published',)
search_fields = ('title', 'abstract', 'purpose',
'is_approved', 'is_published',)
form = MapAdminForm
actions = [metadata_batch_edit]
class MapLayerAdmin(admin.ModelAdmin):
list_display = ('id', 'map', 'name')
list_filter = ('map',)
search_fields = ('map__title', 'name',)
form = modelform_factory(MapLayer, fields='__all__')
admin.site.register(Map, MapAdmin)
admin.site.register(MapLayer, MapLayerAdmin)
admin.site.register(MapSnapshot)
| timlinux/geonode | geonode/maps/admin.py | Python | gpl-3.0 | 2,205 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.html import escape
from django.utils.datastructures import SortedDict
from mox import IsA, IgnoreArg
from copy import deepcopy
from horizon import api
from horizon import test
from .workflows import CreateNetwork
INDEX_URL = reverse('horizon:nova:networks:index')
class NetworkTests(test.TestCase):
@test.create_stubs({api.quantum: ('network_list',)})
def test_index(self):
api.quantum.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(self.networks.list())
api.quantum.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'nova/networks/index.html')
networks = res.context['networks_table'].data
self.assertItemsEqual(networks, self.networks.list())
@test.create_stubs({api.quantum: ('network_list',)})
def test_index_network_list_exception(self):
api.quantum.network_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'nova/networks/index.html')
self.assertEqual(len(res.context['networks_table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.quantum: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail(self):
network_id = self.networks.first().id
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.quantum.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'nova/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.quantum: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail_network_exception(self):
network_id = self.networks.first().id
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
url = reverse('horizon:nova:networks:detail', args=[network_id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.quantum: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail_subnet_exception(self):
network_id = self.networks.first().id
api.quantum.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.quantum)
api.quantum.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.ports.first()])
# Called from SubnetTable
api.quantum.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'nova/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertEqual(len(subnets), 0)
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.quantum: ('network_get',
'subnet_list',
'port_list',)})
def test_network_detail_port_exception(self):
network_id = self.networks.first().id
api.quantum.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network_id).\
AndReturn([self.subnets.first()])
api.quantum.port_list(IsA(http.HttpRequest), network_id=network_id).\
AndRaise(self.exceptions.quantum)
# Called from SubnetTable
api.quantum.network_get(IsA(http.HttpRequest), network_id).\
AndReturn(self.networks.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:detail',
args=[network_id]))
self.assertTemplateUsed(res, 'nova/networks/detail.html')
subnets = res.context['subnets_table'].data
ports = res.context['ports_table'].data
self.assertItemsEqual(subnets, [self.subnets.first()])
self.assertEqual(len(ports), 0)
def test_network_create_get(self):
# no api methods are called.
self.mox.ReplayAll()
url = reverse('horizon:nova:networks:create')
res = self.client.get(url)
workflow = res.context['workflow']
self.assertTemplateUsed(res, 'nova/networks/create.html')
self.assertEqual(workflow.name, CreateNetwork.name)
expected_objs = ['<CreateNetworkInfo: createnetworkinfoaction>',
'<CreateSubnetInfo: createsubnetinfoaction>']
self.assertQuerysetEqual(workflow.steps, expected_objs)
@test.create_stubs({api.quantum: ('network_create',)})
def test_network_create_post(self):
network = self.networks.first()
api.quantum.network_create(IsA(http.HttpRequest), name=network.name)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'with_subnet': False,
'subnet_name': '',
'cidr': '',
'ip_version': 4,
'gateway_ip': ''}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_create',
'subnet_create',)})
def test_network_create_post_with_subnet(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_create(IsA(http.HttpRequest), name=network.name)\
.AndReturn(network)
api.quantum.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'with_subnet': True,
'subnet_name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_create',)})
def test_network_create_post_network_exception(self):
network = self.networks.first()
api.quantum.network_create(IsA(http.HttpRequest), name=network.name)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'with_subnet': False,
'subnet_name': '',
'cidr': '',
'ip_version': 4,
'gateway_ip': ''}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_create',)})
def test_network_create_post_with_subnet_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_create(IsA(http.HttpRequest), name=network.name)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'with_subnet': True,
'subnet_name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_create',
'subnet_create',)})
def test_network_create_post_with_subnet_subnet_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_create(IsA(http.HttpRequest), name=network.name)\
.AndReturn(network)
api.quantum.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'with_subnet': True,
'subnet_name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_network_create_post_with_subnet_nocidr(self):
network = self.networks.first()
subnet = self.subnets.first()
self.mox.ReplayAll()
form_data = {'net_name': network.name,
'with_subnet': True,
'subnet_name': subnet.name,
'cidr': '',
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, escape('Specify "Network Address" or '
'clear "Create Subnet" checkbox.'))
def test_network_create_post_with_subnet_cidr_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = {'net_name': network.name,
'with_subnet': True,
'subnet_name': subnet.name,
'cidr': cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
def test_network_create_post_with_subnet_gw_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = {'net_name': network.name,
'with_subnet': True,
'subnet_name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': gateway_ip}
url = reverse('horizon:nova:networks:create')
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.quantum: ('network_get',)})
def test_network_update_get(self):
network = self.networks.first()
api.quantum.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
url = reverse('horizon:nova:networks:update', args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'nova/networks/update.html')
@test.create_stubs({api.quantum: ('network_get',)})
def test_network_update_get_exception(self):
network = self.networks.first()
api.quantum.network_get(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
url = reverse('horizon:nova:networks:update', args=[network.id])
res = self.client.get(url)
redir_url = INDEX_URL
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.quantum: ('network_modify',
'network_get',)})
def test_network_update_post(self):
network = self.networks.first()
api.quantum.network_modify(IsA(http.HttpRequest), network.id,
name=network.name)\
.AndReturn(network)
api.quantum.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
formData = {'network_id': network.id,
'name': network.name,
'tenant_id': network.tenant_id}
url = reverse('horizon:nova:networks:update', args=[network.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_modify',
'network_get',)})
def test_network_update_post_exception(self):
network = self.networks.first()
api.quantum.network_modify(IsA(http.HttpRequest), network.id,
name=network.name)\
.AndRaise(self.exceptions.quantum)
api.quantum.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'name': network.name,
'tenant_id': network.tenant_id}
url = reverse('horizon:nova:networks:update', args=[network.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_list',
'subnet_list',
'network_delete')})
def test_delete_network_no_subnet(self):
network = self.networks.first()
api.quantum.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.quantum.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
.AndReturn([])
api.quantum.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_list',
'subnet_list',
'network_delete',
'subnet_delete')})
def test_delete_network_with_subnet(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.quantum.network_list(IsA(http.HttpRequest), shared=True)\
.AndReturn([])
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
.AndReturn([subnet])
api.quantum.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.quantum.network_delete(IsA(http.HttpRequest), network.id)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_list',
'subnet_list',
'network_delete',
'subnet_delete')})
def test_delete_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_list(IsA(http.HttpRequest),
tenant_id=network.tenant_id,
shared=False)\
.AndReturn([network])
api.quantum.network_list(IsA(http.HttpRequest),
shared=True)\
.AndReturn([])
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network.id)\
.AndReturn([subnet])
api.quantum.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.quantum.network_delete(IsA(http.HttpRequest), network.id)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
form_data = {'action': 'networks__delete__%s' % network.id}
res = self.client.post(INDEX_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('subnet_get',)})
def test_subnet_detail(self):
subnet = self.subnets.first()
api.quantum.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(self.subnets.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:subnets:detail',
args=[subnet.id]))
self.assertTemplateUsed(res, 'nova/networks/subnets/detail.html')
self.assertEqual(res.context['subnet'].id, subnet.id)
@test.create_stubs({api.quantum: ('subnet_get',)})
def test_subnet_detail_exception(self):
subnet = self.subnets.first()
api.quantum.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:subnets:detail',
args=[subnet.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_get',)})
def test_subnet_create_get(self):
network = self.networks.first()
api.quantum.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
url = reverse('horizon:nova:networks:addsubnet',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'nova/networks/subnets/create.html')
@test.create_stubs({api.quantum: ('network_get',
'subnet_create',)})
def test_subnet_create_post(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.quantum.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
network_name=network.name,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip)\
.AndReturn(subnet)
self.mox.ReplayAll()
form_data = {'network_id': subnet.network_id,
'network_name': network.name,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse('horizon:nova:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.quantum: ('network_get',
'subnet_create',)})
def test_subnet_create_post_network_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_get(IsA(http.HttpRequest),
network.id)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
form_data = {'network_id': subnet.network_id,
'network_name': network.name,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.quantum: ('network_get',
'subnet_create',)})
def test_subnet_create_post_subnet_exception(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
api.quantum.subnet_create(IsA(http.HttpRequest),
network_id=network.id,
network_name=network.name,
name=subnet.name,
cidr=subnet.cidr,
ip_version=subnet.ip_version,
gateway_ip=subnet.gateway_ip)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
form_data = {'network_id': subnet.network_id,
'network_name': network.name,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
redir_url = reverse('horizon:nova:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.quantum: ('network_get',)})
def test_subnet_create_post_cidr_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
# dummy IPv6 address
cidr = '2001:0DB8:0:CD30:123:4567:89AB:CDEF/60'
form_data = {'network_id': subnet.network_id,
'network_name': network.name,
'name': subnet.name,
'cidr': cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
expected_msg = 'Network Address and IP version are inconsistent.'
self.assertContains(res, expected_msg)
@test.create_stubs({api.quantum: ('network_get',)})
def test_subnet_create_post_gw_inconsistent(self):
network = self.networks.first()
subnet = self.subnets.first()
api.quantum.network_get(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
form_data = {'network_id': subnet.network_id,
'network_name': network.name,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': gateway_ip}
url = reverse('horizon:nova:networks:addsubnet',
args=[subnet.network_id])
res = self.client.post(url, form_data)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.quantum: ('subnet_modify',
'subnet_get',)})
def test_subnet_update_post(self):
subnet = self.subnets.first()
api.quantum.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
api.quantum.subnet_modify(IsA(http.HttpRequest), subnet.id,
name=subnet.name,
gateway_ip=subnet.gateway_ip)\
.AndReturn(subnet)
self.mox.ReplayAll()
formData = {'network_id': subnet.network_id,
'subnet_id': subnet.id,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': subnet.gateway_ip}
url = reverse('horizon:nova:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, formData)
redir_url = reverse('horizon:nova:networks:detail',
args=[subnet.network_id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.quantum: ('subnet_modify',
'subnet_get',)})
def test_subnet_update_post_gw_inconsistent(self):
subnet = self.subnets.first()
api.quantum.subnet_get(IsA(http.HttpRequest), subnet.id)\
.AndReturn(subnet)
self.mox.ReplayAll()
# dummy IPv6 address
gateway_ip = '2001:0DB8:0:CD30:123:4567:89AB:CDEF'
formData = {'network_id': subnet.network_id,
'subnet_id': subnet.id,
'name': subnet.name,
'cidr': subnet.cidr,
'ip_version': subnet.ip_version,
'gateway_ip': gateway_ip}
url = reverse('horizon:nova:networks:editsubnet',
args=[subnet.network_id, subnet.id])
res = self.client.post(url, formData)
self.assertContains(res, 'Gateway IP and IP version are inconsistent.')
@test.create_stubs({api.quantum: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',)})
def test_subnet_delete(self):
subnet = self.subnets.first()
network_id = subnet.network_id
api.quantum.subnet_delete(IsA(http.HttpRequest), subnet.id)
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.quantum.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
formData = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:nova:networks:detail',
args=[network_id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.quantum: ('subnet_delete',
'subnet_list',
'network_get',
'port_list',)})
    def test_subnet_delete_exception(self):
subnet = self.subnets.first()
network_id = subnet.network_id
api.quantum.subnet_delete(IsA(http.HttpRequest), subnet.id)\
.AndRaise(self.exceptions.quantum)
api.quantum.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
api.quantum.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
# Called from SubnetTable
api.quantum.network_get(IsA(http.HttpRequest), network_id)\
.AndReturn(self.networks.first())
self.mox.ReplayAll()
formData = {'action': 'subnets__delete__%s' % subnet.id}
url = reverse('horizon:nova:networks:detail',
args=[network_id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.quantum: ('port_get',)})
def test_port_detail(self):
port = self.ports.first()
api.quantum.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(self.ports.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:ports:detail',
args=[port.id]))
self.assertTemplateUsed(res, 'nova/networks/ports/detail.html')
self.assertEqual(res.context['port'].id, port.id)
@test.create_stubs({api.quantum: ('port_get',)})
def test_port_detail_exception(self):
port = self.ports.first()
api.quantum.port_get(IsA(http.HttpRequest), port.id)\
.AndRaise(self.exceptions.quantum)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:networks:ports:detail',
args=[port.id]))
self.assertRedirectsNoFollow(res, INDEX_URL)
| tylertian/Openstack | openstack F/horizon/horizon/dashboards/nova/networks/tests.py | Python | apache-2.0 | 32,726 |
#!/usr/env python
class Flyable:
def fly(self):
pass
class Quackable(object):
def quack(self):
pass
class ReadHeadDuckFly(Flyable):
def fly(self):
print "I am a readheadduck, I can fly"
class ReadHeadDuckQack(Quackable):
def quack(self):
print "I am a readheadduck,Dcuk duck duck..."
class Duck():
def swim(self):
print "I am a duck,I can swim..."
class ReadHeadDuck(Duck):
def __init__(self,flyable,quackable):
self.f = flyable
self.q = quackable
def fly(self):
return self.f.fly()
def quack(self):
return self.q.quack()
class Mallardduckflyable(Flyable):
def fly(self):
print "I am a Mallardduck....,I can fly"
class MallardduckQuackble(Quackable):
def quack(self):
print "I am a Mallardduck,Duck.duck..duck.."
class Mallardduck(Duck):
def __init__(self,flyable,quackable):
self.f = flyable
self.q = quackable
def fly(self):
return self.f.fly()
def quack(self):
return self.q.quack()
if __name__ == "__main__":
duck = Duck()
duck.swim()
rhduck = ReadHeadDuck(ReadHeadDuckFly(),ReadHeadDuckQack())
rhduck.fly()
rhduck.swim()
rhduck.quack()
md = Mallardduck(Mallardduckflyable(),MallardduckQuackble())
md.fly()
md.quack()
md.swim()
| sahikaru/DP | chapter1/strategymode.py | Python | gpl-2.0 | 1,364 |
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 1
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
#_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "SMpy: Flexible stellar pops., masses and mock observations with python"
# Long description will go up on the pypi page
long_description = """
Stellar populations and Masses with Python
==========================================
This package contains Python software designed for building and processing composite stellar populations in a simple but flexible manner. It allows for easy synthetic photometry to be produced for single models or large suites of models.
The code makes use of the `Astropy <https://astropy.readthedocs.org>`_ module throughout and therefore allows for easy conversion of physical units and a wide range of allowed cosmologies.
Currently supported simple stellar population models are:
1. `Bruzual & Charlot 2003 <http://www.bruzual.org/bc03/Updated_version_2012/>`_
2. `BPASS V1 & V2 <http://bpass.auckland.ac.nz/>`_
License
=======
``SMpy`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2015--, Kenneth Duncan
"""
NAME = "astro-smpy"
MAINTAINER = "Kenneth Duncan"
MAINTAINER_EMAIL = "[email protected]"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/dunkenj/smpy"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Kenneth Duncan"
AUTHOR_EMAIL = "[email protected]"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['smpy',
'smpy.tests']
PACKAGE_DATA = {'smpy': [pjoin('data', '*')]}
REQUIRES = ["numpy", "scipy", "h5py", "astropy", "six"]
| dunkenj/smpy | dist/astro-smpy-0.1.dev/smpy/version.py | Python | mit | 2,693 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
def execute(filters=None):
data = []
parents = {
"Product Bundle Item": "Product Bundle",
"BOM Explosion Item": "BOM",
"BOM Item": "BOM"
}
for doctype in ("Product Bundle Item",
"BOM Explosion Item" if filters.search_sub_assemblies else "BOM Item"):
all_boms = {}
for d in frappe.get_all(doctype, fields=["parent", "item_code"]):
all_boms.setdefault(d.parent, []).append(d.item_code)
for parent, items in all_boms.iteritems():
valid = True
for key, item in filters.iteritems():
if key != "search_sub_assemblies":
if item and item not in items:
valid = False
if valid:
data.append((parent, parents[doctype]))
return [{
"fieldname": "parent",
"label": "BOM",
"width": 200,
"fieldtype": "Dynamic Link",
"options": "doctype"
},
{
"fieldname": "doctype",
"label": "Type",
"width": 200,
"fieldtype": "Data"
}], data
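# Added note (illustrative): `execute` returns (columns, data) in the shape the
# frappe query-report runner expects; `data` is a list of rows such as
#     [("BOM-00012", "BOM"), ("Wireless Bundle", "Product Bundle")]
# i.e. each matching parent document together with its doctype (example names
# are hypothetical).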
| mahabuber/erpnext | erpnext/stock/report/bom_search/bom_search.py | Python | agpl-3.0 | 1,083 |
"""
Core snippets models
@author: Faraz Masood Khan [email protected]
@copyright: Copyright (c) 2013 FanaticLab
"""
import re
import datetime
from django.db import models, connection, transaction
from django.db.models import F
from django.template.defaultfilters import slugify
from codehunkit import memoize
from codehunkit.db import models as db_models
from codehunkit.app import stats
from codehunkit.app.models.core import Language, LanguageGraph, User, UserGraph
class Snippet(models.Model):
"""
Code snippet
"""
id = db_models.BigAutoField(primary_key=True)
slug = models.SlugField()
user = models.ForeignKey(User)
gist = models.TextField(db_index=True)
code = models.TextField(db_index=True)
    group = models.ForeignKey('self', null=True, blank=True)  # Allows grouping the same snippet written in different languages
language = models.ForeignKey(Language)
tags = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
up_votes = models.IntegerField(default=0)
down_votes = models.IntegerField(default=0)
rank = models.FloatField(default=0, db_index=True)
comments_count = models.IntegerField(default=0)
is_enabled = models.BooleanField(default=True)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=100)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
class Meta:
app_label = 'app'
def __unicode__(self):
return self.gist
@memoize.method
@models.permalink
def get_absolute_url(self):
return ('app_snippet_read', (self.id, self.slug,))
@memoize.method
@models.permalink
def get_short_url(self):
"""
Returns snippet url with id only
"""
return ('app_snippet_read', (self.id,))
def tags_list(self):
tags = (tag.strip() for tag in self.tags.split(','))
return [tag for tag in tags if tag]
def rating(self):
"""
Returns rating of snippet on the scale of 0 to 1
"""
return stats.rating(self.up_votes, self.down_votes)
@classmethod
def read(cls, snippet_id, user, comment_id=None, max_comments=20):
"""
        Returns snippet with all its comments sorted by rank
"""
result = list(cls.objects.raw('''
SELECT s.*, l.name AS lang_name, l.slug AS lang_slug, u.username, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
WHERE s.id = %s
''', [user.id, snippet_id]))
if len(result) == 0:
raise cls.DoesNotExist
snippet = result[0]
if snippet.is_enabled and max_comments:
if comment_id:
comments = list(Comment.objects.raw('''
SELECT c.*, v.index AS vote_index
FROM app_comment c
LEFT OUTER JOIN app_comment_vote v ON c.id = v.comment_id AND v.user_id = %s
WHERE c.snippet_id = %s AND c.id >= %s
ORDER BY c.rank DESC, c.id
LIMIT %s
''', [user.id, snippet_id, comment_id, max_comments]))
else:
comments = list(Comment.objects.raw('''
SELECT c.*, v.index AS vote_index
FROM app_comment c
LEFT OUTER JOIN app_comment_vote v ON c.id = v.comment_id AND v.user_id = %s
WHERE c.snippet_id = %s
ORDER BY c.rank DESC, c.id
LIMIT %s
''', [user.id, snippet_id, max_comments]))
snippet.loaded_comments = comments
if comment_id:
snippet.comments = [comment for comment in comments if comment.id == comment_id]
else:
snippet.comments = [comment for comment in comments if comment.reply_to_id == None]
for comment in comments:
comment.snippet = snippet
comment.replies = [reply for reply in comments if reply.reply_to_id == comment.id]
return snippet
@classmethod
def get_snippets(cls, user, page_index, page_size, sort_by_new):
"""
Returns all snippets
"""
if sort_by_new:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
ORDER BY s.id DESC
LIMIT %s OFFSET %s
'''
else:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
ORDER BY s.rank DESC, s.id DESC
LIMIT %s OFFSET %s
'''
return [snippet for snippet in cls.objects.raw(sql_query, [user.id, page_size, page_index * page_size])]
@classmethod
def lang_snippets(cls, lang, user, page_index, page_size, sort_by_new):
"""
        Returns snippets written in the given language
"""
if sort_by_new:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id AND l.id = %s
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
ORDER BY s.id DESC
LIMIT %s OFFSET %s
'''
else:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id AND l.id = %s
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
ORDER BY s.rank DESC, s.id DESC
LIMIT %s OFFSET %s
'''
return [snippet for snippet in cls.objects.raw(sql_query, [lang.id, user.id, page_size, page_index * page_size])]
@classmethod
def user_snippets(cls, user, app_user, page_index, page_size, sort_by_new):
"""
        Returns snippets posted by the given user
"""
if sort_by_new:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id AND u.id = %s
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
ORDER BY s.id DESC
LIMIT %s OFFSET %s
'''
else:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id AND u.id = %s
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
ORDER BY s.rank DESC, s.id DESC
LIMIT %s OFFSET %s
'''
return [snippet for snippet in cls.objects.raw(sql_query, [user.id, app_user.id, page_size, page_index * page_size])]
@classmethod
def tag_snippets(cls, tag_name, user, page_index, page_size, sort_by_new):
"""
        Returns snippets tagged with the given tag name
"""
tag_name = '%' + tag_name + '%'
if sort_by_new:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
WHERE s.tags ILIKE %s
ORDER BY s.id DESC
LIMIT %s OFFSET %s
'''
else:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
WHERE s.tags ILIKE %s
ORDER BY s.rank DESC, s.id DESC
LIMIT %s OFFSET %s
'''
return [snippet for snippet in cls.objects.raw(sql_query, [user.id, tag_name, page_size, page_index * page_size])]
@classmethod
def search_snippets(cls, text, user, page_index, page_size, sort_by_new):
"""
        Returns snippets whose gist or tags match the search text
"""
if sort_by_new:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
WHERE s.gist ILIKE %s OR s.tags ILIKE %s
ORDER BY s.id DESC
LIMIT %s OFFSET %s
'''
else:
sql_query = '''
SELECT s.*, u.username, l.name AS lang_name, l.slug AS lang_slug, v.index AS vote_index
FROM app_snippet s
INNER JOIN app_user u ON s.user_id = u.id
INNER JOIN app_language l ON s.language_id = l.id
LEFT OUTER JOIN app_snippet_vote v ON s.id = v.snippet_id AND v.user_id = %s
WHERE s.gist ILIKE %s OR s.tags ILIKE %s
ORDER BY s.rank DESC, s.id DESC
LIMIT %s OFFSET %s
'''
text = '%' + text + '%'
return [snippet for snippet in cls.objects.raw(sql_query, [user.id, text, text, page_size, page_index * page_size])]
@classmethod
def create(cls, gist, code, language_id, tags, user):
"""
Creates a new code snippet in database
"""
language = Language.objects.get(id=language_id)
tags = tags.split(',')
if language.name in tags: tags.remove(language.name)
tags = Tag.clean_tags(tags)
snippet = cls.objects.create(gist=gist,
slug=slugify(gist[:50]),
user=user,
code=code,
language=language,
tags=tags,
updated_by=str(user),
created_by=str(user))
Tag.add_tags(tags, user)
LanguageGraph.objects.filter(language_id=language.id).update(snippets_count=F('snippets_count') + 1)
UserGraph.objects.filter(user_id=user.id).update(snippets_count=F('snippets_count') + 1)
SnippetVote.vote_up(user, snippet.id)
return snippet
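    # Added note: creating a snippet also registers its tags, bumps the
    # language and user snippet counters, and records the author's own
    # up-vote (see the calls above), so callers should not repeat those steps.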
class SnippetVote(models.Model):
"""
User and snippet vote
"""
user = models.ForeignKey(User)
snippet = models.ForeignKey(Snippet)
index = models.IntegerField(default=0)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=100)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
class Meta:
app_label = 'app'
db_table = 'app_snippet_vote'
unique_together = ('user', 'snippet')
@classmethod
def vote_up(cls, user, snippet_id):
"""
        Vote up if the user has not voted yet, otherwise reset the vote to zero.
        Returns [new_vote_index, delta] where delta is the net change to the snippet's votes.
"""
snippet = Snippet.objects.get(id=snippet_id)
try:
snippet_vote = cls.objects.get(snippet_id=snippet_id, user=user)
except cls.DoesNotExist:
snippet_vote = cls.objects.create(snippet_id=snippet_id, user=user, updated_by=str(user), created_by=str(user))
cursor = connection.cursor()
try:
if snippet_vote.index == 0:
cls.objects.filter(id=snippet_vote.id).update(index=1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') + 1)
UserGraph.objects.filter(user=snippet.user).update(up_votes=F('up_votes') + 1)
LanguageGraph.objects.filter(language_id=snippet.language_id).update(up_votes=F('up_votes') + 1)
cursor.execute('''UPDATE app_snippet SET up_votes = up_votes + 1, votes = (up_votes - down_votes + 1), rank = compute_rank(up_votes - down_votes + 1, created_on) WHERE id = %s''', [snippet_id])
vote = [1, 1]
elif snippet_vote.index > 0:
cls.objects.filter(id=snippet_vote.id).update(index=0, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') - 1)
UserGraph.objects.filter(user=snippet.user).update(up_votes=F('up_votes') - 1)
LanguageGraph.objects.filter(language_id=snippet.language_id).update(up_votes=F('up_votes') - 1)
cursor.execute('''UPDATE app_snippet SET up_votes = up_votes - 1, votes = (up_votes - down_votes - 1), rank = compute_rank(up_votes - down_votes - 1, created_on) WHERE id = %s''', [snippet_id])
vote = [0, -1]
else:
cls.objects.filter(id=snippet_vote.id).update(index=1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') + 1, dislikes=F('dislikes') - 1)
UserGraph.objects.filter(user=snippet.user).update(up_votes=F('up_votes') + 1, down_votes=F('down_votes') - 1)
LanguageGraph.objects.filter(language_id=snippet.language_id).update(up_votes=F('up_votes') + 1, down_votes=F('down_votes') - 1)
cursor.execute('''UPDATE app_snippet SET up_votes = up_votes + 1, down_votes = down_votes - 1, votes = (up_votes - down_votes + 2), rank = compute_rank(up_votes - down_votes + 2, created_on) WHERE id = %s''', [snippet_id])
vote = [1, 2]
return vote
finally:
cursor.close()
@classmethod
def vote_down(cls, user, snippet_id):
"""
        Vote down if the user has not voted yet, otherwise reset the vote to zero.
        Returns [new_vote_index, delta] where delta is the net change to the snippet's votes.
"""
snippet = Snippet.objects.get(id=snippet_id)
try:
snippet_vote = cls.objects.get(snippet_id=snippet_id, user=user)
except cls.DoesNotExist:
snippet_vote = cls.objects.create(snippet_id=snippet_id, user=user, updated_by=str(user), created_by=str(user))
cursor = connection.cursor()
try:
if snippet_vote.index == 0:
cls.objects.filter(id=snippet_vote.id).update(index=-1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(dislikes=F('dislikes') + 1)
UserGraph.objects.filter(user=snippet.user).update(down_votes=F('down_votes') + 1)
LanguageGraph.objects.filter(language_id=snippet.language_id).update(down_votes=F('down_votes') + 1)
cursor.execute('''UPDATE app_snippet SET down_votes = down_votes + 1, votes = (up_votes - down_votes - 1), rank = compute_rank(up_votes - down_votes - 1, created_on) WHERE id = %s''', [snippet_id])
vote = [-1, -1]
elif snippet_vote.index > 0:
cls.objects.filter(id=snippet_vote.id).update(index=-1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') - 1, dislikes=F('dislikes') + 1)
UserGraph.objects.filter(user=snippet.user).update(up_votes=F('up_votes') - 1, down_votes=F('down_votes') + 1)
LanguageGraph.objects.filter(language_id=snippet.language_id).update(up_votes=F('up_votes') - 1, down_votes=F('down_votes') + 1)
cursor.execute('''UPDATE app_snippet SET up_votes = up_votes - 1, down_votes = down_votes + 1, votes = (up_votes - down_votes - 2), rank = compute_rank(up_votes - down_votes - 2, created_on) WHERE id = %s''', [snippet_id])
vote = [-1, -2]
else:
cls.objects.filter(id=snippet_vote.id).update(index=0, updated_by=str(user))
UserGraph.objects.filter(user=user).update(dislikes=F('dislikes') - 1)
UserGraph.objects.filter(user=snippet.user).update(down_votes=F('down_votes') - 1)
LanguageGraph.objects.filter(language_id=snippet.language_id).update(down_votes=F('down_votes') - 1)
cursor.execute('''UPDATE app_snippet SET down_votes = down_votes - 1, votes = (up_votes - down_votes + 1), rank = compute_rank(up_votes - down_votes + 1, created_on) WHERE id = %s''', [snippet_id])
vote = [0, 1]
return vote
finally:
cursor.close()
class Comment(models.Model):
"""
    User's comment on a snippet, or a reply to another comment
"""
id = db_models.BigAutoField(primary_key=True)
user = models.ForeignKey(User)
snippet = models.ForeignKey(Snippet)
reply_to = models.ForeignKey('self', null=True, blank=True)
comment_text = models.TextField()
votes = models.IntegerField(default=0)
up_votes = models.IntegerField(default=0)
down_votes = models.IntegerField(default=0)
rank = models.FloatField(default=0, db_index=True)
replies_count = models.IntegerField(default=0)
is_enabled = models.BooleanField(default=True)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=100)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
class Meta:
app_label = 'app'
def __unicode__(self):
return self.comment_text[:50] if self.comment_text else ''
@memoize.method
@models.permalink
def get_absolute_url(self):
return ('app_comment_read', (self.snippet_id, self.id,))
@classmethod
def save_comment(cls, user, snippet_id, comment_text):
"""
        Save user's comment on a snippet in database
"""
from badges import Badge, UserBadge
if Snippet.objects.filter(id=snippet_id).update(comments_count=F('comments_count') + 1) == 1:
UserGraph.objects.filter(user=user).update(comments_count=F('comments_count') + 1)
comment = cls.objects.create(snippet_id=snippet_id, user=user, comment_text=comment_text, updated_by=str(user), created_by=str(user))
if not UserBadge.objects.filter(user=user, badge=Badge.get_commentator()).exists() \
and UserGraph.objects.filter(user=user, comments_count__gte=10).exists():
# User earned a commentator badge
UserBadge.award(user, Badge.get_commentator())
#Message.add_comment_msg(comment, user)
return comment
@classmethod
def save_reply(cls, user, snippet_id, comment_id, comment_text):
"""
        Save user's reply to an existing comment in database
"""
from badges import Badge, UserBadge
if Snippet.objects.filter(id=snippet_id).update(comments_count=F('comments_count') + 1) == 1:
UserGraph.objects.filter(user=user).update(comments_count=F('comments_count') + 1)
cls.objects.filter(id=comment_id).update(replies_count=F('replies_count') + 1)
comment = cls.objects.create(snippet_id=snippet_id, reply_to_id=comment_id, user=user, comment_text=comment_text, updated_by=str(user), created_by=str(user))
if not UserBadge.objects.filter(user=user, badge=Badge.get_commentator()).exists() \
and UserGraph.objects.filter(user=user, comments_count__gte=10).exists():
# User earned a commentator badge
UserBadge.award(user, Badge.get_commentator())
#Message.add_reply_msg(comment, user)
return comment
class CommentVote(models.Model):
"""
    User and comment vote
"""
user = models.ForeignKey(User)
comment = models.ForeignKey(Comment)
index = models.IntegerField(default=0)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=100)
created_on = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=100)
class Meta:
app_label = 'app'
db_table = 'app_comment_vote'
unique_together = ('user', 'comment')
@classmethod
def vote_up(cls, user, comment_id):
"""
        Vote up if the user has not voted yet, otherwise reset the vote to zero.
        Returns [new_vote_index, delta] where delta is the net change to the comment's votes.
"""
try:
comment_vote = cls.objects.get(comment_id=comment_id, user=user)
except CommentVote.DoesNotExist:
comment_vote = cls.objects.create(comment_id=comment_id, user=user, updated_by=str(user), created_by=str(user))
cursor = connection.cursor()
try:
if comment_vote.index == 0:
cls.objects.filter(comment_id=comment_id, user=user).update(index=1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') + 1)
                cursor.execute('''UPDATE app_comment SET up_votes = up_votes + 1, votes = (up_votes - down_votes + 1), rank = compute_rank(up_votes + 1, down_votes) WHERE id = %s''', [comment_id])
vote = [1, 1]
elif comment_vote.index > 0:
cls.objects.filter(comment_id=comment_id, user=user).update(index=0, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') - 1)
                cursor.execute('''UPDATE app_comment SET up_votes = up_votes - 1, votes = (up_votes - down_votes - 1), rank = compute_rank(up_votes - 1, down_votes) WHERE id = %s''', [comment_id])
vote = [0, -1]
else:
cls.objects.filter(comment_id=comment_id, user=user).update(index=1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') + 1, dislikes=F('dislikes') - 1)
                cursor.execute('''UPDATE app_comment SET up_votes = up_votes + 1, down_votes = down_votes - 1, votes = (up_votes - down_votes + 2), rank = compute_rank(up_votes + 1, down_votes - 1) WHERE id = %s''', [comment_id])
vote = [1, 2]
return vote
finally:
cursor.close()
@classmethod
def vote_down(cls, user, comment_id):
"""
        Vote down if the user has not voted yet, otherwise reset the vote to zero.
        Returns [new_vote_index, delta] where delta is the net change to the comment's votes.
"""
try:
comment_vote = cls.objects.get(comment_id=comment_id, user=user)
except CommentVote.DoesNotExist:
comment_vote = cls.objects.create(comment_id=comment_id, user=user, updated_by=str(user), created_by=str(user))
cursor = connection.cursor()
try:
if comment_vote.index == 0:
cls.objects.filter(comment_id=comment_id, user=user).update(index= -1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(dislikes=F('dislikes') + 1)
                cursor.execute('''UPDATE app_comment SET down_votes = down_votes + 1, votes = (up_votes - down_votes - 1), rank = compute_rank(up_votes, down_votes + 1) WHERE id = %s''', [comment_id])
vote = [-1, -1]
elif comment_vote.index > 0:
cls.objects.filter(comment_id=comment_id, user=user).update(index= -1, updated_by=str(user))
UserGraph.objects.filter(user=user).update(likes=F('likes') - 1, dislikes=F('dislikes') + 1)
                cursor.execute('''UPDATE app_comment SET up_votes = up_votes - 1, down_votes = down_votes + 1, votes = (up_votes - down_votes - 2), rank = compute_rank(up_votes - 1, down_votes + 1) WHERE id = %s''', [comment_id])
vote = [-1, -2]
else:
cls.objects.filter(comment_id=comment_id, user=user).update(index=0, updated_by=str(user))
UserGraph.objects.filter(user=user).update(dislikes=F('dislikes') - 1)
                cursor.execute('''UPDATE app_comment SET down_votes = down_votes - 1, votes = (up_votes - down_votes + 1), rank = compute_rank(up_votes, down_votes - 1) WHERE id = %s''', [comment_id])
vote = [0, 1]
return vote
finally:
cursor.close()
class Tag(models.Model):
name = models.CharField(unique=True, max_length=50)
description = models.TextField(blank=True, null=True)
is_muted = models.BooleanField(default=False)
is_default = models.BooleanField(default=False)
updated_on = models.DateTimeField(auto_now=True)
updated_by = models.CharField(max_length=75)
class Meta:
app_label = 'app'
def __unicode__(self):
return unicode(self.name)
@models.permalink
def get_absolute_url(self):
"""
Returns absolute tag url
"""
return ('app_tag', (self.name,))
@classmethod
def add_tags(cls, tags, user):
"""
        Create new tags in database if they don't already exist
"""
sql = '''INSERT INTO app_tag (name, is_muted, is_default, updated_by, updated_on)
SELECT %s, false, false, %s, %s WHERE NOT EXISTS (SELECT 1 FROM app_tag WHERE lower(name) = lower(%s));'''
now = datetime.datetime.now()
parameters = ((tag, str(user), now, tag) for tag in tags.split(','))
cursor = connection.cursor()
try:
cursor.executemany(sql, parameters)
finally:
cursor.close()
@classmethod
def get_tags(cls):
return [tag for tag in cls.objects.filter(is_muted=False).order_by('name')]
@staticmethod
def clean_tags(tags):
"""
        Return a cleaned, comma-joined tags string with spaces and special characters removed
"""
tags = (re.sub(r'[^\w\.-]', '', tag) for tag in tags)
tags = ','.join(tag for tag in tags if len(tag) > 1 and len(tag) <= 10)
return tags
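    # Illustrative example (added): special characters are stripped and only
    # tags of length 2..10 are kept, e.g.
    #     Tag.clean_tags(['py3 ', 'Django!', 'ml', 'a'])  ->  'py3,Django,ml'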
| mysteryjeans/codehunkit | codehunkit/app/models/snippets.py | Python | gpl-2.0 | 29,275 |
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from datetime import date
from locations.items import GeojsonPointItem
class GapSpider(scrapy.Spider):
name = "gap"
allowed_domains = ["www.gap.com"]
gap_url = 'http://www.gap.com/products/store-locations.jsp'
store_url = 'http://www.gap.com/resources/storeLocations/v1/us/store/?storeid={}'
start_urls = (gap_url, )
    def store_hours(self, store_hours):
        """Convert the feed's per-day hour strings into an opening_hours string."""
        if store_hours is None:
            return ''
day_groups = []
this_day_group = None
for line in store_hours:
if 'CLOSED' in line:
                match = re.search(r'^([A-Za-z]{1,3}): ([A-Za-z]*)$', line)
(day, closed) = match.groups()
hours = closed
else:
                match = re.search(r'^([A-Za-z]{1,3}): (\d{1,2})[:]?(\d{1,2})? (A|P)M - (\d{1,2})[:]?(\d{1,2})? (A|P)M$',
                                  line)
(day, f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()
f_hr = int(f_hr)
if f_ampm in ['p', 'P']:
f_hr += 12
elif f_ampm in ['a', 'A'] and f_hr == 12:
f_hr = 0
t_hr = int(t_hr)
if t_ampm in ['p', 'P']:
t_hr += 12
elif t_ampm in ['a', 'A'] and t_hr == 12:
t_hr = 0
try:
f_min = int(f_min)
except ValueError:
f_min = 0
try:
t_min = int(t_min)
except ValueError:
t_min = 0
hours = '{:02d}:{:02d}-{:02d}:{:02d}'.format(
f_hr,
f_min,
t_hr,
t_min
)
if not this_day_group:
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] != hours:
day_groups.append(this_day_group)
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
if this_day_group:
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
opening_hours = '24/7'
else:
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Mo' and day_group['to_day'] == 'Su':
opening_hours += '{hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
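        # Worked example (added, hedged): input such as
        #   ["Mon: 10:00 AM - 9:00 PM", "Tue: 10:00 AM - 9:00 PM", "Sun: CLOSED"]
        # groups consecutive days with identical hours and would yield roughly
        #   "Mon-Tue 10:00-21:00; Sun CLOSED"
        # (day tokens are passed through exactly as the feed provides them).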
def parse(self, response):
data = response.xpath('//div[@id="sdStoreStates"]//li/a/@href').extract()
for store in data:
match = re.search(r'^.+(-store)\-(\d{1,4})(.jsp)$', store)
(_, store_id, _) = match.groups()
yield scrapy.Request(self.store_url.format(store_id), callback=self.parse_store)
def parse_store(self, response):
json_str = response.body_as_unicode()
store = json.loads(json_str)['storeLocations']['storeLocationList']
self.logger.info('store %s' % str(store))
store_addr = store['storeAddress']
self.logger.info('store_addr %s' % store_addr)
(num, street) = store_addr['addressLine1'].split(' ', 1)
zip_code = store_addr['postalCode']
properties = {
"phone": store_addr['phoneNumber'],
"ref": store['storeId'],
"name": store['storeName'],
"opening_hours": self.store_hours(store.get('storeHours', None)),
"lat": store['latitude'],
"lon": store['longitude'],
"addr_full": store_addr['addressLine1'],
"housenumber": num,
"street": street,
"city": store_addr['cityName'],
"state": store_addr['stateProvinceCode'],
"postcode": zip_code,
"country": store_addr['countryCode'],
"website": response.url,
}
yield GeojsonPointItem(**properties)
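# Added usage note (hedged): with the project's Scrapy settings on the path the
# spider can typically be run with `scrapy crawl gap`; how items are exported
# depends on the feed exporters configured in the surrounding project.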
| iandees/all-the-places | locations/spiders/gap.py | Python | mit | 4,628 |