from django.conf.urls import patterns, include, url
from dashboard import views
urlpatterns = patterns('',
url(r'^get/travis$', views.get_travis_builds),
url(r'^put/travis$', views.put_travis_build),
url(r'^put/clear_travis$', views.clear_travis),
url(r'^get/nightly$', views.get_nightly_builds),
url(r'^put/nightly$', views.put_nightly_build),
url(r'^get/codespeed$', views.get_codespeed_builds),
url(r'^put/codespeed$', views.put_codespeed_build),
url(r'^get/codespeed_envs$', views.get_codespeed_environments),
url(r'^put/codespeed_env$', views.put_codespeed_environment),
url(r'^download/(.+)', views.get_latest),
url(r'^stable/(.+)', views.get_stable),
)
| {
"content_hash": "4cefa0c5b961ebb549808b7d341fbf87",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 67,
"avg_line_length": 41.529411764705884,
"alnum_prop": 0.6742209631728046,
"repo_name": "staticfloat/status.julialang.org",
"id": "5374f816fad55709f103ce1b1ea357f98b299d53",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/dashboard_project/dashboard/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "2547"
},
{
"name": "HTML",
"bytes": "10555"
},
{
"name": "JavaScript",
"bytes": "11449"
},
{
"name": "Python",
"bytes": "19700"
},
{
"name": "Shell",
"bytes": "2663"
}
],
"symlink_target": ""
} |
"""Handling configuration files."""
#Copyright (C) 2010 Sebastian Heinlein <[email protected]>
#
#Licensed under the GNU General Public License Version 2
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__author__ = "Sebastian Heinlein <[email protected]>"
__all__ = ("ConfigWriter",)
import logging
import os
import apt_pkg
log = logging.getLogger("AptDaemon.ConfigWriter")
class Value(object):
"""Represents a value with position information.
.. attribute:: string
The value string.
.. attribute:: line
The line number of the configuration file in which the value is set.
.. attribute:: start
The position in the line at which the value starts.
.. attribute:: end
The position in the line at which the value ends.
.. attribute:: quotes
        The outer quotes of the value: ' or "
"""
def __init__(self, line, start, quotes):
self.string = ""
self.start = start
self.end = None
self.line = line
self.quotes = quotes
def __cmp__(self, other):
return self.string == other
def __repr__(self):
return "Value: '%s' (line %s: %s to %s)" % (self.string, self.line,
self.start, self.end)
class ConfigWriter(object):
"""Modifies apt configuration files."""
def parse(self, lines):
"""Parse an ISC based apt configuration.
:param lines: The list of lines of a configuration file.
:returns: Dictionary of key, values found in the parsed configuration.
"""
options = {}
in_comment = False
in_value = False
prev_char = None
option = []
value = None
option_name = ""
value_list = []
in_brackets = True
level = 0
for line_no, line in enumerate(lines):
for char_no, char in enumerate(line):
if not in_comment and char == "*" and prev_char == "/":
in_comment = True
prev_char = ""
continue
elif in_comment and char == "/" and prev_char == "*":
# A multiline comment was closed
in_comment = False
prev_char = ""
option_name = option_name[:-1]
continue
elif in_comment:
# We ignore the content of multiline comments
pass
elif not in_value and ((char == "/" and prev_char == "/") or
char == "#"):
# In the case of a line comment continue processing
# the next line
prev_char = ""
option_name = option_name[:-1]
break
elif char in "'\"":
if in_value and value.quotes == char:
value.end = char_no
in_value = not in_value
elif not value:
value = Value(line_no, char_no, char)
in_value = not in_value
else:
value.string += char
elif in_value:
value.string += char
elif option_name and char == ":" and prev_char == ":":
option.append(option_name[:-1])
option_name = ""
elif char.isalpha() or char in "/-:._+":
option_name += char.lower()
elif char == ";":
if in_brackets:
value_list.append(value)
value = None
continue
if value_list:
log.debug("Found %s \"%s\"", "::".join(option),
value_list)
options["::".join(option)] = value_list
value_list = []
elif value:
log.debug("Found %s \"%s\"", "::".join(option), value)
options["::".join(option)] = value
else:
log.debug("Skipping empty key %s", "::".join(option))
value = None
if level > 0:
option.pop()
else:
option = []
elif char == "}":
level -= 1
in_brackets = False
elif char == "{":
level += 1
if option_name:
option.append(option_name)
option_name = ""
in_brackets = True
elif char in "\t\n ":
if option_name:
option.append(option_name)
option_name = ""
in_brackets = False
else:
raise ValueError("Unknown char '%s' in line: '%s'" %
(char, line))
prev_char = char
return options
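    # Editorial sketch (not part of the original module): parse() takes the
    # raw lines of a configuration file, so a minimal call could look like
    # the following; keys are lower-cased and joined with "::", and the
    # values are Value objects carrying position information.
    #
    #   cw = ConfigWriter()
    #   opts = cw.parse(['APT::Periodic::Update-Package-Lists "1";\n'])
    #   # opts -> {'apt::periodic::update-package-lists': <Value '1'>}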
def set_value(self, option, value, defaultfile):
"""Change the value of an option in the configuration.
:param option: The name of the option, e.g.
'apt::periodic::AutoCleanInterval'.
:param value: The value of the option. Will be converted to string.
:param defaultfile: The filename of the ``/etc/apt/apt.conf.d``
configuration snippet in which the option should be set.
            If the value is overridden by a later configuration file snippet
it will be disabled in the corresponding configuration file.
"""
#FIXME: Support value lists
# Convert the value to string
if value is True:
value = "true"
elif value is False:
value = "false"
else:
value = str(value)
# Check all configuration file snippets
etc_parts = os.path.join(apt_pkg.config.find_dir("Dir::Etc"),
apt_pkg.config.find_dir("Dir::Etc::Parts"))
for filename in os.listdir(etc_parts):
if filename < defaultfile:
continue
with open(os.path.join(etc_parts, filename)) as fd:
lines = fd.readlines()
config = self.parse(lines)
try:
val = config[option.lower()]
except KeyError:
if filename == defaultfile:
lines.append("%s '%s';\n" % (option, value))
else:
continue
else:
# Check if the value needs to be changed at all
if ((value == "true" and
val.string.lower() in ["yes", "with", "on",
"enable"]) or
(value == "false" and
val.string.lower() in ["no", "without", "off",
"disable"]) or
(str(value) == val.string)):
continue
if filename == defaultfile:
line = lines[val.line]
new_line = line[:val.start + 1]
new_line += value
new_line += line[val.end:]
lines[val.line] = new_line
else:
# Comment out existing values instead in non default
# configuration files
#FIXME Quite dangerous for brackets
lines[val.line] = "// %s" % lines[val.line]
with open(os.path.join(etc_parts, filename), "w") as fd:
log.debug("Writting %s", filename)
fd.writelines(lines)
if not os.path.exists(os.path.join(etc_parts, defaultfile)):
with open(os.path.join(etc_parts, defaultfile), "w") as fd:
log.debug("Writting %s", filename)
line = "%s '%s';\n" % (option, value)
fd.write(line)
def main():
apt_pkg.init_config()
cw = ConfigWriter()
for filename in sorted(os.listdir("/etc/apt/apt.conf.d/")):
lines = open("/etc/apt/apt.conf.d/%s" % filename).readlines()
cw.parse(lines)
print((cw.set_value("huhu::abc", "lumpi", "10glatzor")))
if __name__ == "__main__":
main()
# vim:ts=4:sw=4:et
| {
"content_hash": "e5f8bb4fa30592b0c4988abd0bb938bb",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 78,
"avg_line_length": 37.876543209876544,
"alnum_prop": 0.4780530204259018,
"repo_name": "yasoob/PythonRSSReader",
"id": "3d7f76c9e574dc83c55e99e0d171d612aba3f5a3",
"size": "9204",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/dist-packages/aptdaemon/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "58615"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "HTML",
"bytes": "1638"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "22979347"
},
{
"name": "Shell",
"bytes": "5224"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""reqwire: wire up Python requirements with pip-tools."""
from __future__ import absolute_import
import io
import sys
import setuptools
__all__ = ('setup',)
def readme():
with io.open('README.rst') as fp:
return fp.read()
def setup():
"""Package setup entrypoint."""
extra_requirements = {
':python_version=="2.7"': ['enum34', 'pathlib2'],
}
install_requirements = [
'atomicwrites',
'biome',
'emoji',
'fasteners',
'ordered-set',
'pip-tools',
'requests',
'sh',
'typing',
]
setup_requirements = ['six', 'setuptools>=17.1', 'setuptools_scm']
needs_sphinx = {
'build_sphinx',
'docs',
'upload_docs',
}.intersection(sys.argv)
if needs_sphinx:
setup_requirements.append('sphinx')
setuptools.setup(
author='David Gidwani',
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
],
description=__doc__,
entry_points={
'console_scripts': [
'reqwire = reqwire.cli:main',
],
},
extras_require=extra_requirements,
install_requires=install_requirements,
license='MIT',
long_description=readme(),
name='reqwire',
package_dir={'': 'src'},
packages=setuptools.find_packages('./src'),
setup_requires=setup_requirements,
url='https://github.com/darvid/reqwire',
use_scm_version=True,
zip_safe=False,
)
if __name__ == '__main__':
setup()
| {
"content_hash": "9b870d901c18b53f2c83e7d083140219",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 75,
"avg_line_length": 29.2183908045977,
"alnum_prop": 0.5310778914240756,
"repo_name": "darvid/reqwire",
"id": "80b424373af7133d7b8d967c2862317c62d67525",
"size": "2564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60013"
}
],
"symlink_target": ""
} |
import sys
import os
import os.path
import glob
import shutil
import logging
import optparse
from PIL import Image
import pickle
import re  # used directly in modify_htmls(); not guaranteed to be re-exported by tiler_functions
from tiler_functions import *
class mergeOptions:
def __init__(self, src_list):
self.tms = None
self.add_src_ext = None
self.src_list = src_list
self.remove_dest = None
self.quiet = None
self.strip_src_ext = None
self.nothreads = True
self.debug = None
self.tile_size = '256,256'
self.underlay = 0
def setMergeOptions(src_list):
global options
options = mergeOptions(src_list)
class KeyboardInterruptError(Exception):
pass
def modify_htmls(src_dir, dst_dir):
'adjusts destination gmaps.html and returns tile style (gmaps,TMS)'
googlemaps='gmaps.html'
s_html,d_html=[os.path.join(d,googlemaps) for d in (src_dir,dst_dir)]
if not os.path.exists(s_html):
return False
# check if it's TMS type
tms_tiles= 'true' in [ i for i in open(s_html) if 'var tms_tiles' in i][0]
if not os.path.exists(d_html):
shutil.copy(s_html,dst_dir)
else:
# get a list of zoom levels
try:
cwd=os.getcwd()
os.chdir(dst_dir)
dzooms=sorted([eval(i) for i in glob.glob('[0-9]*')])
finally:
os.chdir(cwd)
zoom_min=dzooms[0]
zoom_max=dzooms[-1]
bounds=[]
for f in (s_html,d_html):
txt=[ i for i in open(f)
if 'var mapBounds = new G.LatLngBounds' in i][0]
num_str=re.sub('[^-,.0-9]*','',re.sub('\.Lat*','',txt)) # leave only numbers there
bounds.append(map(float,num_str.split(',')))
s_bounds,d_bounds=bounds
ld((s_bounds,d_bounds))
if s_bounds[0] < d_bounds[0]: d_bounds[0]=s_bounds[0]
if s_bounds[1] < d_bounds[1]: d_bounds[1]=s_bounds[1]
if s_bounds[2] > d_bounds[2]: d_bounds[2]=s_bounds[2]
if s_bounds[3] > d_bounds[3]: d_bounds[3]=s_bounds[3]
ld(d_bounds)
# write back modified googlemaps.html
map_name=os.path.split(dst_dir)[1]
subs=[("(var mapBounds = new G.LatLngBounds).*;",
"\\1( new G.LatLng(%f, %f), new G.LatLng(%f, %f));" % tuple(d_bounds)),
('(var mapMinZoom =).*;','\\1 %i;' % zoom_min),
('(var mapMaxZoom =).*;','\\1 %i;' % zoom_max),
('<title>.*</title>','<title>%s</title>' % map_name),
('<h1>.*</h1>','<h1>%s</h1>' % map_name)]
re_sub_file(d_html, subs)
return tms_tiles
def transparency(img):
'estimate transparency of an image'
(r,g,b,a)=img.split()
(a_min,a_max)=a.getextrema() # get min/max values for alpha channel
return 1 if a_min == 255 else 0 if a_max == 0 else -1
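# Editorial note (illustrative, not in the original): transparency() expects
# an RGBA image, so callers first do Image.open(path).convert("RGBA"); the
# return value is 1 for a fully opaque tile, 0 for a fully transparent one
# and -1 for a semi-transparent tile that needs compositing.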
class MergeSet:
def __init__(self,src_dir,dst_dir):
(self.src,self.dest)=(src_dir,dst_dir)
self.tile_sz=tuple(map(int,options.tile_size.split(',')))
if options.strip_src_ext:
            self.src=os.path.splitext(self.src)[0]
if options.add_src_ext is not None:
self.src+=options.add_src_ext
pf(self.src+' ',end='')
try:
cwd=os.getcwd()
os.chdir(self.src)
self.src_lst=glob.glob('[0-9]*/*/*.png')
try:
self.max_zoom=max([int(i) for i in glob.glob('[0-9]*')])
except:
print "there is a problem"
print self.src
sys.exit()
finally:
os.chdir(cwd)
ld(self.src_lst)
# load cached tile transparency data if any
self.src_transp=dict.fromkeys(self.src_lst,None)
self.src_cache_path=os.path.join(self.src, 'merge-cache')
try:
self.src_transp.update(pickle.load(open(self.src_cache_path,'r')))
except:
ld("cache load failed")
ld(repr(self.src_transp))
# define crop map for underlay function
tsx,tsy=self.tile_sz
self.underlay_map=[ # lf up rt lw
( 0, 0,tsx/2,tsy/2), (tsx/2, 0, tsx,tsy/2),
( 0,tsy/2,tsx/2, tsy), (tsx/2,tsy/2, tsx, tsy),
]
# do the thing
self.merge_dirs()
def underlay(self,tile,src_path,src_raster,level):
if level <= 0:
return
level -= 1
(s,ext)=os.path.splitext(tile)
(s,y)=os.path.split(s)
(z,x)=os.path.split(s)
(z,y,x)=map(int,(z,y,x))
if z < self.max_zoom:
return
dz,dx,dy=z+1,x*2,y*2
dst_tiles=[(dx,dy), (dx+1,dy),
(dx,dy+1),(dx+1,dy+1)]
for (dst_xy,src_area) in zip(dst_tiles,self.underlay_map):
dst_tile='%i/%i/%i%s' % (dz,dst_xy[0],dst_xy[1],ext)
dst_path=os.path.join(self.dest,dst_tile)
if not os.path.exists(dst_path):
continue
dst_raster=Image.open(dst_path).convert("RGBA")
if transparency(dst_raster) == 1: # lower tile is fully opaque
continue
if not src_raster: # check if opening was deferred
src_raster=Image.open(src_path).convert("RGBA")
out_raster=src_raster.crop(src_area).resize(self.tile_sz,Image.BILINEAR)
out_raster=Image.composite(dst_raster,out_raster,dst_raster)
del dst_raster
out_raster.save(dst_path)
if options.debug:
pf('%i'%level,end='')
else:
pf('#',end='')
self.underlay(dst_tile,dst_path,out_raster,level)
def __call__(self,tile):
'''called by map() to merge a source tile into the destination tile set'''
try:
src_path=os.path.join(self.src,tile)
dst_tile=os.path.join(self.dest,tile)
dpath=os.path.dirname(dst_tile)
if not os.path.exists(dpath):
try: # thread race safety
os.makedirs(dpath)
except os.error: pass
src_raster=None
transp=self.src_transp[tile]
if transp == None: # transparency value not cached yet
#pf('!',end='')
src_raster=Image.open(src_path).convert("RGBA")
transp=transparency(src_raster)
if transp == 0 : # fully transparent
#pf('-',end='')
#os.remove(src_path)
pass
elif transp == 1 or not os.path.exists(dst_tile):
# fully opaque or no destination tile exists yet
#pf('>',end='')
shutil.copy(src_path,dst_tile)
else: # semitransparent, combine with destination (exists! see above)
pf('+',end='')
if not src_raster:
src_raster=Image.open(src_path).convert("RGBA")
#dst_raster=Image.composite(src_raster,Image.open(dst_tile),src_raster)
#dst_raster=Image.composite(src_raster,Image.open(dst_tile),Image.new("RGBA", (256, 256)))
dst_raster=Image.composite(src_raster,Image.open(dst_tile).convert("RGBA"),src_raster)
dst_raster.save(dst_tile)
if options.underlay and transp != 0:
self.underlay(tile,src_path,src_raster,options.underlay)
except KeyboardInterrupt: # http://jessenoller.com/2009/01/08/multiprocessingpool-and-keyboardinterrupt/
print 'got KeyboardInterrupt'
raise KeyboardInterruptError()
return (tile,transp) # send back transparency values for caching
def upd_stat(self,transparency_data):
self.src_transp.update(dict(transparency_data))
try:
pickle.dump(self.src_transp,open(self.src_cache_path,'w'))
except:
ld("cache save failed")
pf('')
def merge_dirs(self):
tms_html=modify_htmls(self.src, self.dest)
if options.tms or tms_html: # rearrange underlay crop map for TMS tiles
m=self.underlay_map
self.underlay_map=[m[2],m[3],m[0],m[1]]
src_transparency=parallel_map(self,self.src_lst)
self.upd_stat(src_transparency)
# MergeSet end
if __name__=='__main__':
parser = optparse.OptionParser(
usage="usage: %prog [--cut] [--dest-dir=DST_DIR] <tile_dirs>... <target_dir>",
version=version,
description="")
parser.add_option("-r", "--remove-dest", action="store_true",
help='delete destination directory before merging')
parser.add_option("-l", "--src-list", default=None,
help='read a list of source directories from a file; if no destination is provided then name destination after the list file without a suffix')
parser.add_option("-s", "--strip-src-ext", action="store_true",
help='strip extension suffix from a source parameter')
parser.add_option("-x", "--add-src-ext", default=None,
help='add extension suffix to a source parameter')
parser.add_option('-u',"--underlay", type='int', default=0,
help="underlay semitransparent tiles with a zoomed-in raster from a higher level")
parser.add_option("--tms", action="store_true",
help="force TMS type tiles")
parser.add_option("--tile-size", default='256,256',metavar="SIZE_X,SIZE_Y",
help='tile size (default: 256,256)')
parser.add_option("-q", "--quiet", action="store_true")
parser.add_option("-d", "--debug", action="store_true")
parser.add_option("--nothreads", action="store_true",
help="do not use multiprocessing")
(options, args) = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if options.debug else
(logging.ERROR if options.quiet else logging.INFO))
ld(options)
if options.src_list:
src_dirs=[i.rstrip('\n') for i in open(options.src_list,'r')]
try:
dst_dir=args[-1]
except:
dst_dir=os.path.splitext(options.src_list)[0]
else:
try:
src_dirs=args[0:-1]
dst_dir=args[-1]
except:
raise Exception("No source(s) or/and destination specified")
if options.nothreads or options.debug:
set_nothreads()
if options.remove_dest:
shutil.rmtree(dst_dir,ignore_errors=True)
if not os.path.exists(dst_dir):
try:
os.makedirs(dst_dir)
except os.error: pass
for src in src_dirs:
if not (src.startswith("#") or src.strip() == ''): # ignore sources with names starting with "#"
MergeSet(src, dst_dir)
| {
"content_hash": "627b7a32cc56c400be41ab9f6cde3e7f",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 151,
"avg_line_length": 37.41754385964912,
"alnum_prop": 0.5541072768192048,
"repo_name": "manimaul/MX-Cart",
"id": "45355897c22f59bc58cc1c572b11928cfd0abc8c",
"size": "12006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "my_tilers_tools/tiles_merge_simple.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "219404"
}
],
"symlink_target": ""
} |
import re
from webob import exc as w_exc
from midonetclient import exc
from neutron.api.v2 import base
from neutron.common import exceptions as n_exc
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
PLURAL_NAME_MAP = {}
def handle_api_error(fn):
"""Wrapper for methods that throws custom exceptions."""
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except (w_exc.HTTPException, exc.MidoApiConnectionError) as ex:
raise MidonetApiException(msg=ex)
return wrapped
class MidonetApiException(n_exc.NeutronException):
message = _("MidoNet API error: %(msg)s")
class MidonetPluginException(n_exc.NeutronException):
message = _("%(msg)s")
def generate_methods(*methods):
"""Decorator for classes that represents which methods are required by the
classes.
:param methods: The list of methods to be generated automatically. They
must be some or one of 'list', 'show', 'create', 'update'
and 'delete'.
"""
@handle_api_error
def create_resource(self, context, resource):
pass
@handle_api_error
def update_resource(self, context, id, resource):
pass
@handle_api_error
def get_resource(self, context, id, fields=None):
pass
@handle_api_error
def get_resources(self, context, filters=None, fields=None):
pass
@handle_api_error
def delete_resource(self, context, id):
pass
AVAILABLE_METHOD_MAP = {base.Controller.LIST: get_resources,
base.Controller.SHOW: get_resource,
base.Controller.CREATE: create_resource,
base.Controller.UPDATE: update_resource,
base.Controller.DELETE: delete_resource}
ALLOWED_METHODS = AVAILABLE_METHOD_MAP.keys()
required_methods = [method for method in methods
if method in ALLOWED_METHODS]
def wrapper(cls):
        # Use the first capitalized word as an alias.
try:
alias = getattr(cls, 'ALIAS')
except AttributeError:
[capitalized_resource] = re.findall(
'^[A-Z][a-z0-9_]*', cls.__name__)
alias = capitalized_resource.lower()
setattr(cls, 'ALIAS', alias)
parent = getattr(cls, 'PARENT', None)
if parent:
alias = '%s_%s' % (parent, alias)
for method in required_methods:
if method in [base.Controller.LIST, base.Controller.SHOW]:
if method == base.Controller.LIST:
pluralized_alias = PLURAL_NAME_MAP.get(
alias, '%ss' % alias)
method_name = 'get_' + pluralized_alias
else:
method_name = 'get_' + alias
else:
method_name = method + '_' + alias
try:
getattr(cls, method_name)
abstract_methods = getattr(cls, '__abstractmethods__', None)
if abstract_methods is not None and (
method_name in abstract_methods):
setattr(cls, method_name, AVAILABLE_METHOD_MAP[method])
implemented_method = frozenset([method_name])
abstract_methods = abstract_methods - implemented_method
setattr(cls, '__abstractmethods__', abstract_methods)
except AttributeError:
setattr(cls, method_name, AVAILABLE_METHOD_MAP[method])
return cls
return wrapper
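# Editorial sketch (not part of the original module; the class and resource
# names below are hypothetical): the decorator fills in any still-abstract
# CRUD handlers on the decorated class, deriving the alias from the first
# capitalized word of the class name.
#
#   @generate_methods(base.Controller.LIST, base.Controller.SHOW)
#   class BridgeHandlerMixin(object):
#       """Gets get_bridges() and get_bridge() stubs unless already defined."""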
| {
"content_hash": "4c3f710a2d9ab4874dbc4f64b0c16f0e",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 34.02803738317757,
"alnum_prop": 0.5781378742103818,
"repo_name": "midokura/python-neutron-plugin-midonet",
"id": "9b88933f2dfc15c114025b061398d86671bc3fc4",
"size": "4319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "midonet/neutron/common/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "254357"
},
{
"name": "Shell",
"bytes": "9711"
}
],
"symlink_target": ""
} |
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
import re
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(db_index=True, help_text='Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters', max_length=30, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\w.@+-]+$'), 'Enter a valid username.', 'invalid')], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'ordering': ['-date_joined'],
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| {
"content_hash": "d6dd93153674eb65d5232d39ad96d90a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 322,
"avg_line_length": 65.34883720930233,
"alnum_prop": 0.6377224199288256,
"repo_name": "SPARLab/BikeMaps",
"id": "d7fc97fa90b363f0c04838d8a6ac23de6d4998e8",
"size": "2857",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "spirit/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15111"
},
{
"name": "HTML",
"bytes": "134960"
},
{
"name": "JavaScript",
"bytes": "73792"
},
{
"name": "Python",
"bytes": "965817"
}
],
"symlink_target": ""
} |
from cswaExtras import getConfig, getCSID
form = {'webapp': 'checkfilenamesProd'}
config = getConfig(form)
objectnumber = '5-1758'
print getCSID('approxobjectnumbers', objectnumber, config)
| {
"content_hash": "7089ac4023aff3010acd2968b443b5dd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.7801047120418848,
"repo_name": "itsdavidbaxter/Tools",
"id": "51724e63f25c64aed474ee63550e58db654afa50",
"size": "351",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/pahma/filenamechecker/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2175"
},
{
"name": "CSS",
"bytes": "449505"
},
{
"name": "Cucumber",
"bytes": "34113"
},
{
"name": "HTML",
"bytes": "1397777"
},
{
"name": "Java",
"bytes": "3252608"
},
{
"name": "JavaScript",
"bytes": "3605790"
},
{
"name": "PLSQL",
"bytes": "14006"
},
{
"name": "PLpgSQL",
"bytes": "69887"
},
{
"name": "Perl",
"bytes": "78989"
},
{
"name": "Perl6",
"bytes": "5514"
},
{
"name": "Python",
"bytes": "507713"
},
{
"name": "Ruby",
"bytes": "13655"
},
{
"name": "SQLPL",
"bytes": "18518"
},
{
"name": "Shell",
"bytes": "335949"
}
],
"symlink_target": ""
} |
class Group:
def __init__(self, name, header, footer):
self.name = name
self.header = header
        self.footer = footer
| {
"content_hash": "e190ecb2f6759ef1e4133a225c7e21a6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 24.5,
"alnum_prop": 0.5510204081632653,
"repo_name": "eugence/python_training",
"id": "677dea9918e60ab21e15df6e3b22efe8f56969e5",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "group.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7434"
}
],
"symlink_target": ""
} |
from google.cloud import compute_v1
# <INGREDIENT is_preemptible>
def is_preemptible(project_id: str, zone: str, instance_name: str) -> bool:
"""
Check if a given instance is preemptible or not.
Args:
project_id: project ID or project number of the Cloud project you want to use.
zone: name of the zone you want to use. For example: "us-west3-b"
instance_name: name of the virtual machine to check.
Returns:
The preemptible status of the instance.
"""
instance_client = compute_v1.InstancesClient()
instance = instance_client.get(
project=project_id, zone=zone, instance=instance_name
)
return instance.scheduling.preemptible
# </INGREDIENT>
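# Example usage (editorial sketch; the project, zone and instance names are
# placeholders):
#
#   if is_preemptible("my-project", "us-west3-b", "my-instance"):
#       print("This instance can be preempted by Compute Engine.")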
| {
"content_hash": "209d09a6607dc350f73f97d68a9e3e29",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 86,
"avg_line_length": 36,
"alnum_prop": 0.6847222222222222,
"repo_name": "googleapis/python-compute",
"id": "7ff9fc860e668b11182b8dfa2de5149c2fd27dd6",
"size": "1567",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "samples/ingredients/instances/preemptible/get.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
} |
from feincms_markup.parsers.base import MarkupBaseParser
class TextileParser(MarkupBaseParser):
pass
| {
"content_hash": "5058af19ddca10ed3a8b41626baad626",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 56,
"avg_line_length": 21.4,
"alnum_prop": 0.822429906542056,
"repo_name": "indexofire/feincms-markup",
"id": "5bcaae108c08a2f53d504c2c15644c0d22320f32",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feincms_markup/parsers/textile/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "58257"
},
{
"name": "Python",
"bytes": "12215"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
import pandas.util.testing as tm
class TestCategoricalDtypes:
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
assert c1.is_dtype_equal(c1)
assert c2.is_dtype_equal(c2)
assert c3.is_dtype_equal(c3)
assert c1.is_dtype_equal(c2)
assert not c1.is_dtype_equal(c3)
assert not c1.is_dtype_equal(Index(list('aabca')))
assert not c1.is_dtype_equal(c1.astype(object))
assert c1.is_dtype_equal(CategoricalIndex(c1))
assert (c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
# GH 16659
s1 = Series(c1)
s2 = Series(c2)
s3 = Series(c3)
assert c1.is_dtype_equal(s1)
assert c2.is_dtype_equal(s2)
assert c3.is_dtype_equal(s3)
assert c1.is_dtype_equal(s2)
assert not c1.is_dtype_equal(s3)
assert not c1.is_dtype_equal(s1.astype(object))
def test_set_dtype_same(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(list('abcd')))
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories, Index(list('abcd')))
@pytest.mark.parametrize('values, categories, new_categories', [
# No NaNs, same cats, same order
(['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
# Same, unsorted
(['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
# NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
(['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
# Introduce NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a']),
(['a', 'b', 'c'], ['a', 'b'], ['b']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
# No overlap
(['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
])
@pytest.mark.parametrize('ordered', [True, False])
def test_set_dtype_many(self, values, categories, new_categories,
ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
c = Categorical(['a', 'b', 'c'], ['d', 'e'])
result = c._set_dtype(CategoricalDtype(['a', 'b']))
expected = Categorical([None, None, None], categories=['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = Categorical(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
result = Categorical(['foo%05d' % i for i in range(40000)])
assert result.codes.dtype == 'int32'
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = result.add_categories(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
assert result.codes.dtype == 'int8'
@pytest.mark.parametrize('ordered', [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list('abbaaccc'), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = 'could not convert string to float'
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype=np.int)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=np.float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('dtype_ordered', [True, False])
@pytest.mark.parametrize('cat_ordered', [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH 10696/18593
data = list('abcaacbab')
cat = Categorical(data, categories=list('bac'), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(
data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list('adc'), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype('category')
expected = cat
tm.assert_categorical_equal(result, expected)
def test_iter_python_types(self):
# GH-19909
cat = Categorical([1, 2])
assert isinstance(list(cat)[0], int)
assert isinstance(cat.tolist()[0], int)
def test_iter_python_types_datetime(self):
cat = Categorical([Timestamp('2017-01-01'),
Timestamp('2017-01-02')])
assert isinstance(list(cat)[0], Timestamp)
assert isinstance(cat.tolist()[0], Timestamp)
| {
"content_hash": "2f219bbac079aacad26b0de6552a3c2f",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 78,
"avg_line_length": 38.630057803468205,
"alnum_prop": 0.5653149783031572,
"repo_name": "cbertinato/pandas",
"id": "b8c223ab3b04e2623c5a398e4f9248b2c660c9f4",
"size": "6683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/arrays/categorical/test_dtypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394466"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "15010333"
},
{
"name": "Shell",
"bytes": "27209"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
"""
Gathers a set of related titles given an input list of titles.
Usage:
./subsample_similar_titles <n>
Options:
-h --help Show this documentation
"""
import random
import sys
import traceback
import docopt
import requests
from menagerie.formatting import tsv
from menagerie.iteration import aggregate
HEADERS = [
'input_title',
'similar_title',
'rank',
'snippet'
]
def main():
args = docopt.docopt(__doc__)
similar_titles = tsv.Reader(sys.stdin)
n = int(args['<n>'])
run(similar_titles, n)
def run(similar_titles, n):
grouped_similar_titles = aggregate(similar_titles,
by=lambda r:r.input_title)
writer = tsv.Writer(sys.stdout, headers=HEADERS)
for input_title, similar_titles in grouped_similar_titles:
similar_titles = list(similar_titles)
random.shuffle(similar_titles)
for similar_title in similar_titles[:n]:
writer.write(similar_title.values())
if __name__ == "__main__": main()
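# Usage sketch (editorial addition; the file names are placeholders): the
# script reads a TSV of similar titles on stdin and keeps at most <n>
# randomly chosen rows per input title, e.g.
#
#   ./subsample_similar_titles 5 < similar_titles.tsv > sample.tsv
#
# The input is expected to carry the same columns listed in HEADERS.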
| {
"content_hash": "44dcbd5bd935b6d183b513818c3fe536",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 65,
"avg_line_length": 21.19607843137255,
"alnum_prop": 0.6114708603145236,
"repo_name": "halfak/Newcomer-task-suggestions",
"id": "537a6b47bd1084d1f6842743b2e35c52d9e1d87d",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ts/subsample_similar_titles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16357"
},
{
"name": "R",
"bytes": "30691"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
} |
import datetime
import pathlib
import platform
import plistlib
import re
import subprocess
from distutils.version import StrictVersion
import sal
__version__ = "1.1.0"
def main():
sus_submission = {}
sus_submission["facts"] = get_sus_facts()
# Process managed items and update histories.
sus_submission["managed_items"] = get_sus_install_report()
sus_submission["update_history"] = []
pending = get_pending()
sus_submission["managed_items"].update(pending)
sal.set_checkin_results("Apple Software Update", sus_submission)
def get_sus_install_report():
"""Return installed apple updates from softwareupdate"""
try:
history = plistlib.loads(
pathlib.Path("/Library/Receipts/InstallHistory.plist").read_bytes()
)
except (IOError, plistlib.InvalidFileException):
history = []
return {
i["displayName"]: {
"date_managed": i["date"],
"status": "PRESENT",
"data": {
"type": "Apple SUS Install",
"version": i["displayVersion"].strip(),
},
}
for i in history
if i["processName"] == "softwareupdated"
}
def get_sus_facts():
result = {"checkin_module_version": __version__}
history_limit = datetime.datetime.now().astimezone(
datetime.timezone.utc
) - datetime.timedelta(days=1)
cmd = ["softwareupdate", "--dump-state"]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
return result
with open("/var/log/install.log") as handle:
install_log = handle.readlines()
for line in reversed(install_log):
# TODO: Stop if we go before the subprocess call datetime-wise
if "Catalog: http" in line and "catalog" not in result:
result["catalog"] = line.split()[-1]
elif "SUScan: Elapsed scan time = " in line and "last_check" not in result:
result["last_check"] = _get_log_time(line).isoformat()
if (log_time := _get_log_time(line)) and log_time < history_limit:
# Let's not look earlier than when we started
# softwareupdate.
break
elif "catalog" in result and "last_check" in result:
# Once we have both facts, bail; no need to process the
# entire file.
break
return result
def _get_log_time(line):
# Example date 2019-02-08 10:49:56-05
raw_datetime = " ".join(line.split()[:2])
# Add 0's to make TZ offset work with strptime (expects a 4
# digit offset). This should hopefully cover even those off
# by minutes locations. e.g. I would hope the above log time in
# French Polynesia would look like this: 2019-02-08 10:49:56-0930
raw_datetime += (24 - len(raw_datetime)) * "0"
try:
aware_datetime = datetime.datetime.strptime(raw_datetime, "%Y-%m-%d %H:%M:%S%z")
except ValueError:
aware_datetime = None
# Convert to UTC time.
return (
None if not aware_datetime else aware_datetime.astimezone(datetime.timezone.utc)
)
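# Editorial note (illustrative, not in the original): the zero padding above
# widens a short UTC offset so that strptime's %z accepts it, e.g.
#   "2019-02-08 10:49:56-05"  ->  "2019-02-08 10:49:56-0500"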
def get_pending():
pending_items = {}
cmd = ["softwareupdate", "-l", "--no-scan"]
try:
# softwareupdate outputs "No new software available" to stderr,
# so we pipe it off.
output = subprocess.check_output(cmd, stderr=subprocess.PIPE, text=True)
except subprocess.CalledProcessError:
return pending_items
# The following regex code is from Shea Craig's work on the Salt
# mac_softwareupdate module. Reference that for future updates.
if StrictVersion(platform.mac_ver()[0]) >= StrictVersion("10.15"):
# Example output:
# Software Update Tool
#
# Finding available software
# Software Update found the following new or updated software:
# * Label: Command Line Tools beta 5 for Xcode-11.0
# Title: Command Line Tools beta 5 for Xcode, Version: 11.0, Size: 224804K, Recommended: YES,
# * Label: macOS Catalina Developer Beta-6
# Title: macOS Catalina Public Beta, Version: 5, Size: 3084292K, Recommended: YES, Action: restart,
# * Label: BridgeOSUpdateCustomer
# Title: BridgeOSUpdateCustomer, Version: 10.15.0.1.1.1560926689, Size: 390674K, Recommended: YES, Action: shut down,
# - Label: iCal-1.0.2
# Title: iCal, Version: 1.0.2, Size: 6520K,
rexp = re.compile(
r"(?m)" # Turn on multiline matching
r"^\s*[*-] Label: " # Name lines start with * or - and "Label: "
r"(?P<name>[^ ].*)[\r\n]" # Capture the rest of that line; this is the update name.
r".*Version: (?P<version>[^,]*), " # Grab the version number.
r"Size: (?P<size>[^,]*),\s*" # Grab the size; unused at this time.
r"(?P<recommended>Recommended: YES,)?\s*" # Optionally grab the recommended flag.
r"(?P<action>Action: (?:restart|shut down),)?" # Optionally grab an action.
)
else:
# Example output:
# Software Update Tool
#
# Finding available software
# Software Update found the following new or updated software:
# * Command Line Tools (macOS Mojave version 10.14) for Xcode-10.3
# Command Line Tools (macOS Mojave version 10.14) for Xcode (10.3), 199140K [recommended]
# * macOS 10.14.1 Update
# macOS 10.14.1 Update (10.14.1), 199140K [recommended] [restart]
# * BridgeOSUpdateCustomer
# BridgeOSUpdateCustomer (10.14.4.1.1.1555388607), 328394K, [recommended] [shut down]
# - iCal-1.0.2
# iCal, (1.0.2), 6520K
rexp = re.compile(
r"(?m)" # Turn on multiline matching
r"^\s+[*-] " # Name lines start with 3 spaces and either a * or a -.
r"(?P<name>.*)[\r\n]" # The rest of that line is the name.
r".*\((?P<version>[^ \)]*)" # Capture the last parenthesized value on the next line.
r"[^\r\n\[]*(?P<recommended>\[recommended\])?\s?" # Capture [recommended] if there.
r"(?P<action>\[(?:restart|shut down)\])?" # Capture an action if present.
)
# Convert local time to UTC time represented as a ISO 8601 str.
now = datetime.datetime.now().astimezone(datetime.timezone.utc).isoformat()
return {
m.group("name"): {
"date_managed": now,
"status": "PENDING",
"data": {
"version": m.group("version"),
"recommended": "TRUE"
if "recommended" in m.group("recommended")
else "FALSE",
"action": _bracket_cleanup(m, "action"),
},
}
for m in rexp.finditer(output)
}
def _bracket_cleanup(match, key):
"""Strip out [ and ] and uppercase SUS output"""
return re.sub(r"[\[\]]", "", match.group(key) or "").upper()
if __name__ == "__main__":
main()
| {
"content_hash": "9a4729cb719dea3f0846c3d37f5e9aaf",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 129,
"avg_line_length": 37.75806451612903,
"alnum_prop": 0.586928662964545,
"repo_name": "salopensource/sal-scripts",
"id": "a778d5d99e2b7248760b1c7a7255aee812ef09db",
"size": "7088",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "payload/usr/local/sal/checkin_modules/apple_sus_checkin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "44368"
},
{
"name": "Python",
"bytes": "70419"
},
{
"name": "Shell",
"bytes": "2641"
},
{
"name": "Swift",
"bytes": "2394"
}
],
"symlink_target": ""
} |
""" RF2 Concept File access routines
"""
from rf2db.parsers.RF2BaseParser import RF2Concept
from rf2db.parsers.RF2Iterator import RF2ConceptList, iter_parms
from rf2db.db.RF2FileCommon import RF2FileWrapper, global_rf2_parms, rf2_values
from rf2db.db.RF2DBConnection import cp_values
from rf2db.utils.lfu_cache import lfu_cache, clear_caches
from rf2db.utils.listutils import listify
from rf2db.parameterparser.ParmParser import ParameterDefinitionList, intparam, enumparam, sctidparam
from rf2db.constants.RF2ValueSets import primitive, defined
# Parameters for concept access
concept_parms = ParameterDefinitionList(global_rf2_parms)
concept_parms.concept = sctidparam()
concept_list_parms = ParameterDefinitionList(global_rf2_parms)
concept_list_parms.add(iter_parms)
concept_list_parms.after = intparam()
new_concept_parms = ParameterDefinitionList(global_rf2_parms)
new_concept_parms.sctid = sctidparam()
new_concept_parms.effectiveTime = intparam()
new_concept_parms.definitionstatus = enumparam(['p', 'f'], default='p')
update_concept_parms = ParameterDefinitionList(global_rf2_parms)
update_concept_parms.definitionstatus = enumparam(['p', 'f'])
delete_concept_parms = ParameterDefinitionList(global_rf2_parms)
class ConceptDB(RF2FileWrapper):
directory = 'Terminology'
prefixes = ['sct2_Concept_']
table = 'concept'
createSTMT = """CREATE TABLE IF NOT EXISTS %(table)s (
%(base)s,
definitionStatusId bigint(20) NOT NULL,
KEY id (id),
%(keys)s );"""
def __init__(self, *args, **kwargs):
RF2FileWrapper.__init__(self, *args, **kwargs)
hasrf2rec = True
@classmethod
def rf2rec(cls, *args, **kwargs):
return RF2Concept(*args, **kwargs)
@lfu_cache(maxsize=200)
def read(self, cid, **kwargs):
"""
Read the concept record
@param cid: concept sctid
@return: Updated RF2Concept record if valid else None
"""
db = self.connect()
return db.singleton_query(self._fname, RF2Concept, filter_="id=%s" % cid, **kwargs)
def _doupdate(self, cid, changeset, effectivetime=None, definitionstatusid=None, **kwargs):
""" Helper function to update a concept record
@param cid: concept id to update
@param changeset: changeset
@param effectivetime: new effective time
        @param definitionstatusid: new definition status if not empty
@param kwargs: context
"""
fname = self._fname
effectivetime, _ = self.effectivetime_and_moduleid(effectivetime, None)
query = "UPDATE %(fname)s SET effectiveTime=%(effectivetime)s, "
query += "definitionStatusId=%(definitionstatusid)s, " if definitionstatusid else ""
query += "WHERE id=%(cid)s AND changeset='%(changeset)s' AND locked=1"
db = self.connect()
db.execute_query(query % vars(), **kwargs)
db.commit()
def update(self, cid, changeset=None, definitionstatus=None, **kwargs):
""" Update an existing concept record
@param cid: sctid of concept to update
@param changeset: containing changeset
@param definitionstatus: field to update
@param kwargs: context
@return: new record if update successful, otherwise an error message.
"""
csuri = self.tochangesetuuid(changeset, **kwargs)
if not csuri:
return self.changeseterror(changeset)
changeset = csuri
current_value = self.read(cid, changeset=changeset, **kwargs)
if not current_value:
return "UnknownEntity - concept not found"
# Various situations:
# 1) record is not locked:
# 1a) record will change
# 1aa) snapshot: Not allowed.
# 1ab) full: update record with lock, changeset, new effectiveDate and changes
# 1b) record will not change
# Return existing record untouched
# 2) Record is locked
# 2a) record changeset = changeset?
# 1aa) snapshot: Change record value and effectivedate
# 1ab) full: add new record with lock, changeset, new effectiveDate and changes
# 2b) record changeset <> changeset
# 2aa) snapshot: Not allowed
# 2ab) full: ????
#
# Don't update effective time if nothing will change (PUT is idempotent)
if not current_value.locked:
if current_value.changeset != changeset or \
current_value.isPrimitive and definitionstatus == 'f' or \
current_value.isFullyDefined and definitionstatus == 'p':
if cp_values.ss:
return "Concept: Cannot update an existing snapshot record"
else:
return "Concept: Full record update is not implemented"
else:
return current_value
else:
if current_value.changeset == changeset:
if cp_values.ss:
definitionstatusid = primitive if (definitionstatus == 'p' and current_value.isFullyDefined) \
else defined if (definitionstatus == 'f' and current_value.isPrimitive) else None
                    self._doupdate(cid, changeset, definitionstatusid=definitionstatusid, **kwargs)
clear_caches()
return self.read(cid, _nocache=True, **kwargs)
else:
return "Concept: Full record update is not implemented"
else:
return "Concept: Record is locked under a different changeset"
def delete(self, cid, changeset=None, **kwargs):
""" Delete or deactivate a concept
@param cid: concept identifier
@param changeset: containing changeset
@param kwargs: context
@return: None if success otherwise an error message
"""
csuri = self.tochangesetuuid(changeset, **kwargs)
if not csuri:
return self.changeseterror(changeset)
changeset = csuri
# delete is idempotent, so if we can't find it or it is already gone claim success
kwargs['active'] = 0 # read even if inactive
current_value = self.read(cid, **kwargs)
if not current_value or not current_value.isActive():
return None
if not current_value.locked:
if cp_values.ss:
return "Cannot delete committed concepts in a snapshot database"
else:
return "Concept: Full record delete is not implemented"
else:
if current_value.changeset == changeset:
db = self.connect()
db.execute_query(
"DELETE FROM %(fname)s WHERE id=%(cid)s AND changeset=%(changeset)s AND locked=1" % vars())
db.commit()
clear_caches()
return None
else:
return "Concept: Record is locked under a different changeset"
def add(self, changeset, cid=None, effectivetime=None, moduleid=None, definitionstatus='p', **kwargs):
"""
@param changeset: Changeset identifier.
@type changeset: UUID
@param cid: concept identifier. Default: next concept in server namespace
@param effectivetime: Timestamp for record. Default: today's date
@param moduleid: owning module. Default: service module (ep_values.moduleId)
@param definitionstatus: 'p' (primitive) or 'f' (fully defined). Default: 'p'
@return: Concept record or none if error.
"""
cs = self.tochangesetuuid(changeset, **kwargs)
if not cs:
return self.changeseterror(changeset)
db = self.connect()
if not cid:
cid = self.newconceptid()
effectivetime, moduleid = self.effectivetime_and_moduleid(effectivetime, moduleid)
definitionstatusid = defined if definitionstatus == 'f' else primitive
fname = self._fname
db.execute("INSERT INTO %(fname)s (id, effectiveTime, active, moduleId, "
"definitionStatusId, changeset, locked) "
"VALUES (%(cid)s, %(effectivetime)s, 1, %(moduleid)s, "
"%(definitionstatusid)s, '%(cs)s', 1 )" % vars())
db.commit()
clear_caches()
return self.read(cid, changeset=cs, **kwargs)
def buildQuery(self, active=1, order='asc', sort=None, page=0, maxtoreturn=None, after=0,
changeset=None, moduleid=None, locked=False, filtr=None,
id_only=False, **kwargs):
"""
:param active: Query active only or all
:param order: Sort order. 'asc', 'desc'. If None, don't sort
:param sort: Additional sort keys. Last key is id
:param page: start page in maxtoreturn units
:param maxtoreturn: maximum number to return. 0 means return count, -1 means return everything
:param after: id to start after
:param changeset: include this changeset name or uuid if present
:param moduleid: restrict query to this module
:param locked: if true, return only locked records
        :param filtr: additional SQL filter
:param id_only: True means only select ids
:return:
"""
if changeset:
changeset = self.tochangesetuuid(changeset, active=active, **kwargs)
if not changeset and locked:
return []
if maxtoreturn is None:
maxtoreturn = rf2_values.defaultblocksize
if not cp_values.ss:
raise Exception('FULL table not supported for complete concept list')
start = (page * maxtoreturn) if maxtoreturn > 0 else 0
query = 'SELECT %s FROM %s' % ('id' if id_only else 'count(id)' if maxtoreturn == 0 else '*', self._fname)
query += ' WHERE %s ' % ('active=1' if active else 'TRUE')
if locked and not changeset:
query += " AND locked = 1 "
elif locked and changeset:
query += " AND (changeset = '%s' AND locked = 1)" % changeset
elif changeset:
query += " AND (changeset = '%s' OR locked = 0)" % changeset
else:
query += ' AND locked = 0'
if after:
query += ' AND id > %s' % after
if moduleid:
query += ' AND ' + ' AND '.join(['moduleid = %s' % m for m in listify(moduleid)])
if filtr:
query += ' AND (' + filtr + ')'
if maxtoreturn != 0 and (sort is None or sort.lower() != 'none'):
query += ' ORDER BY '
if sort:
query += ", ".join(("%s " % s + order) for s in listify(sort)) + ", "
query += ' id %s' % order
if maxtoreturn > 0:
query += ' LIMIT %s' % (maxtoreturn + 1)
if start > 0:
query += ' OFFSET %s' % start
return query
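    # Editorial sketch (not part of the original module): with the defaults
    # and no changeset, buildQuery() yields roughly
    #   SELECT * FROM concept WHERE active=1 AND locked = 0
    #   ORDER BY id asc LIMIT <maxtoreturn + 1>
    # while maxtoreturn=0 switches the select list to count(id) so only a
    # total is returned.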
def getAllConcepts(self, maxtoreturn=None, **kwargs):
"""
Read a number of concept records
@param parmlist: parsed parameter list
@param as_iter: True means leave the result unresolved
"""
query = self.buildQuery(maxtoreturn=maxtoreturn, **kwargs)
db = self.connect()
db.execute(query)
return [RF2Concept(c) for c in db.ResultsGenerator(db)] if maxtoreturn else \
list(db.ResultsGenerator(db))
@classmethod
def refsettype(cls, parms):
return RF2ConceptList(parms)
| {
"content_hash": "90ec659356b7d4aa91c470cbf0c161c3",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 114,
"avg_line_length": 42.765799256505574,
"alnum_prop": 0.6071801112656467,
"repo_name": "cts2/rf2db",
"id": "fca293b08cefb28ad9dbf80a12c01d7b30d030c8",
"size": "13066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rf2db/db/RF2ConceptFile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "900907"
},
{
"name": "Shell",
"bytes": "293"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sys
with open('README.rst') as f:
readme = f.read()
svem_flag = '--single-version-externally-managed'
if svem_flag in sys.argv:
sys.argv.remove(svem_flag)
setup(name='saspy',
version='1.2.1',
description='A python interface to SAS',
long_description=readme,
author='Tom Weber',
author_email='[email protected]',
url='https://github.com/sassoftware/saspy',
packages=['saspy'],
cmdclass={},
package_data={'saspy': ['*.sas']},
install_requires=['pygments', 'IPython', 'pre-commit'],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
]
)
| {
"content_hash": "1394fe6da5796fe57ba3ada5541751c0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 61,
"avg_line_length": 27.517241379310345,
"alnum_prop": 0.6203007518796992,
"repo_name": "spljaa/saspy",
"id": "71ce05e1c167bf9124ff473c150fdc73936acfd1",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31655"
},
{
"name": "SAS",
"bytes": "2833"
}
],
"symlink_target": ""
} |
import pytest
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import SasLogicalJbodAttachmentFactsModule
ERROR_MSG = 'Fake message error'
PARAMS_GET_ALL = dict(
config='config.json',
name=None
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="SAS Logical JBOD Attachment 2"
)
SAS_JBOD_LOGICAL_ATTACHMENTS = [{"name": "SAS Logical JBOD Attachment 1"},
{"name": "SAS Logical JBOD Attachment 2"}]
@pytest.mark.resource(TestSasLogicalJbodAttachmentFactsModule='sas_logical_jbod_attachments')
class TestSasLogicalJbodAttachmentFactsModule(OneViewBaseFactsTest):
def test_should_get_all_sas_logical_jbod_attachments(self):
self.resource.get_all.return_value = SAS_JBOD_LOGICAL_ATTACHMENTS
self.mock_ansible_module.params = PARAMS_GET_ALL
SasLogicalJbodAttachmentFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(sas_logical_jbod_attachments=(SAS_JBOD_LOGICAL_ATTACHMENTS))
)
def test_should_get_sas_logical_jbod_attachment_by_name(self):
self.resource.get_by.return_value = [SAS_JBOD_LOGICAL_ATTACHMENTS[1]]
self.mock_ansible_module.params = PARAMS_GET_BY_NAME
SasLogicalJbodAttachmentFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(sas_logical_jbod_attachments=([SAS_JBOD_LOGICAL_ATTACHMENTS[1]]))
)
if __name__ == '__main__':
pytest.main([__file__])
| {
"content_hash": "9e6d268fabcf97b78a333aca0a213933",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 33.583333333333336,
"alnum_prop": 0.6929280397022333,
"repo_name": "HewlettPackard/oneview-ansible",
"id": "4687f3f01cdd300c09d8b4c5df3416ed6988a6dc",
"size": "2271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_oneview_sas_logical_jbod_attachment_facts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1716153"
},
{
"name": "Shell",
"bytes": "5675"
}
],
"symlink_target": ""
} |
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import sys
import tarfile
import shutil
from utils import logger
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import IN, FILE_IN, FILE_OUT
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.api import barrier, compss_wait_on, compss_open, compss_delete_file
except ImportError:
logger.warn("[Warning] Cannot import \"pycompss\" API packages.")
logger.warn(" Using mock decorators.")
from utils.dummy_pycompss import IN, FILE_IN, FILE_OUT # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task, constraint # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import barrier, compss_wait_on, compss_open, compss_delete_file # pylint: disable=ungrouped-imports
from basic_modules.tool import Tool
from basic_modules.metadata import Metadata
from tool.fastq_splitter import fastq_splitter
from tool.aligner_utils import alignerUtils
from tool.bam_utils import bamUtilsTask
# ------------------------------------------------------------------------------
class bwaAlignerMEMTool(Tool): # pylint: disable=invalid-name
"""
Tool for aligning sequence reads to a genome using BWA
"""
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("BWA MEM Aligner")
Tool.__init__(self)
if configuration is None:
configuration = {}
self.configuration.update(configuration)
@task(returns=bool, genome_file_name=IN, genome_idx=FILE_IN,
amb_file=FILE_OUT, ann_file=FILE_OUT, bwt_file=FILE_OUT,
pac_file=FILE_OUT, sa_file=FILE_OUT)
def untar_index( # pylint: disable=too-many-locals,too-many-arguments
self, genome_file_name, genome_idx,
amb_file, ann_file, bwt_file, pac_file, sa_file):
"""
Extracts the BWA index files from the genome index tar file.
Parameters
----------
genome_file_name : str
Location string of the genome fasta file
genome_idx : str
Location of the BWA index file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
Returns
-------
bool
Boolean indicating if the task was successful
"""
if "no-untar" in self.configuration and self.configuration["no-untar"] is True:
return True
gfl = genome_file_name.split("/")
au_handle = alignerUtils()
au_handle.bwa_untar_index(
gfl[-1], genome_idx, amb_file, ann_file, bwt_file, pac_file, sa_file)
return True
@constraint(ComputingUnits="4")
@task(returns=bool, genome_file_loc=FILE_IN, read_file_loc=FILE_IN,
bam_loc=FILE_OUT, amb_file=FILE_IN, ann_file=FILE_IN, bwt_file=FILE_IN,
pac_file=FILE_IN, sa_file=FILE_IN, mem_params=IN, isModifier=False)
def bwa_aligner_single( # pylint: disable=too-many-arguments, no-self-use
self, genome_file_loc, read_file_loc, bam_loc,
amb_file, ann_file, bwt_file, pac_file, sa_file, # pylint: disable=unused-argument
mem_params):
"""
BWA MEM Aligner - Single Ended
Parameters
----------
genome_file_loc : str
Location of the genomic fasta
read_file_loc : str
Location of the FASTQ file
bam_loc : str
Location of the output aligned bam file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
mem_params : dict
Alignment parameters
Returns
-------
        bool
            Boolean indicating if the task was successful
"""
if (
os.path.isfile(read_file_loc) is False or
os.path.getsize(read_file_loc) == 0):
return False
out_bam = read_file_loc + '.out.bam'
au_handle = alignerUtils()
logger.info(
"BWA FINISHED: " + str(au_handle.bwa_mem_align_reads(
genome_file_loc, out_bam, mem_params, read_file_loc))
)
try:
with open(bam_loc, "wb") as f_out:
with open(out_bam, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("SINGLE ALIGNER: I/O error({0}): {1}".format(error.errno, error.strerror))
return False
os.remove(out_bam)
return True
@constraint(ComputingUnits="4")
@task(returns=bool, genome_file_loc=FILE_IN, read_file_loc1=FILE_IN,
read_file_loc2=FILE_IN, bam_loc=FILE_OUT,
amb_file=FILE_IN, ann_file=FILE_IN, bwt_file=FILE_IN,
pac_file=FILE_IN, sa_file=FILE_IN, mem_params=IN, isModifier=False)
def bwa_aligner_paired( # pylint: disable=too-many-arguments, no-self-use, too-many-locals
self, genome_file_loc, read_file_loc1, read_file_loc2, bam_loc,
amb_file, ann_file, bwt_file, pac_file, sa_file, mem_params): # pylint: disable=unused-argument
"""
BWA MEM Aligner - Paired End
Parameters
----------
genome_file_loc : str
Location of the genomic fasta
read_file_loc1 : str
Location of the FASTQ file
read_file_loc2 : str
Location of the FASTQ file
bam_loc : str
Location of the output aligned bam file
amb_file : str
Location of the amb index file
ann_file : str
Location of the ann index file
bwt_file : str
Location of the bwt index file
pac_file : str
Location of the pac index file
sa_file : str
Location of the sa index file
mem_params : dict
Alignment parameters
Returns
-------
        bool
            Boolean indicating if the task was successful
"""
out_bam = read_file_loc1 + '.out.bam'
au_handle = alignerUtils()
logger.info(
"BWA FINISHED: " + str(au_handle.bwa_mem_align_reads(
genome_file_loc, out_bam, mem_params, read_file_loc1, read_file_loc2))
)
try:
with open(bam_loc, "wb") as f_out:
with open(out_bam, "rb") as f_in:
f_out.write(f_in.read())
except (OSError, IOError) as error:
logger.fatal("PARIED ALIGNER: I/O error({0}): {1}".format(error.errno, error.strerror))
return False
os.remove(out_bam)
return True
@staticmethod
def get_mem_params(params):
"""
Function to handle to extraction of commandline parameters and formatting
them for use in the aligner for BWA MEM
Parameters
----------
params : dict
Returns
-------
list
"""
command_parameters = {
"bwa_mem_min_seed_len_param": ["-k", True],
"bwa_mem_band_width_param": ["-w", True],
"bwa_mem_zdropoff_param": ["-d", True],
"bwa_mem_reseeding_param": ["-r", True],
"bwa_mem_insensitive_param": ["-c", True],
"bwa_mem_paried_rescue_mode_param": ["-P", False],
"bwa_mem_matching_score_param": ["-A", True],
"bwa_mem_mismatch_penalty_param": ["-B", True],
"bwa_mem_gap_open_penalty_param": ["-O", True],
"bwa_mem_gap_ext_penalty_param": ["-E", True],
"bwa_mem_clipping_penalty_param": ["-L", True],
"bwa_mem_unpaired_penalty_param": ["-U", True],
"bwa_mem_reads_interleaved_param": ["-p", False],
"bwa_mem_complete_read_head_param": ["-R", True],
"bwa_mem_alignment_threshold_param": ["-T", True],
"bwa_mem_hard_clipping_param": ["-H", False],
"bwa_mem_short_split_secondary_param": ["-M", False]
}
command_params = []
for param in params:
if param in command_parameters:
if command_parameters[param][1] and params[param] != "":
command_params = command_params + [command_parameters[param][0], params[param]]
else:
if command_parameters[param][0] and params[param] is not False:
command_params.append(command_parameters[param][0])
return command_params
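    # Illustrative sketch (not part of the original module): a configuration
    # such as
    #   {"bwa_mem_min_seed_len_param": "19", "bwa_mem_hard_clipping_param": True}
    # would be translated by get_mem_params() into ["-k", "19", "-H"].
    # Value-taking options are emitted as flag/value pairs, while boolean
    # options are appended as bare flags when set.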
def run(self, input_files, input_metadata, output_files): # pylint: disable=too-many-branches,too-many-locals,too-many-statements
"""
The main function to align bam files to a genome using BWA
Parameters
----------
input_files : dict
File 0 is the genome file location, file 1 is the FASTQ file
        input_metadata : dict
output_files : dict
Returns
-------
output_files : dict
First element is a list of output_bam_files, second element is the
matching meta data
output_metadata : dict
"""
tasks_done = 0
task_count = 7
untar_idx = True
if "no-untar" in self.configuration and self.configuration["no-untar"] is True:
untar_idx = False
task_count = 5
index_files = {
"amb": input_files["genome"] + ".amb",
"ann": input_files["genome"] + ".ann",
"bwt": input_files["genome"] + ".bwt",
"pac": input_files["genome"] + ".pac",
"sa": input_files["genome"] + ".sa"
}
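        # The five entries above are the standard BWA index components
        # produced by "bwa index" (.amb, .ann, .bwt, .pac and .sa); they are
        # expected to sit alongside the genome FASTA once the index tar
        # archive has been unpacked.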
if untar_idx:
logger.progress("Untar Index", task_id=tasks_done, total=task_count)
self.untar_index(
input_files["genome"],
input_files["index"],
index_files["amb"],
index_files["ann"],
index_files["bwt"],
index_files["pac"],
index_files["sa"]
)
tasks_done += 1
logger.progress("Untar Index", task_id=tasks_done, total=task_count)
sources = [input_files["genome"]]
fqs = fastq_splitter()
fastq1 = input_files["loc"]
sources.append(input_files["loc"])
logger.progress("FASTQ Splitter", task_id=tasks_done, total=task_count)
fastq_file_gz = os.path.join(
self.configuration["execution"], os.path.split(fastq1)[1] + ".tar.gz")
if "fastq2" in input_files:
fastq2 = input_files["fastq2"]
sources.append(input_files["fastq2"])
fastq_file_list = fqs.paired_splitter(
fastq1, fastq2, fastq_file_gz
)
else:
fastq_file_list = fqs.single_splitter(
fastq1, fastq_file_gz
)
# Required to prevent iterating over the future objects
fastq_file_list = compss_wait_on(fastq_file_list)
# compss_delete_file(fastq1)
# if "fastq2" in input_files:
# compss_delete_file(fastq2)
if not fastq_file_list:
logger.fatal("FASTQ SPLITTER: run failed")
return {}, {}
if hasattr(sys, '_run_from_cmdl') is True:
pass
else:
logger.info("Getting the tar file")
with compss_open(fastq_file_gz, "rb") as f_in:
with open(fastq_file_gz, "wb") as f_out:
f_out.write(f_in.read())
gz_data_path = os.path.split(fastq_file_gz)[0]
try:
tar = tarfile.open(fastq_file_gz)
tar.extractall(path=gz_data_path)
tar.close()
os.remove(fastq_file_gz)
compss_delete_file(fastq_file_gz)
except tarfile.TarError:
logger.fatal("Split FASTQ files: Malformed tar file")
return {}, {}
tasks_done += 1
logger.progress("FASTQ Splitter", task_id=tasks_done, total=task_count)
# input and output share most metadata
output_metadata = {}
output_bam_file = output_files["output"]
# output_bai_file = output_files["bai"]
logger.info("BWA ALIGNER: Aligning sequence reads to the genome")
logger.progress("ALIGNER - jobs = " + str(len(fastq_file_list)),
task_id=tasks_done, total=task_count)
output_bam_list = []
for fastq_file_pair in fastq_file_list:
if "fastq2" in input_files:
tmp_fq1 = os.path.join(gz_data_path, "tmp", fastq_file_pair[0])
tmp_fq2 = os.path.join(gz_data_path, "tmp", fastq_file_pair[1])
output_bam_file_tmp = tmp_fq1 + ".bam"
output_bam_list.append(output_bam_file_tmp)
logger.info("BWA MEM FILES: " + tmp_fq1 + " - " + tmp_fq2)
self.bwa_aligner_paired(
str(input_files["genome"]), tmp_fq1, tmp_fq2, output_bam_file_tmp,
index_files["amb"],
index_files["ann"],
index_files["bwt"],
index_files["pac"],
index_files["sa"],
self.get_mem_params(self.configuration)
)
else:
tmp_fq = os.path.join(gz_data_path, "tmp", fastq_file_pair[0])
output_bam_file_tmp = tmp_fq + ".bam"
output_bam_list.append(output_bam_file_tmp)
logger.info("BWA MEM FILES: " + tmp_fq)
self.bwa_aligner_single(
str(input_files["genome"]), tmp_fq, output_bam_file_tmp,
index_files["amb"],
index_files["ann"],
index_files["bwt"],
index_files["pac"],
index_files["sa"],
self.get_mem_params(self.configuration)
)
barrier()
# Remove all tmp fastq files now that the reads have been aligned
if untar_idx:
for idx_file in index_files:
compss_delete_file(index_files[idx_file])
if hasattr(sys, '_run_from_cmdl') is True:
pass
else:
for fastq_file_pair in fastq_file_list:
tmp_fq = os.path.join(gz_data_path, "tmp", fastq_file_pair[0])
compss_delete_file(tmp_fq)
try:
os.remove(tmp_fq)
except (OSError, IOError) as msg:
logger.warn(
"Unable to remove file I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
if "fastq2" in input_files:
tmp_fq = os.path.join(gz_data_path, "tmp", fastq_file_pair[1])
compss_delete_file(tmp_fq)
try:
os.remove(tmp_fq)
except (OSError, IOError) as msg:
logger.warn(
"Unable to remove file I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
tasks_done += 1
logger.progress("ALIGNER", task_id=tasks_done, total=task_count)
bam_handle = bamUtilsTask()
logger.progress("Merging bam files", task_id=tasks_done, total=task_count)
bam_handle.bam_merge(output_bam_list)
tasks_done += 1
logger.progress("Merging bam files", task_id=tasks_done, total=task_count)
# Remove all bam files that are not the final file
for i in output_bam_list[1:len(output_bam_list)]:
try:
compss_delete_file(i)
os.remove(i)
except (OSError, IOError) as msg:
logger.warn(
"Unable to remove file I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
logger.progress("Sorting merged bam file", task_id=tasks_done, total=task_count)
bam_handle.bam_sort(output_bam_list[0])
tasks_done += 1
logger.progress("Sorting merged bam file", task_id=tasks_done, total=task_count)
logger.progress("Copying bam file into the output file",
task_id=tasks_done, total=task_count)
bam_handle.bam_copy(output_bam_list[0], output_bam_file)
tasks_done += 1
logger.progress("Copying bam file into the output file",
task_id=tasks_done, total=task_count)
logger.progress("Indexing output bam file",
task_id=tasks_done, total=task_count)
bam_handle.bam_index(output_bam_file, output_files["bai"])
tasks_done += 1
logger.progress("Indexing output bam file",
task_id=tasks_done, total=task_count)
compss_delete_file(output_bam_list[0])
logger.info("BWA ALIGNER: Alignments complete")
barrier()
try:
shutil.rmtree(gz_data_path + "/tmp")
except (OSError, IOError) as msg:
logger.warn(
"Already tidy I/O error({0}): {1}".format(
msg.errno, msg.strerror
)
)
output_metadata = {
"bam": Metadata(
data_type=input_metadata['loc'].data_type,
file_type="BAM",
file_path=output_files["output"],
sources=sources,
taxon_id=input_metadata["genome"].taxon_id,
meta_data={
"assembly": input_metadata["genome"].meta_data["assembly"],
"tool": "bwa_aligner",
"parameters": self.get_mem_params(self.configuration),
"associated_files": [output_files["bai"]]
}
),
"bai": Metadata(
data_type=input_metadata['loc'].data_type,
file_type="BAI",
file_path=output_files["bai"],
sources=sources,
taxon_id=input_metadata["genome"].taxon_id,
meta_data={
"assembly": input_metadata["genome"].meta_data["assembly"],
"tool": "bs_seeker_aligner",
"associated_master": output_bam_file
}
)
}
return (
{"bam": output_files["output"], "bai": output_files["bai"]},
output_metadata
)
# ------------------------------------------------------------------------------
| {
"content_hash": "86f9f49e90b1f6d3ad892bd620ab0f96",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 134,
"avg_line_length": 36.492753623188406,
"alnum_prop": 0.53445194598888,
"repo_name": "Multiscale-Genomics/mg-process-fastq",
"id": "f0f16695ff2d8d6c24244052226011547f92ca4f",
"size": "20144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tool/bwa_mem_aligner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "831267"
},
{
"name": "R",
"bytes": "5100"
},
{
"name": "Shell",
"bytes": "24651"
}
],
"symlink_target": ""
} |
"""
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ar-dz', gettext_noop('Algerian Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('uz', gettext_noop('Uzbek')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
LANGUAGE_COOKIE_SECURE = False
LANGUAGE_COOKIE_HTTPONLY = False
LANGUAGE_COOKIE_SAMESITE = None
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'DENY'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
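# A typical value, assuming a proxy that always sets X-Forwarded-Proto and
# strips any client-supplied copy of that header, would be:
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')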
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the HttpOnly flag.
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The minimum number of seconds a password reset link is valid for
# (default: 3 days).
PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter'
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_REFERRER_POLICY = None
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| {
"content_hash": "5c117bd642bc303545b7b45048f1c6f6",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 101,
"avg_line_length": 34.58255451713396,
"alnum_prop": 0.6886316548058733,
"repo_name": "simonw/django",
"id": "09c9b95d26d5c02e2d69c94c3d65dad0e3cb7bf6",
"size": "22203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/conf/global_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ImagesConfig(AppConfig):
name = 'imagetagger.images'
| {
"content_hash": "8ffe8046a348710b77f022f3e7db61b1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 19.8,
"alnum_prop": 0.7676767676767676,
"repo_name": "bit-bots/imagetagger",
"id": "fbaf036fb4d737ccf850834879a96544ef3f04bb",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/imagetagger/images/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12288"
},
{
"name": "Dockerfile",
"bytes": "2049"
},
{
"name": "HTML",
"bytes": "273837"
},
{
"name": "JavaScript",
"bytes": "234939"
},
{
"name": "Python",
"bytes": "252248"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.tools.api.generator.api.contrib import stat_summarizer | {
"content_hash": "9116c9ffcf4c8f564bd9fa588f127dda",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.7963800904977375,
"repo_name": "ryfeus/lambda-packs",
"id": "7e28ef92d8adf87af0035c3570b480ff525f216f",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/contrib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import functools
import hashlib
from django import http
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext, ugettext_lazy as _lazy
import six
from django_statsd.clients import statsd
from rest_framework import serializers
from rest_framework.viewsets import ModelViewSet
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.views import AccountViewSet
from olympia.addons.models import Addon
from olympia.addons.views import BaseFilter
from olympia.amo import messages
from olympia.amo.decorators import (
allow_mine, json_view, login_required, post_required, use_primary_db)
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import paginate, render, urlparams
from olympia.api.filters import OrderingAliasFilter
from olympia.api.permissions import (
AllOf, AllowReadOnlyIfPublic, AnyOf, PreventActionPermission)
from olympia.legacy_api.utils import addon_to_dict
from olympia.translations.query import order_by_translation
from olympia.users.decorators import process_user_id
from olympia.users.models import UserProfile
from . import forms
from .models import SPECIAL_SLUGS, Collection, CollectionAddon
from .permissions import (
AllowCollectionAuthor, AllowCollectionContributor, AllowContentCurators)
from .serializers import (
CollectionAddonSerializer, CollectionSerializer,
CollectionWithAddonsSerializer)
log = olympia.core.logger.getLogger('z.collections')
@non_atomic_requests
def get_collection(request, user_id, slug):
if (slug in SPECIAL_SLUGS.values() and request.user.is_authenticated and
request.user.id == user_id):
return getattr(request.user, slug + '_collection')()
else:
return get_object_or_404(Collection.objects,
author_id=user_id, slug=slug)
def owner_required(f=None, require_owner=True):
"""Requires collection to be owned, by someone."""
def decorator(func):
@functools.wraps(func)
def wrapper(request, user_id, slug, *args, **kw):
collection = get_collection(request, user_id, slug)
if acl.check_collection_ownership(request, collection,
require_owner=require_owner):
return func(request, collection, user_id, slug, *args, **kw)
else:
raise PermissionDenied
return wrapper
return decorator(f) if f else decorator
@non_atomic_requests
def legacy_redirect(request, uuid, edit=False):
# Nicknames have a limit of 30, so len == 36 implies a uuid.
key = 'uuid' if len(uuid) == 36 else 'nickname'
collection = get_object_or_404(Collection.objects, **{key: uuid})
if edit:
return http.HttpResponseRedirect(collection.edit_url())
to = collection.get_url_path()
params = request.GET.urlencode()
if params:
to += '?' + params
return http.HttpResponseRedirect(to)
@non_atomic_requests
def legacy_directory_redirects(request, page):
sorts = {'editors_picks': 'featured', 'popular': 'popular'}
loc = base = reverse('collections.list')
if page in sorts:
loc = urlparams(base, sort=sorts[page])
elif request.user.is_authenticated:
if page == 'mine':
loc = reverse('collections.user', args=[request.user.id])
return http.HttpResponseRedirect(loc)
@non_atomic_requests
def render_cat(request, template, data=None, extra=None):
if extra is None:
extra = {}
if data is None:
data = {}
data.update(dict(search_cat='collections'))
return render(request, template, data, **extra)
@non_atomic_requests
def collection_listing(request, base=None):
qs = (
Collection.objects.listed()
.filter(Q(application=request.APP.id) | Q(application=None))
.filter(type=amo.COLLECTION_FEATURED)
.exclude(addon_count=0)
)
# Counts are hard to cache automatically, and accuracy for this
# one is less important. Remember it for 5 minutes.
countkey = hashlib.sha256(str(qs.query) + '_count').hexdigest()
count = cache.get(countkey)
if count is None:
count = qs.count()
cache.set(countkey, count, 300)
collections = paginate(request, qs, count=count)
return render_cat(request, 'bandwagon/impala/collection_listing.html',
{'collections': collections, 'src': 'co-hc-sidebar',
'dl_src': 'co-dp-sidebar'})
@allow_mine
@process_user_id
@non_atomic_requests
def user_listing(request, user_id):
author = get_object_or_404(UserProfile, id=user_id)
qs = (Collection.objects.filter(author_id=user_id)
.order_by('-created'))
mine = (request.user.is_authenticated and
request.user.id == user_id)
if mine:
page = 'mine'
else:
page = 'user'
qs = qs.filter(listed=True)
collections = paginate(request, qs)
return render_cat(request, 'bandwagon/user_listing.html',
{'collections': collections,
'page': page, 'author': author})
class CollectionAddonFilter(BaseFilter):
opts = (('added', _lazy(u'Added')),
('popular', _lazy(u'Popularity')),
('name', _lazy(u'Name')))
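    # Note (added for clarity): BaseFilter, imported from olympia.addons.views,
    # presumably dispatches on the request's ``sort`` parameter (see the
    # key='sort', default='popular' usage in collection_detail below) to the
    # matching filter_<opt> method defined here.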
def filter_added(self):
return self.base_queryset.order_by('collectionaddon__created')
def filter_name(self):
return order_by_translation(self.base_queryset, 'name')
def filter_popular(self):
return self.base_queryset.order_by('-weekly_downloads')
@allow_mine
@process_user_id
@non_atomic_requests
def collection_detail(request, user_id, slug):
collection = get_collection(request, user_id, slug)
if not collection.listed:
if not request.user.is_authenticated:
return redirect_for_login(request)
if not acl.check_collection_ownership(request, collection):
raise PermissionDenied
base = Addon.objects.valid() & collection.addons.all()
filter = CollectionAddonFilter(request, base,
key='sort', default='popular')
notes = get_notes(collection)
# Go directly to CollectionAddon for the count to avoid joins.
count = CollectionAddon.objects.filter(
Addon.objects.all().valid_q(
amo.VALID_ADDON_STATUSES, prefix='addon__'),
collection=collection.id)
addons = paginate(request, filter.qs, per_page=15, count=count.count())
# `perms` is defined in django.contrib.auth.context_processors. Gotcha!
user_perms = {
'view_stats': acl.check_ownership(
request, collection, require_owner=False),
}
return render_cat(request, 'bandwagon/collection_detail.html',
{'collection': collection, 'filter': filter,
'addons': addons, 'notes': notes,
'user_perms': user_perms})
@json_view(has_trans=True)
@allow_mine
@process_user_id
@non_atomic_requests
def collection_detail_json(request, user_id, slug):
collection = get_collection(request, user_id, slug)
if not (collection.listed or acl.check_collection_ownership(
request, collection)):
raise PermissionDenied
# We evaluate the QuerySet with `list` to work around bug 866454.
addons_dict = [addon_to_dict(a) for a in list(collection.addons.valid())]
return {
'name': collection.name,
'url': collection.get_abs_url(),
'addons': addons_dict
}
def get_notes(collection, raw=False):
# This might hurt in a big collection with lots of notes.
# It's a generator so we don't evaluate anything by default.
notes = CollectionAddon.objects.filter(collection=collection,
comments__isnull=False)
rv = {}
for note in notes:
# Watch out for comments in a language we didn't pick up.
if note.comments:
rv[note.addon_id] = (note.comments.localized_string if raw
else note.comments)
yield rv
def initial_data_from_request(request):
return {'author': request.user, 'application': request.APP.id}
def collection_message(request, collection, option):
if option == 'add':
title = ugettext('Collection created!')
msg = ugettext(
'Your new collection is shown below. You can '
'<a href="%(url)s">edit additional settings</a> if you\'d '
'like.'
) % {'url': collection.edit_url()}
elif option == 'update':
title = ugettext('Collection updated!')
msg = ugettext(
'<a href="%(url)s">View your collection</a> to see the changes.'
) % {'url': collection.get_url_path()}
else:
raise ValueError('Incorrect option "%s", '
'takes only "add" or "update".' % option)
messages.success(request, title, msg, message_safe=True)
@use_primary_db
@login_required
def add(request):
"""Displays/processes a form to create a collection."""
ctx = {}
if request.method == 'POST':
form = forms.CollectionForm(
request.POST, request.FILES,
initial=initial_data_from_request(request))
aform = forms.AddonsForm(request.POST)
if form.is_valid():
collection = form.save(default_locale=request.LANG)
collection.save()
if aform.is_valid():
aform.save(collection)
collection_message(request, collection, 'add')
statsd.incr('collections.created')
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(collection.get_url_path())
else:
ctx['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
ctx['comments'] = aform.clean_addon_comment()
else:
form = forms.CollectionForm()
ctx['form'] = form
return render_cat(request, 'bandwagon/add.html', ctx)
@use_primary_db
@login_required(redirect=False)
def ajax_new(request):
form = forms.CollectionForm(
request.POST or None,
initial=initial_data_from_request(request))
if request.method == 'POST' and form.is_valid():
collection = form.save()
addon_id = request.POST['addon_id']
collection.add_addon(Addon.objects.get(pk=addon_id))
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(reverse('collections.ajax_list') +
'?addon_id=%s' % addon_id)
return render(request, 'bandwagon/ajax_new.html', {'form': form})
@login_required(redirect=False)
@non_atomic_requests
def ajax_list(request):
try:
addon_id = int(request.GET['addon_id'])
except (KeyError, ValueError):
return http.HttpResponseBadRequest()
qs = Collection.objects.owned_by(request.user).with_has_addon(addon_id)
return render(request, 'bandwagon/ajax_list.html',
{'collections': order_by_translation(qs, 'name')})
@use_primary_db
@login_required
@post_required
@process_user_id
def collection_alter(request, user_id, slug, action):
collection = get_collection(request, user_id, slug)
return change_addon(request, collection, action)
def change_addon(request, collection, action):
if not acl.check_collection_ownership(request, collection):
raise PermissionDenied
try:
addon = get_object_or_404(Addon.objects, pk=request.POST['addon_id'])
except (ValueError, KeyError):
return http.HttpResponseBadRequest()
getattr(collection, action + '_addon')(addon)
log.info(u'%s: %s %s to collection %s' %
(request.user, action, addon.id, collection.id))
if request.is_ajax():
url = '%s?addon_id=%s' % (reverse('collections.ajax_list'), addon.id)
else:
url = collection.get_url_path()
return http.HttpResponseRedirect(url)
@use_primary_db
@login_required
@post_required
def ajax_collection_alter(request, action):
try:
collection = get_object_or_404(
Collection.objects, pk=request.POST['id'])
except (ValueError, KeyError):
return http.HttpResponseBadRequest()
return change_addon(request, collection, action)
@use_primary_db
@login_required
@process_user_id
# Contributors are allowed to *see* the page, but there is another
# permission check below to prevent them from doing any modifications.
@owner_required(require_owner=False)
def edit(request, collection, user_id, slug):
is_admin = acl.action_allowed(request, amo.permissions.ADMIN_CURATION)
if not acl.check_collection_ownership(
request, collection, require_owner=True):
if request.method == 'POST':
raise PermissionDenied
form = None
elif request.method == 'POST':
initial = initial_data_from_request(request)
if collection.author_id: # Don't try to change the author.
initial['author'] = collection.author
form = forms.CollectionForm(request.POST, request.FILES,
initial=initial,
instance=collection)
if form.is_valid():
collection = form.save()
collection_message(request, collection, 'update')
log.info(u'%s edited collection %s' %
(request.user, collection.id))
return http.HttpResponseRedirect(collection.edit_url())
else:
form = forms.CollectionForm(instance=collection)
qs = (CollectionAddon.objects.using('default')
.filter(collection=collection))
meta = {c.addon_id: c for c in qs}
addons = collection.addons.all()
comments = next(get_notes(collection, raw=True))
data = {
'collection': collection,
'form': form,
'user_id': user_id,
'slug': slug,
'meta': meta,
'is_admin': is_admin,
'addons': addons,
'comments': comments
}
return render_cat(request, 'bandwagon/edit.html', data)
@use_primary_db
@login_required
@process_user_id
@owner_required(require_owner=False)
@post_required
def edit_addons(request, collection, user_id, slug):
if request.method == 'POST':
form = forms.AddonsForm(request.POST)
if form.is_valid():
form.save(collection)
collection_message(request, collection, 'update')
log.info(u'%s added add-ons to %s' %
(request.user, collection.id))
return http.HttpResponseRedirect(collection.edit_url() + '#addons-edit')
@use_primary_db
@login_required
@process_user_id
@owner_required
@post_required
def edit_privacy(request, collection, user_id, slug):
collection.listed = not collection.listed
collection.save()
log.info(u'%s changed privacy on collection %s' %
(request.user, collection.id))
return http.HttpResponseRedirect(collection.get_url_path())
@use_primary_db
@login_required
@process_user_id
def delete(request, user_id, slug):
collection = get_object_or_404(Collection, author_id=user_id, slug=slug)
if not acl.check_collection_ownership(request, collection, True):
log.info(u'%s is trying to delete collection %s'
% (request.user, collection.id))
raise PermissionDenied
data = dict(collection=collection, user_id=user_id, slug=slug)
if request.method == 'POST':
if request.POST['sure'] == '1':
collection.delete()
log.info(u'%s deleted collection %s' %
(request.user, collection.id))
url = reverse('collections.user', args=[user_id])
return http.HttpResponseRedirect(url)
else:
return http.HttpResponseRedirect(collection.get_url_path())
return render_cat(request, 'bandwagon/delete.html', data)
@login_required
@allow_mine
@non_atomic_requests
def mine(request, user_id=None, slug=None):
if slug is None:
return user_listing(request, user_id)
else:
return collection_detail(request, user_id, slug)
class CollectionViewSet(ModelViewSet):
# Note: CollectionAddonViewSet will call CollectionViewSet().get_object(),
# causing the has_object_permission() method of these permissions to be
# called. It will do so without setting an action however, bypassing the
# PreventActionPermission() parts.
permission_classes = [
AnyOf(
# Collection authors can do everything.
AllowCollectionAuthor,
# Collection contributors can access the featured themes collection
            # (it's community-managed) and change its addons, but can't delete
            # or edit its details.
AllOf(AllowCollectionContributor,
PreventActionPermission(('create', 'list', 'update',
'destroy', 'partial_update'))),
# Content curators can modify existing mozilla collections as they
# see fit, but can't list or delete them.
AllOf(AllowContentCurators,
PreventActionPermission(('create', 'destroy', 'list'))),
# Everyone else can do read-only stuff, except list.
AllOf(AllowReadOnlyIfPublic,
PreventActionPermission('list'))),
]
lookup_field = 'slug'
def get_account_viewset(self):
if not hasattr(self, 'account_viewset'):
self.account_viewset = AccountViewSet(
request=self.request,
permission_classes=[], # We handled permissions already.
kwargs={'pk': self.kwargs['user_pk']})
return self.account_viewset
def get_serializer_class(self):
with_addons = ('with_addons' in self.request.GET and
self.action == 'retrieve')
return (CollectionSerializer if not with_addons
else CollectionWithAddonsSerializer)
def get_queryset(self):
return Collection.objects.filter(
author=self.get_account_viewset().get_object()).order_by(
'-modified')
def get_addons_queryset(self):
collection_addons_viewset = CollectionAddonViewSet(
request=self.request
)
# Set this to avoid a pointless lookup loop.
collection_addons_viewset.collection = self.get_object()
# This needs to be list to make the filtering work.
collection_addons_viewset.action = 'list'
qs = collection_addons_viewset.get_queryset()
# Now limit and sort
limit = settings.REST_FRAMEWORK['PAGE_SIZE']
sort = collection_addons_viewset.ordering[0]
return qs.order_by(sort)[:limit]
class TranslationAwareOrderingAliasFilter(OrderingAliasFilter):
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if len(ordering) > 1:
# We can't support multiple orderings easily because of
# how order_by_translation works.
raise serializers.ValidationError(
'You can only specify one "sort" argument. Multiple '
'orderings are not supported')
order_by = ordering[0]
if order_by in ('name', '-name'):
return order_by_translation(queryset, order_by, Addon)
sup = super(TranslationAwareOrderingAliasFilter, self)
return sup.filter_queryset(request, queryset, view)
class CollectionAddonViewSet(ModelViewSet):
permission_classes = [] # We don't need extra permissions.
serializer_class = CollectionAddonSerializer
lookup_field = 'addon'
filter_backends = (TranslationAwareOrderingAliasFilter,)
ordering_fields = ()
ordering_field_aliases = {'popularity': 'addon__weekly_downloads',
'name': 'name',
'added': 'created'}
ordering = ('-addon__weekly_downloads',)
def get_collection(self):
if not hasattr(self, 'collection'):
# We're re-using CollectionViewSet and making sure its get_object()
# method is called, which triggers the permission checks for that
# class so we don't need our own.
# Note that we don't pass `action`, so the PreventActionPermission
# part of the permission checks won't do anything.
self.collection = CollectionViewSet(
request=self.request,
kwargs={'user_pk': self.kwargs['user_pk'],
'slug': self.kwargs['collection_slug']}).get_object()
return self.collection
def get_object(self):
self.lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup_value = self.kwargs.get(self.lookup_url_kwarg)
        # if the lookup is not a number, it's probably the slug instead.
if lookup_value and not six.text_type(lookup_value).isdigit():
self.lookup_field = '%s__slug' % self.lookup_field
return super(CollectionAddonViewSet, self).get_object()
def get_queryset(self):
qs = (
CollectionAddon.objects
.filter(collection=self.get_collection())
.prefetch_related('addon'))
filter_param = self.request.GET.get('filter')
# We only filter list action.
include_all_with_deleted = (filter_param == 'all_with_deleted' or
self.action != 'list')
# If deleted addons are requested, that implies all addons.
include_all = filter_param == 'all' or include_all_with_deleted
if not include_all:
qs = qs.filter(
addon__status=amo.STATUS_PUBLIC, addon__disabled_by_user=False)
elif not include_all_with_deleted:
qs = qs.exclude(addon__status=amo.STATUS_DELETED)
return qs
| {
"content_hash": "f56e2db712839f280315026a48365478",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 79,
"avg_line_length": 36.57236842105263,
"alnum_prop": 0.6383792048929664,
"repo_name": "aviarypl/mozilla-l10n-addons-server",
"id": "9ce0ff53d847f7f43c4452ac3aab98fad1db5c9e",
"size": "22236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/bandwagon/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "809734"
},
{
"name": "Dockerfile",
"bytes": "2898"
},
{
"name": "HTML",
"bytes": "515798"
},
{
"name": "JavaScript",
"bytes": "1070508"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "10596"
},
{
"name": "Python",
"bytes": "5462821"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "8821"
},
{
"name": "Smarty",
"bytes": "1388"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', 'flag=sblk'],
[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=sblk,scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.stop_vm, 'vm1'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.reboot_vm, 'vm1'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_vm_snapshot, 'vm1-snapshot1'],
[TestAction.start_vm, 'vm1'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:[]
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5']---vm1volume1_volume2_volume3
vm_snap1:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']---vm1volume1_volume2_volume3
'''
| {
"content_hash": "827fa732972ef7fa0683699e5e87afdf",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 169,
"avg_line_length": 37.4390243902439,
"alnum_prop": 0.6990228013029316,
"repo_name": "zstackio/zstack-woodpecker",
"id": "5f2076b6a34849627d766078bffa9fb4ba1cbea1",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/multihosts/vm_snapshots/paths/sc_path57.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
import os
# Parse the readme, and add new script info
# based on the flags used for tampermonkey.
# Default section.
intro = """
#tampermonkey
My ongoing TamperMonkey scripts for Chrome.
## modules
"""
def _markdown_param(line, name):
return line.replace(
'// @{}'.format(name), '\n**@{}**: '.format(name)).strip()
def _parse_script(doc):
# Parse out the tampermonkey annotations and use them as markdown entries.
vals = ''
with open(doc) as jsdoc:
param = ''
for line in jsdoc.readlines():
if line.startswith('// @name '):
param = _markdown_param(line, 'name')
elif line.startswith('// @description '):
param = _markdown_param(line, 'description')
elif line.startswith('// @match '):
param = _markdown_param(line, 'match')
else:
continue
vals += '\n{}\n'.format(param)
return vals + '\n'
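# A quick illustration of the mapping above (the header fields below are made
# up for the example, not taken from any script in src/): a userscript header
# block such as
#
#     // @name Example Script
#     // @description Does example things
#     // @match https://example.com/*
#
# would be emitted by _parse_script() roughly as
#
#     **@name**: Example Script
#
#     **@description**: Does example things
#
#     **@match**: https://example.com/*
#
# since _markdown_param() swaps the '// @<field>' prefix for a bolded markdown
# label and each parsed parameter is wrapped in blank lines.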
def parse():
# Overwrite to prevent concatenating random text, etc...
with open('README.md', 'wb') as markdownfile:
markdownfile.write(intro)
jsfiles = os.listdir('src/')
for jsfile in jsfiles:
markdownfile.write('#### {}\n'.format(jsfile))
markdownfile.write(_parse_script(
'{}/src/{}'.format(os.getcwd(), jsfile)))
if __name__ == '__main__':
parse()
| {
"content_hash": "4f950c8c8b347d971e132c3d5a5cd66d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 26.39622641509434,
"alnum_prop": 0.5589706933523946,
"repo_name": "christabor/tampermonkey",
"id": "1c88f71d680a5adc4d9b8373b58079c9a4d52db9",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14846"
},
{
"name": "Python",
"bytes": "1399"
}
],
"symlink_target": ""
} |
from util.hook import *
import praw
red = praw.Reddit('reddit code module ported by apollojustice; fetches stuff from leddit')
__author__ = "apollojustice"
# ported from newellworldorder's self-made bot into Code by apollojustice
@hook(cmds=["reddit"], args=False, rate=10)
def redditgb(code, input):
noargs = False
try:
args = input.group(2).split()
subreddit = args[0]
except (AttributeError, IndexError, UnboundLocalError):
noargs = True
nsfwstr = ''
gibensfw = red.get_random_subreddit(nsfw=True)
if noargs:
randsubm = red.get_random_submission()
if randsubm.over_18:
nsfwstr = '[NSFW] '
return code.reply("07Reddit 04%s10%s - 12%s 06( %s )" % (nsfwstr, randsubm.subreddit.url, randsubm.title, randsubm.url))
elif not noargs:
submi = ''
if args[0].lower() != 'user':
if len(args) < 2:
category = 'random'
            else:
category = args[1].lower()
if category == 'controversial':
subm = red.get_subreddit(subreddit.lower()).get_controversial(limit=1)
elif category == 'hot':
subm = red.get_subreddit(subreddit.lower()).get_hot(limit=1)
elif category == 'new':
subm = red.get_subreddit(subreddit.lower()).get_new(limit=1)
elif category == 'random':
try:
submi = red.get_subreddit(subreddit.lower()).get_random_submission()
except:
submi = red.get_subreddit(subreddit.lower()).get_random_submission()
elif category == 'rising':
subm = red.get_subreddit(subreddit.lower()).get_rising(limit=1)
elif category == 'search':
if len(args) > 2:
subm = red.get_subreddit(subreddit.lower()).search('+'.join(args[1:]),limit=1)
elif len(args) < 2:
return code.reply("no search terms given")
elif category == 'top':
subm = red.get_subreddit(subreddit.lower()).get_top(limit=1)
if not submi:
submi = next(subm)
if submi.over_18:
nsfwstr = 'NSFW '
if not submi.over_18:
nsfwstr = ''
return code.reply("07Reddit 04%s10%s - 12%s 06( %s )" % (nsfwstr, submi.subreddit.url, submi.title, submi.url))
elif args[0].lower() == 'user':
try:
usertarget = red.get_redditor(args[1])
return code.reply("07Reddit 10%s 06( %s )" % (usertarget.name, usertarget._url))
except:
return code.reply('user not found.')
| {
"content_hash": "bd3da5586f9d3b0fecd3d0897be82ede",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 137,
"avg_line_length": 43.53968253968254,
"alnum_prop": 0.5391906671527524,
"repo_name": "ApolloJustice/codemodules",
"id": "3340416f332d1af8190871ee22b2f22dd96ded24",
"size": "2743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8795"
}
],
"symlink_target": ""
} |
"""
listDiff.py
Kyle McChesney
Compare two text file lists (as sets)
"""
import logging, argparse, os, time
def main():
# args
parser = argparse.ArgumentParser(
description = (" Compare two txt file lists "),
)
time_stamp = str(time.time()).replace(".","")
log_file = os.path.join("./","{}.{}.{}".format("listDiff",time_stamp,"log"))
parser.add_argument("--A", help="First list file", required=True)
parser.add_argument("--B", help="Second list file", required=True)
parser.add_argument("--output", help="Name of output file", default=log_file)
args = parser.parse_args()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log_formatter = logging.Formatter('%(asctime)s {%(levelname)s}: %(message)s')
file_handler = logging.FileHandler(args.output)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(log_formatter)
# console log
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(log_formatter)
# set it all up
log.addHandler(file_handler)
log.addHandler(stream_handler)
log.info("######### ListDiff ##########")
a_list = [ line.rstrip('\n') for line in open(args.A)]
b_list = [ line.rstrip('\n') for line in open(args.B)]
log.info("Comparing %s to %s", args.A, args.B)
log.info("%s has %i entries", args.A, len(a_list))
log.info("%s has %i entries", args.B, len(b_list))
log.info("The lists differ by %i entries", abs(len(a_list)-len(b_list)))
a_set = set(a_list)
b_set = set(b_list)
log.info("The intersection of the lists has %i values", len(a_set.intersection(b_set)))
log.info("The union of the lists has %i values", len(a_set.union(b_set)))
if a_set.issubset(b_set):
log.info("%s is a strict subset of %s", args.A, args.B)
else:
log.info("%s is NOT a strict subset of %s", args.A, args.B)
if b_set.issubset(a_set):
log.info("%s is a strict subset of %s", args.B, args.A)
else:
log.info("%s is NOT a strict subset of %s", args.B, args.A)
if __name__ == "__main__":
main() | {
"content_hash": "06849c6375c26dc7f3ba035ce612e142",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 91,
"avg_line_length": 31.66176470588235,
"alnum_prop": 0.6196005573618207,
"repo_name": "mbiokyle29/bioinformatics",
"id": "2999cf1e6a71b58cf561eceb24cafa7ee51abfc0",
"size": "2175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/python/listDiff.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2428"
},
{
"name": "HTML",
"bytes": "375"
},
{
"name": "JavaScript",
"bytes": "2432"
},
{
"name": "Perl",
"bytes": "205245"
},
{
"name": "Perl 6",
"bytes": "3956"
},
{
"name": "Python",
"bytes": "32480"
},
{
"name": "R",
"bytes": "2151"
},
{
"name": "Shell",
"bytes": "5537"
}
],
"symlink_target": ""
} |
"""Linear QL agent"""
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import framework
import utils
DEBUG = False
GAMMA = 0.5 # discounted factor
TRAINING_EP = 0.5 # epsilon-greedy parameter for training
TESTING_EP = 0.05 # epsilon-greedy parameter for testing
NUM_RUNS = 5
NUM_EPOCHS = 600
NUM_EPIS_TRAIN = 25 # number of episodes for training at each epoch
NUM_EPIS_TEST = 50 # number of episodes for testing
ALPHA = 0.01 # learning rate for training
ACTIONS = framework.get_actions()
OBJECTS = framework.get_objects()
NUM_ACTIONS = len(ACTIONS)
NUM_OBJECTS = len(OBJECTS)
def tuple2index(action_index, object_index):
"""Converts a tuple (a,b) to an index c"""
return action_index * NUM_OBJECTS + object_index
def index2tuple(index):
"""Converts an index c to a tuple (a,b)"""
return index // NUM_OBJECTS, index % NUM_OBJECTS
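# Worked example of the flattening above (the real value of NUM_OBJECTS comes
# from framework.get_objects(), so 4 is only an assumed figure): with
# NUM_OBJECTS == 4, tuple2index(2, 3) == 2 * 4 + 3 == 11 and
# index2tuple(11) == (11 // 4, 11 % 4) == (2, 3), so the two helpers invert
# each other for any valid (action_index, object_index) pair.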
# pragma: coderesponse template name="linear_epsilon_greedy"
def epsilon_greedy(state_vector, theta, epsilon):
"""Returns an action selected by an epsilon-greedy exploration policy
Args:
state_vector (np.ndarray): extracted vector representation
theta (np.ndarray): current weight matrix
epsilon (float): the probability of choosing a random command
Returns:
(int, int): the indices describing the action/object to take
"""
action_index, object_index = None, None
q_value = theta @ state_vector
if(np.random.binomial(1, epsilon)):
action_index, object_index = np.random.randint(0, NUM_ACTIONS), np.random.randint(0, NUM_OBJECTS)
else:
action_index, object_index = index2tuple (np.argmax( q_value ))
return (action_index, object_index)
# pragma: coderesponse end
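# (np.random.binomial(1, epsilon) draws a single Bernoulli sample, so the
# random action/object pair above is picked with probability epsilon, and the
# greedy argmax over the NUM_ACTIONS * NUM_OBJECTS entries of
# theta @ state_vector is picked otherwise.)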
# pragma: coderesponse template
def linear_q_learning(theta, current_state_vector, action_index, object_index,
reward, next_state_vector, terminal):
"""Update theta for a given transition
Args:
theta (np.ndarray): current weight matrix
current_state_vector (np.ndarray): vector representation of current state
action_index (int): index of the current action
object_index (int): index of the current object
        reward (float): the immediate reward the agent receives from playing current command
next_state_vector (np.ndarray): vector representation of next state
        terminal (bool): True if this episode is over
Returns:
None
"""
index = tuple2index(action_index, object_index)
bestQ = 0
if not terminal:
bestQ = np.max(theta @ next_state_vector)
Q_s_c_theta = (theta @ current_state_vector)[index]
phi = current_state_vector
theta[index] += ALPHA*(reward + GAMMA*bestQ - Q_s_c_theta)*phi
# pragma: coderesponse end
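# In equation form, the update implemented in linear_q_learning() is standard
# Q-learning with a linear function approximator:
#
#     theta_c <- theta_c + ALPHA * (R + GAMMA * max_c' Q(s', c') - Q(s, c)) * phi(s)
#
# where c = tuple2index(action_index, object_index), phi(s) is the bag-of-words
# state vector, Q(s, c) = theta_c . phi(s), and the max over c' is treated as 0
# when s' is terminal.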
def run_episode(for_training):
""" Runs one episode
If for training, update Q function
If for testing, computes and return cumulative discounted reward
Args:
for_training (bool): True if for training
Returns:
None
"""
epsilon = TRAINING_EP if for_training else TESTING_EP
# initialize for each episode
epi_reward = 0
i = 0
(current_room_desc, current_quest_desc, terminal) = framework.newGame()
while not terminal:
# Choose next action and execute
current_state = current_room_desc + current_quest_desc
current_state_vector = utils.extract_bow_feature_vector(
current_state, dictionary)
current_action, current_object = epsilon_greedy(
current_state_vector, theta, epsilon)
next_room_desc, next_quest_desc, reward, terminal = framework.step_game(
current_room_desc, current_quest_desc,
current_action, current_object)
next_state = next_room_desc + next_quest_desc
next_state_vector = utils.extract_bow_feature_vector(
next_state, dictionary)
if for_training:
# update Q-function.
linear_q_learning(theta,
current_state_vector, current_action, current_object,
reward, next_state_vector, terminal)
if not for_training:
# update reward
epi_reward += (GAMMA**i)*reward
# prepare next step
i += 1
current_room_desc, current_quest_desc = next_room_desc, next_quest_desc
if not for_training:
return epi_reward
def run_epoch():
"""Runs one epoch and returns reward averaged over test episodes"""
rewards = []
for _ in range(NUM_EPIS_TRAIN):
run_episode(for_training=True)
for _ in range(NUM_EPIS_TEST):
rewards.append(run_episode(for_training=False))
return np.mean(np.array(rewards))
def run():
"""Returns array of test reward per epoch for one run"""
global theta
theta = np.zeros([action_dim, state_dim])
single_run_epoch_rewards_test = []
pbar = tqdm(range(NUM_EPOCHS), ncols=80)
for _ in pbar:
single_run_epoch_rewards_test.append(run_epoch())
pbar.set_description(
"Avg reward: {:0.6f} | Ewma reward: {:0.6f}".format(
np.mean(single_run_epoch_rewards_test),
utils.ewma(single_run_epoch_rewards_test)))
return single_run_epoch_rewards_test
if __name__ == '__main__':
state_texts = utils.load_data('game.tsv')
dictionary = utils.bag_of_words(state_texts)
state_dim = len(dictionary)
action_dim = NUM_ACTIONS * NUM_OBJECTS
# set up the game
framework.load_game_data()
epoch_rewards_test = [] # shape NUM_RUNS * NUM_EPOCHS
for _ in range(NUM_RUNS):
epoch_rewards_test.append(run())
epoch_rewards_test = np.array(epoch_rewards_test)
x = np.arange(NUM_EPOCHS)
fig, axis = plt.subplots()
axis.plot(x, np.mean(epoch_rewards_test,
axis=0)) # plot reward per epoch averaged per run
axis.set_xlabel('Epochs')
axis.set_ylabel('reward')
    axis.set_title(('Linear: nRuns=%d, Epsilon=%.2f, Epi=%d, alpha=%.4f' %
(NUM_RUNS, TRAINING_EP, NUM_EPIS_TRAIN, ALPHA)))
| {
"content_hash": "3f93e27666590b834cf47fe0ec588495",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 106,
"avg_line_length": 32.020833333333336,
"alnum_prop": 0.6441119063109955,
"repo_name": "xunilrj/sandbox",
"id": "4c5d8599e4c10cb8e492219bc4ba650423f5059a",
"size": "6148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courses/MITx/MITx 6.86x Machine Learning with Python-From Linear Models to Deep Learning/project5/rl/agent_linear.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "235"
},
{
"name": "ASP.NET",
"bytes": "110"
},
{
"name": "Assembly",
"bytes": "28409"
},
{
"name": "Asymptote",
"bytes": "22978"
},
{
"name": "C",
"bytes": "1022035"
},
{
"name": "C#",
"bytes": "474510"
},
{
"name": "C++",
"bytes": "33387716"
},
{
"name": "CMake",
"bytes": "1288737"
},
{
"name": "CSS",
"bytes": "49690"
},
{
"name": "Common Lisp",
"bytes": "858"
},
{
"name": "Coq",
"bytes": "6200"
},
{
"name": "Dockerfile",
"bytes": "2912"
},
{
"name": "Elixir",
"bytes": "34"
},
{
"name": "Erlang",
"bytes": "8204"
},
{
"name": "F#",
"bytes": "33187"
},
{
"name": "Fortran",
"bytes": "20472"
},
{
"name": "GDB",
"bytes": "701"
},
{
"name": "GLSL",
"bytes": "7478"
},
{
"name": "Go",
"bytes": "8971"
},
{
"name": "HTML",
"bytes": "6469462"
},
{
"name": "Handlebars",
"bytes": "8236"
},
{
"name": "Haskell",
"bytes": "18581"
},
{
"name": "Java",
"bytes": "120539"
},
{
"name": "JavaScript",
"bytes": "5055335"
},
{
"name": "Jupyter Notebook",
"bytes": "1849172"
},
{
"name": "LLVM",
"bytes": "43431"
},
{
"name": "MATLAB",
"bytes": "462980"
},
{
"name": "Makefile",
"bytes": "1622666"
},
{
"name": "Objective-C",
"bytes": "2001"
},
{
"name": "PostScript",
"bytes": "45490"
},
{
"name": "PowerShell",
"bytes": "192867"
},
{
"name": "Python",
"bytes": "726138"
},
{
"name": "R",
"bytes": "31364"
},
{
"name": "Roff",
"bytes": "5700"
},
{
"name": "Ruby",
"bytes": "5865"
},
{
"name": "Rust",
"bytes": "797104"
},
{
"name": "Sage",
"bytes": "654"
},
{
"name": "Scala",
"bytes": "42383"
},
{
"name": "Shell",
"bytes": "154039"
},
{
"name": "TLA",
"bytes": "16779"
},
{
"name": "TSQL",
"bytes": "3412"
},
{
"name": "TeX",
"bytes": "6989202"
},
{
"name": "TypeScript",
"bytes": "8845"
},
{
"name": "Visual Basic .NET",
"bytes": "1090"
},
{
"name": "WebAssembly",
"bytes": "70321"
},
{
"name": "q",
"bytes": "13889"
}
],
"symlink_target": ""
} |
from bitmessage.bitmessage import Bitmessage
import unittest
import base64
import time
class TestBitmessage(unittest.TestCase):
def setUp(self):
try:
self.bitmessage = Bitmessage()
except:
self.fail("An exception was raised establishing a Bitmessage connection")
def tearDown(self):
self.bitmessage = None
def test_create_address_random(self):
label = "Test"
encoded_label = base64.b64encode(label) + '\n'
address = self.bitmessage.create_address(label, random=True)
addresses = self.bitmessage.get_addresses()
found = False
for ad in addresses['addresses']:
if encoded_label == ad['label'] and address == ad['address']:
found = True
break
if not found:
self.fail("Failed to create a new bitmessage address")
def test_create_address_deterministic(self):
password = "asdf123"
address = self.bitmessage.create_address(password, random=False)
self.assertTrue(address == 'BM-2DCBxwnwRV43bfsy3GnKRgxMY77mkqRKoE')
def test_broadcast(self):
message = "Hello World"
subject = "Test Broadcast"
address = self.bitmessage.create_address("Unit Test: Broadcast", random=True)
ack_data = self.bitmessage.send_broadcast(address, subject, message)
timeout = 600 # 10 minutes
start_time = time.time()
curr_time = time.time()
sent = False
while curr_time - start_time < timeout:
status = self.bitmessage.get_sending_status(ack_data)
if 'sent' in status:
sent = True
break
curr_time = time.time()
time.sleep(3)
if not sent:
self.fail("Failed to send broadcast")
def test_send_message(self):
message = "Hello World"
subject = "Test Message"
address = self.bitmessage.create_address("Unit Test: Message")
ack_data = self.bitmessage.send_message(
address, address, subject, message)
timeout = 600 # 10 minutes
start_time = time.time()
curr_time = time.time()
sent = False
while curr_time - start_time < timeout:
status = self.bitmessage.get_sending_status(ack_data)
if 'sent' in status:
sent = True
break
curr_time = time.time()
time.sleep(3)
if not sent:
self.fail("Failed to send message")
def test_check_inbox(self):
self.test_send_message()
inbox = self.bitmessage.check_inbox(trash=True)
self.assertTrue(len(inbox) >= 1)
suite = unittest.TestLoader().loadTestsFromTestCase(TestBitmessage)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "2087283e9c34b65efa9075ac9cf59326",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 85,
"avg_line_length": 31.477777777777778,
"alnum_prop": 0.5979527003176844,
"repo_name": "FreeJournal/freejournal",
"id": "c0f91e35adfd87de3d13d2bfeba4d171e6efef4c",
"size": "2833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittests/test_bitmessage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5362"
},
{
"name": "HTML",
"bytes": "5137"
},
{
"name": "JavaScript",
"bytes": "542"
},
{
"name": "Python",
"bytes": "140221"
}
],
"symlink_target": ""
} |
"""
Copyright 2014 Sotera Defense Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import tangelo
import datawake.util.dataconnector.factory as factory
from datawake.util.db import datawake_mysql as db
from datawake.util.graph import helper as graph_helper
from datawake.util.session.helper import is_in_session
from datawake.util.session.helper import has_team
from datawake.util.session.helper import has_domain
from datawake.util.session.helper import has_trail
from datawake.util.session import helper
"""
Serves graphs for the datawake forensic viewer. Graph building is primarily done in datawake.util.graphs
"""
DEBUG = True
#
# Return the graph display options
#
@is_in_session
def listGraphs():
graphs = dict(graphs=[
'browse path',
'browse path - with adjacent urls',
'browse path - with adjacent urls min degree 2',
'browse path - with adjacent phone #\'s',
'browse path - with adjacent email #\'s',
'browse path - with phone and email #\'s',
'browse path - with bitcoin addresses',
'browse path - with text selections',
'browse path - with adjacent info',])
return json.dumps(graphs)
#
# return all time stamps from the selected trail,users,org
# returns a dictionary of the form {'min':0,'max':0,'data':[]}
#
@is_in_session
def getTimeWindow(users, trail=u'*'):
org = helper.get_org()
if trail == u'':
trail = u'*'
print 'getTimeWindow(', users, ',', trail, ')'
if len(users) > 0:
users = users.split(",")
else:
users = []
return json.dumps(db.getTimeWindow(org, users, trail))
@is_in_session
def get_entities(trail_id):
tangelo.log('Getting entities for trail: %s' % trail_id)
entities = {}
entityList = []
urls = []
rows = db.getBrowsePathUrls(trail_id)
for row in rows:
urls.append(row['url'])
entity_data_connector = factory.get_entity_data_connector()
results = entity_data_connector.get_extracted_entities_from_urls(urls)
tangelo.log('Got entities')
for result in results:
for entityType in results[result]:
for entityName in results[result][entityType]:
if entityName in entities:
entities[entityName]['pages'] = entities[entityName]['pages'] + 1
else:
entities[entityName] = {'type': entityType, 'pages':1}
    # TODO either figure out how to map the data or do this differently
for entity in entities:
entityList.append({'name': entity, 'type': entities[entity]['type'], 'pages': entities[entity]['pages']})
return json.dumps(entityList)
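# For illustration only (the entity values below are invented, not real data):
# the JSON returned above is a flat list of per-entity page counts, e.g.
#   [{"name": "555-0100", "type": "phone", "pages": 3},
#    {"name": "user@example.com", "type": "email", "pages": 1}]
# where "pages" counts how many of the trail's URLs the entity was extracted from.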
@is_in_session
def get_links(domain_name, trail_name):
tangelo.log('Getting links for %s:%s'%(domain_name,trail_name))
results = db.get_prefetch_results(domain_name, trail_name)
return json.dumps(results)
@is_in_session
def get_visited(trail_id):
tangelo.log('Getting visited links for %s'%trail_id)
results = db.getBrowsePathUrls(trail_id)
return json.dumps(results)
@is_in_session
@has_team
@has_domain
@has_trail
@tangelo.types(trail_id=int,domain_id=int,team_id=int,startdate=int,enddate=int)
def getGraph(team_id,domain_id,trail_id,view, startdate=u'', enddate=u'', users=[]):
tangelo.log('getGraph( )')
tangelo.log(users)
if view == 'browse path':
graph = graph_helper.getBrowsePathEdges(trail_id,startdate, enddate, users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with adjacent urls':
graph = graph_helper.getBrowsePathAndAdjacentWebsiteEdgesWithLimit(domain_id,trail_id, startdate, enddate, 1, users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with adjacent urls min degree 2':
graph = graph_helper.getBrowsePathAndAdjacentWebsiteEdgesWithLimit(domain_id,trail_id, startdate, enddate, 2, users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with adjacent phone #\'s':
graph = graph_helper.getBrowsePathAndAdjacentPhoneEdgesWithLimit(domain_id,trail_id, startdate, enddate, 1, users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with adjacent email #\'s':
graph = graph_helper.getBrowsePathAndAdjacentEmailEdgesWithLimit(domain_id,trail_id, startdate, enddate, 1, users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with phone and email #\'s':
graph = graph_helper.getBrowsePathAndAdjacentEdgesWithLimit(domain_id,trail_id,startdate,enddate,['email','phone'],1,users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with adjacent info':
graph = graph_helper.getBrowsePathAndAdjacentInfoEdges(domain_id,trail_id, startdate, enddate,1,users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'browse path - with bitcoin addresses':
graph = graph_helper.getBrowsePathAndAdjacentBitcoinEdgesWithLimit(domain_id,trail_id,startdate,enddate,1,users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
if view == 'OculusForensicRequest':
rows = graph_helper.getOculusForensicGraph(org,startdate,enddate,userlist,trail,domain)
return json.dumps(rows)
if view == 'browse path - with text selections':
graph = graph_helper.getBrowsePathWithTextSelections(trail_id, startdate, enddate,users)
return json.dumps(graph_helper.processEdges(graph['edges'], graph['nodes']))
return json.dumps(dict(nodes=[], links=[]))
get_actions = {
'list': listGraphs,
}
post_actions = {
'timewindow': getTimeWindow,
'get': getGraph,
'entities': get_entities,
'links': get_links,
'visited': get_visited
}
@tangelo.restful
def post(action, *args, **kwargs):
post_data = json.loads(tangelo.request_body().read())
def unknown(**kwargs):
return tangelo.HTTPStatusCode(400, "invalid service call")
return post_actions.get(action, unknown)(**post_data)
@tangelo.restful
def get(action, *args, **kwargs):
def unknown(**kwargs):
return tangelo.HTTPStatusCode(400, "invalid service call")
return get_actions.get(action, unknown)(**kwargs)
| {
"content_hash": "db742cd90499f5b1dd18f979bd5a62d2",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 131,
"avg_line_length": 37.005102040816325,
"alnum_prop": 0.666758582655453,
"repo_name": "Sotera/Datawake",
"id": "cefae9d8b41b193aacb8df15cd2e4098fc1e51a1",
"size": "7253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/datawake/forensic/graphservice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29169"
},
{
"name": "HTML",
"bytes": "45221"
},
{
"name": "JavaScript",
"bytes": "218139"
},
{
"name": "Perl",
"bytes": "3571"
},
{
"name": "Python",
"bytes": "199251"
},
{
"name": "Shell",
"bytes": "730"
}
],
"symlink_target": ""
} |
import os
import logging
from django.core.urlresolvers import reverse
from django.contrib.sites.models import RequestSite
from django.db.models import F
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
import seaserv
from seaserv import seafile_api
from seahub.auth.decorators import login_required
from seahub.avatar.templatetags.avatar_tags import avatar
from seahub.avatar.templatetags.group_avatar_tags import grp_avatar
from seahub.contacts.models import Contact
from seahub.forms import RepoPassowrdForm
from seahub.options.models import UserOptions, CryptoOptionNotSetError
from seahub.share.models import FileShare, UploadLinkShare, \
check_share_link_access, set_share_link_access
from seahub.share.forms import SharedLinkPasswordForm
from seahub.views import gen_path_link, get_repo_dirents, \
check_repo_access_permission
from seahub.utils import gen_file_upload_url, is_org_context, \
get_fileserver_root, gen_dir_share_link, gen_shared_upload_link, \
get_max_upload_file_size, new_merge_with_no_conflict, \
get_commit_before_new_merge, user_traffic_over_limit, \
get_file_type_and_ext
from seahub.settings import ENABLE_SUB_LIBRARY, FORCE_SERVER_CRYPTO, \
ENABLE_UPLOAD_FOLDER, \
ENABLE_THUMBNAIL, THUMBNAIL_ROOT, THUMBNAIL_DEFAULT_SIZE, PREVIEW_DEFAULT_SIZE
from seahub.utils.file_types import IMAGE
from seahub.thumbnail.utils import get_thumbnail_src
# Get an instance of a logger
logger = logging.getLogger(__name__)
def get_repo(repo_id):
return seafile_api.get_repo(repo_id)
def get_commit(repo_id, repo_version, commit_id):
return seaserv.get_commit(repo_id, repo_version, commit_id)
def get_repo_size(repo_id):
return seafile_api.get_repo_size(repo_id)
def is_password_set(repo_id, username):
return seafile_api.is_password_set(repo_id, username)
# def check_dir_access_permission(username, repo_id, path):
# """Check user has permission to view the directory.
# 1. check whether this directory is private shared.
# 2. if failed, check whether the parent of this directory is private shared.
# """
# pfs = PrivateFileDirShare.objects.get_private_share_in_dir(username,
# repo_id, path)
# if pfs is None:
# dirs = PrivateFileDirShare.objects.list_private_share_in_dirs_by_user_and_repo(username, repo_id)
# for e in dirs:
# if path.startswith(e.path):
# return e.permission
# return None
# else:
# return pfs.permission
def get_path_from_request(request):
path = request.GET.get('p', '/')
if path[-1] != '/':
path = path + '/'
return path
def get_next_url_from_request(request):
return request.GET.get('next', None)
def get_nav_path(path, repo_name):
return gen_path_link(path, repo_name)
def get_shared_groups_by_repo_and_user(repo_id, username):
"""Get all groups which this repo is shared.
"""
repo_shared_groups = seaserv.get_shared_groups_by_repo(repo_id)
# Filter out groups that user is joined.
groups = [x for x in repo_shared_groups if seaserv.is_group_user(x.id, username)]
return groups
def is_no_quota(repo_id):
return True if seaserv.check_quota(repo_id) < 0 else False
def get_upload_url(request, repo_id):
username = request.user.username
if check_repo_access_permission(repo_id, request.user) == 'rw':
token = seafile_api.get_fileserver_access_token(repo_id, 'dummy',
'upload', username)
return gen_file_upload_url(token, 'upload')
else:
return ''
# def get_api_upload_url(request, repo_id):
# """Get file upload url for web api.
# """
# username = request.user.username
# if check_repo_access_permission(repo_id, request.user) == 'rw':
# token = seafile_api.get_fileserver_access_token(repo_id, 'dummy',
# 'upload', username)
# return gen_file_upload_url(token, 'upload-api')
# else:
# return ''
# def get_api_update_url(request, repo_id):
# username = request.user.username
# if check_repo_access_permission(repo_id, request.user) == 'rw':
# token = seafile_api.get_fileserver_access_token(repo_id, 'dummy',
# 'update', username)
# return gen_file_upload_url(token, 'update-api')
# else:
# return ''
def get_fileshare(repo_id, username, path):
if path == '/': # no shared link for root dir
return None
l = FileShare.objects.filter(repo_id=repo_id).filter(
username=username).filter(path=path)
return l[0] if len(l) > 0 else None
def get_dir_share_link(fileshare):
# dir shared link
if fileshare:
dir_shared_link = gen_dir_share_link(fileshare.token)
else:
dir_shared_link = ''
return dir_shared_link
def get_uploadlink(repo_id, username, path):
if path == '/': # no shared upload link for root dir
return None
l = UploadLinkShare.objects.filter(repo_id=repo_id).filter(
username=username).filter(path=path)
return l[0] if len(l) > 0 else None
def get_dir_shared_upload_link(uploadlink):
# dir shared upload link
if uploadlink:
dir_shared_upload_link = gen_shared_upload_link(uploadlink.token)
else:
dir_shared_upload_link = ''
return dir_shared_upload_link
def render_repo(request, repo):
"""Steps to show repo page:
If user has permission to view repo
        If repo is encrypted and password is not set on server
return decrypt repo page
        If repo is not encrypted or password is set on server
Show repo direntries based on requested path
If user does not have permission to view repo
return permission deny page
"""
username = request.user.username
path = get_path_from_request(request)
if not seafile_api.get_dir_id_by_path(repo.id, path):
raise Http404
user_perm = check_repo_access_permission(repo.id, request.user)
if user_perm is None:
return render_to_response('repo_access_deny.html', {
'repo': repo,
}, context_instance=RequestContext(request))
sub_lib_enabled = UserOptions.objects.is_sub_lib_enabled(username)
server_crypto = False
if repo.encrypted:
try:
server_crypto = UserOptions.objects.is_server_crypto(username)
except CryptoOptionNotSetError:
return render_to_response('options/set_user_options.html', {
}, context_instance=RequestContext(request))
if (repo.enc_version == 1 or (repo.enc_version == 2 and server_crypto)) \
and not is_password_set(repo.id, username):
return render_to_response('decrypt_repo_form.html', {
'repo': repo,
'next': get_next_url_from_request(request) or reverse('repo', args=[repo.id]),
'force_server_crypto': FORCE_SERVER_CRYPTO,
}, context_instance=RequestContext(request))
# query context args
fileserver_root = get_fileserver_root()
max_upload_file_size = get_max_upload_file_size()
protocol = request.is_secure() and 'https' or 'http'
domain = RequestSite(request).domain
for g in request.user.joined_groups:
g.avatar = grp_avatar(g.id, 20)
head_commit = get_commit(repo.id, repo.version, repo.head_cmmt_id)
if not head_commit:
raise Http404
if new_merge_with_no_conflict(head_commit):
info_commit = get_commit_before_new_merge(head_commit)
else:
info_commit = head_commit
repo_size = get_repo_size(repo.id)
no_quota = is_no_quota(repo.id)
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
is_repo_owner = True if repo_owner == username else False
if is_repo_owner and not repo.is_virtual:
show_repo_settings = True
else:
show_repo_settings = False
more_start = None
file_list, dir_list, dirent_more = get_repo_dirents(request, repo,
head_commit, path,
offset=0, limit=100)
if dirent_more:
more_start = 100
zipped = get_nav_path(path, repo.name)
repo_groups = get_shared_groups_by_repo_and_user(repo.id, username)
if len(repo_groups) > 1:
repo_group_str = render_to_string("snippets/repo_group_list.html",
{'groups': repo_groups})
else:
repo_group_str = ''
upload_url = get_upload_url(request, repo.id)
fileshare = get_fileshare(repo.id, username, path)
dir_shared_link = get_dir_share_link(fileshare)
uploadlink = get_uploadlink(repo.id, username, path)
dir_shared_upload_link = get_dir_shared_upload_link(uploadlink)
if not repo.encrypted and ENABLE_THUMBNAIL:
size = THUMBNAIL_DEFAULT_SIZE
for f in file_list:
file_type, file_ext = get_file_type_and_ext(f.obj_name)
if file_type == IMAGE:
f.is_img = True
if os.path.exists(os.path.join(THUMBNAIL_ROOT, size, f.obj_id)):
f.thumbnail_src = get_thumbnail_src(repo.id, f.obj_id, size)
return render_to_response('repo.html', {
'repo': repo,
'user_perm': user_perm,
'repo_owner': repo_owner,
'is_repo_owner': is_repo_owner,
'show_repo_settings': show_repo_settings,
'current_commit': head_commit,
'info_commit': info_commit,
'password_set': True,
'repo_size': repo_size,
'dir_list': dir_list,
'file_list': file_list,
'dirent_more': dirent_more,
'more_start': more_start,
'path': path,
'zipped': zipped,
'groups': repo_groups,
'repo_group_str': repo_group_str,
'no_quota': no_quota,
'max_upload_file_size': max_upload_file_size,
'upload_url': upload_url,
'fileserver_root': fileserver_root,
'protocol': protocol,
'domain': domain,
'fileshare': fileshare,
'dir_shared_link': dir_shared_link,
'uploadlink': uploadlink,
'dir_shared_upload_link': dir_shared_upload_link,
'ENABLE_SUB_LIBRARY': ENABLE_SUB_LIBRARY,
'server_crypto': server_crypto,
'sub_lib_enabled': sub_lib_enabled,
'enable_upload_folder': ENABLE_UPLOAD_FOLDER,
'ENABLE_THUMBNAIL': ENABLE_THUMBNAIL,
'PREVIEW_DEFAULT_SIZE': PREVIEW_DEFAULT_SIZE,
}, context_instance=RequestContext(request))
@login_required
def repo(request, repo_id):
"""Show repo page and handle POST request to decrypt repo.
"""
repo = get_repo(repo_id)
if not repo:
raise Http404
if request.method == 'GET':
return render_repo(request, repo)
elif request.method == 'POST':
form = RepoPassowrdForm(request.POST)
next = get_next_url_from_request(request) or reverse('repo',
args=[repo_id])
if form.is_valid():
return HttpResponseRedirect(next)
else:
return render_to_response('decrypt_repo_form.html', {
'repo': repo,
'form': form,
'next': next,
'force_server_crypto': FORCE_SERVER_CRYPTO,
}, context_instance=RequestContext(request))
@login_required
def repo_history_view(request, repo_id):
"""View repo in history.
"""
repo = get_repo(repo_id)
if not repo:
raise Http404
username = request.user.username
path = get_path_from_request(request)
user_perm = check_repo_access_permission(repo.id, request.user)
if user_perm is None:
return render_to_response('repo_access_deny.html', {
'repo': repo,
}, context_instance=RequestContext(request))
try:
server_crypto = UserOptions.objects.is_server_crypto(username)
except CryptoOptionNotSetError:
# Assume server_crypto is ``False`` if this option is not set.
server_crypto = False
if repo.encrypted and \
(repo.enc_version == 1 or (repo.enc_version == 2 and server_crypto)) \
and not is_password_set(repo.id, username):
return render_to_response('decrypt_repo_form.html', {
'repo': repo,
'next': get_next_url_from_request(request) or reverse('repo', args=[repo.id]),
'force_server_crypto': FORCE_SERVER_CRYPTO,
}, context_instance=RequestContext(request))
commit_id = request.GET.get('commit_id', None)
if commit_id is None:
return HttpResponseRedirect(reverse('repo', args=[repo.id]))
current_commit = get_commit(repo.id, repo.version, commit_id)
if not current_commit:
current_commit = get_commit(repo.id, repo.version, repo.head_cmmt_id)
file_list, dir_list, dirent_more = get_repo_dirents(request, repo,
current_commit, path)
zipped = get_nav_path(path, repo.name)
return render_to_response('repo_history_view.html', {
'repo': repo,
'user_perm': user_perm,
'current_commit': current_commit,
'dir_list': dir_list,
'file_list': file_list,
'path': path,
'zipped': zipped,
}, context_instance=RequestContext(request))
########## shared dir/uploadlink
def view_shared_dir(request, token):
assert token is not None # Checked by URLconf
fileshare = FileShare.objects.get_valid_dir_link_by_token(token)
if fileshare is None:
raise Http404
if fileshare.is_encrypted():
if not check_share_link_access(request, token):
d = {'token': token, 'view_name': 'view_shared_dir', }
if request.method == 'POST':
post_values = request.POST.copy()
post_values['enc_password'] = fileshare.password
form = SharedLinkPasswordForm(post_values)
d['form'] = form
if form.is_valid():
set_share_link_access(request, token)
else:
return render_to_response('share_access_validation.html', d,
context_instance=RequestContext(request))
else:
return render_to_response('share_access_validation.html', d,
context_instance=RequestContext(request))
username = fileshare.username
repo_id = fileshare.repo_id
path = request.GET.get('p', '')
path = fileshare.path if not path else path
if path[-1] != '/': # Normalize dir path
path += '/'
if not path.startswith(fileshare.path):
path = fileshare.path # Can not view upper dir of shared dir
repo = get_repo(repo_id)
if not repo:
raise Http404
dir_name = os.path.basename(path[:-1])
current_commit = seaserv.get_commits(repo_id, 0, 1)[0]
file_list, dir_list, dirent_more = get_repo_dirents(request, repo,
current_commit, path)
zipped = gen_path_link(path, '')
    if path == fileshare.path: # When user views the shared dir..
# increase shared link view_cnt,
fileshare = FileShare.objects.get(token=token)
fileshare.view_cnt = F('view_cnt') + 1
fileshare.save()
traffic_over_limit = user_traffic_over_limit(fileshare.username)
if not repo.encrypted and ENABLE_THUMBNAIL:
size = THUMBNAIL_DEFAULT_SIZE
for f in file_list:
file_type, file_ext = get_file_type_and_ext(f.obj_name)
if file_type == IMAGE:
f.is_img = True
if os.path.exists(os.path.join(THUMBNAIL_ROOT, size, f.obj_id)):
f.thumbnail_src = get_thumbnail_src(repo.id, f.obj_id, size)
return render_to_response('view_shared_dir.html', {
'repo': repo,
'token': token,
'path': path,
'username': username,
'dir_name': dir_name,
'file_list': file_list,
'dir_list': dir_list,
'zipped': zipped,
'traffic_over_limit': traffic_over_limit,
'ENABLE_THUMBNAIL': ENABLE_THUMBNAIL,
'PREVIEW_DEFAULT_SIZE': PREVIEW_DEFAULT_SIZE,
}, context_instance=RequestContext(request))
def view_shared_upload_link(request, token):
assert token is not None # Checked by URLconf
uploadlink = UploadLinkShare.objects.get_valid_upload_link_by_token(token)
if uploadlink is None:
raise Http404
if uploadlink.is_encrypted():
if not check_share_link_access(request, token):
d = {'token': token, 'view_name': 'view_shared_upload_link', }
if request.method == 'POST':
post_values = request.POST.copy()
post_values['enc_password'] = uploadlink.password
form = SharedLinkPasswordForm(post_values)
d['form'] = form
if form.is_valid():
set_share_link_access(request, token)
else:
return render_to_response('share_access_validation.html', d,
context_instance=RequestContext(request))
else:
return render_to_response('share_access_validation.html', d,
context_instance=RequestContext(request))
username = uploadlink.username
repo_id = uploadlink.repo_id
path = uploadlink.path
dir_name = os.path.basename(path[:-1])
repo = get_repo(repo_id)
if not repo:
raise Http404
uploadlink.view_cnt = F('view_cnt') + 1
uploadlink.save()
no_quota = True if seaserv.check_quota(repo_id) < 0 else False
token = seafile_api.get_fileserver_access_token(repo_id, 'dummy',
'upload', request.user.username)
ajax_upload_url = gen_file_upload_url(token, 'upload-aj')
return render_to_response('view_shared_upload_link.html', {
'repo': repo,
'token': token,
'path': path,
'username': username,
'dir_name': dir_name,
'max_upload_file_size': seaserv.MAX_UPLOAD_FILE_SIZE,
'no_quota': no_quota,
'ajax_upload_url': ajax_upload_url,
'uploadlink': uploadlink,
'enable_upload_folder': ENABLE_UPLOAD_FOLDER,
}, context_instance=RequestContext(request))
| {
"content_hash": "96a2614d40e94bb58a68331bf4b4c37f",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 107,
"avg_line_length": 38.80482897384306,
"alnum_prop": 0.5973244840817173,
"repo_name": "cloudcopy/seahub",
"id": "2ceaf3529bec977d461f44d4ec7ee38da5ff54bc",
"size": "19310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/views/repo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231001"
},
{
"name": "HTML",
"bytes": "756152"
},
{
"name": "JavaScript",
"bytes": "2430927"
},
{
"name": "PLSQL",
"bytes": "16796"
},
{
"name": "Python",
"bytes": "1508638"
},
{
"name": "Shell",
"bytes": "9365"
}
],
"symlink_target": ""
} |
from flask import render_template, flash, redirect, g, url_for, request
from app import *
from models import *
from form import FlyForm, ITAForm, NonStopForm
##GLOBALS
HIDDEN_PAIRS_D = pkl.load(open('app/static/OrigDest_HiddenCity_Targets.pkl', 'rb'))
DEST_RANK_D = pkl.load(open('app/static/OrigDest_Ranking.pkl', 'rb'))
DEST_PREDS_D = pkl.load(open('app/static/OrigDest_PricePreds.pkl', 'rb'))
APT_CITY_D = pkl.load(open('app/static/AptCode_FullName.pkl', 'rb'))
#these are calculated in: zipflights_data_parsing.fare_dist_fit()
PRICE_DIST_MEAN = -15.51
PRICE_DIST_STD = 50.84
months_l = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
MONTHS = [(str(i+1), mon) for i, mon in enumerate(months_l) ]
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html")
@app.route('/fly', methods = ['GET', 'POST'])
def fly():
form = FlyForm()
if form.validate_on_submit():
return redirect(url_for('show_dests', origin_entered = form.origin.data.upper()))
elif form.is_submitted():
flash('Invalid Input')
return render_template('fly.html', title = "Where to?", form = form)
@app.route('/fly-nonstop', methods = ['GET', 'POST'])
def nonstops():
form = NonStopForm()
form.depart_month.choices = MONTHS
form.return_month.choices = MONTHS
if form.validate_on_submit():
dep_date = datetime.date(form.depart_year.data, int(form.depart_month.data), form.depart_day.data)
ret_date = datetime.date(form.return_year.data, int(form.return_month.data), form.return_day.data)
today = datetime.date.today()
origin_entered = form.origin.data.upper()
dest_entered = form.destination.data.upper()
if dep_date >= today and ret_date >= dep_date:
if origin_entered in APT_CITY_D:
if dest_entered in APT_CITY_D:
dep_date_s = date_obj_to_s(dep_date)
ret_date_s = date_obj_to_s(ret_date)
return redirect(url_for('nonstop_deals',
origin_entered = form.origin.data.upper(),
dest_entered = form.destination.data.upper(),
departing = dep_date_s,
returning = ret_date_s))
else:
flash("Sorry! We don't have any info on flights from: " + str(dest_entered))
else:
flash("Sorry! We don't have any info on flights from: " + str(origin_entered))
else:
flash('Invalid Input')
elif form.is_submitted():
flash('Invalid Input')
return render_template('fly_nonstop.html', title = "Search Non-Stops", form = form)
@app.route('/show_dests', methods = ['GET', 'POST'])
def show_dests():
origin_entered = request.args.get('origin_entered')
if origin_entered is None:
flash("Origin:"+str(origin_entered) )
return redirect(url_for('fly'))
if origin_entered in DEST_RANK_D:
if DEST_RANK_D[origin_entered]['max_rank'] >= 10:
dests_l = [DEST_RANK_D[origin_entered][float(i)] for i in range(1,11)]
else:
rank_max = int(1 + DEST_RANK_D[origin_entered]['max_rank'])
dests_l = [DEST_RANK_D[origin_entered][float(i)] for i in range(1,rank_max)]
else:
flash("Sorry! we don't have any info on flights from: " + str(origin_entered))
return redirect(url_for('fly'))
dests_cities = []
for dest in dests_l:
if dest in APT_CITY_D and type(APT_CITY_D[dest]) is str:
dests_cities.append((dest, APT_CITY_D[dest]))
else:
dests_cities.append((dest, dest))
itaform = ITAForm()
itaform.destination.choices = dests_cities
itaform.month.choices = MONTHS
if itaform.validate_on_submit():
return redirect(url_for('check_prices',
orig = origin_entered,
dest = itaform.destination.data,
month = itaform.month.data,
duration = itaform.duration.data))
elif itaform.is_submitted():
flash('Invalid Input')
return render_template('show_dests.html',
origin_entered = origin_entered,
itaform = itaform)
@app.route('/nonstop_deals')
def nonstop_deals():
origin_entered = request.args.get('origin_entered')
dest_entered = request.args.get('dest_entered')
departing = request.args.get('departing')
returning = request.args.get('returning')
faa_orig = str(origin_entered)
faa_dest = str(dest_entered)
if faa_orig not in APT_CITY_D:
flash("Sorry! we don't have any info on flights from: " + faa_orig)
        return redirect(url_for('nonstops'))
    elif faa_dest not in APT_CITY_D:
        flash("Sorry! we don't have any info on flights to: " + faa_dest)
        return redirect(url_for('nonstops'))
response = ita_search(faa_orig,
faa_dest,
departing,
returning,
duration = True,
out_constraints = 'N',
return_constraints = 'N',
month_search = False)
num_results = ita_response_d(response)['result']['solutionCount']
nonstops_avail = num_results != 0
if nonstops_avail:
nonstop_d = ita_response_hidden_parse(response, faa_orig, faa_dest)
deal_stars = deal_checker(faa_orig, faa_dest, nonstop_d['minprice'])
if (faa_orig, faa_dest) in HIDDEN_PAIRS_D:
out_fake_dests = HIDDEN_PAIRS_D[(faa_orig, faa_dest)]
back_fake_dests = HIDDEN_PAIRS_D[(faa_dest, faa_orig)]
out_response = ita_search(faa_orig,
out_fake_dests,
departing,
None,
duration = None,
out_constraints = faa_dest,
month_search = False)
back_response = ita_search(faa_dest,
back_fake_dests,
returning,
None,
duration = None,
out_constraints = faa_orig,
month_search = False)
out_num_results = ita_response_d(out_response)['result']['solutionCount']
back_num_results = ita_response_d(back_response)['result']['solutionCount']
hiddens_avail = True
if out_num_results != 0:
out_flights_d = ita_response_hidden_parse(out_response, faa_orig, faa_dest)
else:
hiddens_avail = False
if back_num_results != 0:
back_flights_d = ita_response_hidden_parse(back_response, faa_orig, faa_dest)
else:
hiddens_avail = False
else:
hiddens_avail = False
if hiddens_avail:
hiddens_help = nonstop_d['minprice'] > (out_flights_d['minprice'] + back_flights_d['minprice'])
total_hidden = str(out_flights_d['minprice'] + back_flights_d['minprice'])
else:
hiddens_help = False
else:
deal_stars = False #No non-stops available, so can't evaluate deal
route_price = None
airlines = None
hiddens_avail = False
total_hidden = 'Too Much'
return render_template('nonstop_deals.html', **locals())
@app.route('/check_prices')
def check_prices():
faa_orig = request.args.get('orig')
faa_dest = request.args.get('dest')
month = int(request.args.get('month'))
duration = int(request.args.get('duration'))
today = datetime.date.today()
if today.month < month:
year = today.year
day_start = 1
elif today.month == month:
year = today.year
day_start = today.day
else:
year = today.year + 1
day_start = 1
day_end = calendar.monthrange(year, month)[1]
start_date = str(year) + '-' + str(month) + '-' + str(day_start)
end_date = str(year) + '-' + str(month) + '-' + str(day_end)
response = ita_search(faa_orig, faa_dest, start_date, end_date, duration)
route_price, airlines = ita_response_airline_parse(response)
deal_stars = deal_checker(faa_orig, faa_dest, route_price)
return render_template('check_prices.html', **locals())
def deal_checker(orig, dest, route_price):
pred_price = DEST_PREDS_D[orig][dest]
if type(route_price) is not float and type(route_price) is not int:
avail_price = float(route_price.strip('USD'))
else:
avail_price = route_price
delta_price = (2 * pred_price) - avail_price
stars_int = int(stars_from_price(delta_price, PRICE_DIST_MEAN, PRICE_DIST_STD))
return str(stars_int) + (' Stars' if stars_int > 1 else ' Star')
| {
"content_hash": "803932f2d9d82c8b4ec44c156650b080",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 107,
"avg_line_length": 38.13636363636363,
"alnum_prop": 0.5527142702351284,
"repo_name": "BradAJ/zipflights",
"id": "b94408842b9b4cfcb88cb1c66d939c42fe777833",
"size": "9229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_app/app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8190"
},
{
"name": "HTML",
"bytes": "194584"
},
{
"name": "Python",
"bytes": "31026"
}
],
"symlink_target": ""
} |
import math
def is_prime(num):
if num < 2:
return False
for i in range(num):
if i < 2:
continue
if num % i == 0:
return False
return True
def get_largest_prime_factor(num):
upper = int(math.ceil(math.sqrt(num)))
prime_factor = -1
# using ceil, coz range will bump it down by 1
for i in range(upper):
if i < 2: # not a prime for sure
continue
if num % i != 0: # not a factor
continue
if not is_prime(i): # a bit expensive check
continue
prime_factor = i
return prime_factor
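# Note on scope: the scan above only tries candidate factors up to
# ceil(sqrt(num)), which is enough for the Project Euler input below
# (600851475143 = 71 * 839 * 1471 * 6857, and 6857 < sqrt(600851475143)),
# but it would return -1 for a prime num and would miss a largest prime
# factor that exceeds sqrt(num). A fuller version would divide out each
# factor as it is found and fall back to the remaining cofactor.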
if __name__ == '__main__':
#print get_largest_prime_factor(13195)
print get_largest_prime_factor(600851475143)
| {
"content_hash": "9f12d07d6edfefafb4bcfe64a7f8a8bb",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 48,
"avg_line_length": 22.233333333333334,
"alnum_prop": 0.6146926536731634,
"repo_name": "birdchan/project_euler",
"id": "5b61d5a426ed9d35fee21bba3ec5acc7c7543a5e",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problems/003/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36347"
}
],
"symlink_target": ""
} |
from baseparser import BaseParser
from BeautifulSoup import BeautifulSoup, Tag
class BBCParser(BaseParser):
SUFFIX = '?print=true'
domains = ['www.bbc.co.uk']
feeder_pat = '^http://www.bbc.co.uk/news/'
feeder_pages = ['http://www.bbc.co.uk/news/']
def _parse(self, html):
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
self.meta = soup.findAll('meta')
elt = soup.find('h1', 'story-header')
if elt is None:
self.real_article = False
return
self.title = elt.getText()
self.byline = ''
self.date = soup.find('span', 'date').getText()
div = soup.find('div', 'story-body')
if div is None:
# Hack for video articles
div = soup.find('div', 'emp-decription')
if div is None:
self.real_article = False
return
self.body = '\n'+'\n\n'.join([x.getText() for x in div.childGenerator()
if isinstance(x, Tag) and x.name == 'p'])
| {
"content_hash": "8143cb987a52ebd55e35ee4d39b44e03",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 33.75757575757576,
"alnum_prop": 0.5448833034111311,
"repo_name": "amandabee/newsdiffs",
"id": "0a0e25bf6590a5ab1007fdd950ca5bbce8a237ba",
"size": "1114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsers/bbc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "2853"
},
{
"name": "HTML",
"bytes": "11642"
},
{
"name": "JavaScript",
"bytes": "192921"
},
{
"name": "Python",
"bytes": "147908"
}
],
"symlink_target": ""
} |
"""
Interface documentation.
Maintainer: Itamar Shtull-Trauring
"""
from zope.interface import Interface, Attribute
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.versions import Version
class IAddress(Interface):
"""
An address, e.g. a TCP C{(host, port)}.
Default implementations are in L{twisted.internet.address}.
"""
### Reactor Interfaces
class IConnector(Interface):
"""
Object used to interface between connections and protocols.
Each L{IConnector} manages one connection.
"""
def stopConnecting():
"""
Stop attempting to connect.
"""
def disconnect():
"""
Disconnect regardless of the connection state.
        If we are connected, disconnect; if we are trying to connect,
        stop trying.
"""
def connect():
"""
Try to connect to remote address.
"""
def getDestination():
"""
Return destination this will try to connect to.
@return: An object which provides L{IAddress}.
"""
class IResolverSimple(Interface):
def getHostByName(name, timeout = (1, 3, 11, 45)):
"""
Resolve the domain name C{name} into an IP address.
@type name: C{str}
@type timeout: C{tuple}
@rtype: L{twisted.internet.defer.Deferred}
@return: The callback of the Deferred that is returned will be
passed a string that represents the IP address of the specified
name, or the errback will be called if the lookup times out. If
multiple types of address records are associated with the name,
A6 records will be returned in preference to AAAA records, which
will be returned in preference to A records. If there are multiple
records of the type to be returned, one will be selected at random.
@raise twisted.internet.defer.TimeoutError: Raised (asynchronously)
if the name cannot be resolved within the specified timeout period.
"""
class IResolver(IResolverSimple):
def lookupRecord(name, cls, type, timeout = 10):
"""
Lookup the records associated with the given name
that are of the given type and in the given class.
"""
def query(query, timeout = 10):
"""
Interpret and dispatch a query object to the appropriate
lookup* method.
"""
def lookupAddress(name, timeout = 10):
"""
Lookup the A records associated with C{name}.
"""
def lookupAddress6(name, timeout = 10):
"""
Lookup all the A6 records associated with C{name}.
"""
def lookupIPV6Address(name, timeout = 10):
"""
Lookup all the AAAA records associated with C{name}.
"""
def lookupMailExchange(name, timeout = 10):
"""
Lookup the MX records associated with C{name}.
"""
def lookupNameservers(name, timeout = 10):
"""
        Lookup the NS records associated with C{name}.
"""
def lookupCanonicalName(name, timeout = 10):
"""
Lookup the CNAME records associated with C{name}.
"""
def lookupMailBox(name, timeout = 10):
"""
Lookup the MB records associated with C{name}.
"""
def lookupMailGroup(name, timeout = 10):
"""
Lookup the MG records associated with C{name}.
"""
def lookupMailRename(name, timeout = 10):
"""
Lookup the MR records associated with C{name}.
"""
def lookupPointer(name, timeout = 10):
"""
Lookup the PTR records associated with C{name}.
"""
def lookupAuthority(name, timeout = 10):
"""
Lookup the SOA records associated with C{name}.
"""
def lookupNull(name, timeout = 10):
"""
Lookup the NULL records associated with C{name}.
"""
def lookupWellKnownServices(name, timeout = 10):
"""
Lookup the WKS records associated with C{name}.
"""
def lookupHostInfo(name, timeout = 10):
"""
Lookup the HINFO records associated with C{name}.
"""
def lookupMailboxInfo(name, timeout = 10):
"""
Lookup the MINFO records associated with C{name}.
"""
def lookupText(name, timeout = 10):
"""
Lookup the TXT records associated with C{name}.
"""
def lookupResponsibility(name, timeout = 10):
"""
Lookup the RP records associated with C{name}.
"""
def lookupAFSDatabase(name, timeout = 10):
"""
Lookup the AFSDB records associated with C{name}.
"""
def lookupService(name, timeout = 10):
"""
Lookup the SRV records associated with C{name}.
"""
def lookupAllRecords(name, timeout = 10):
"""
Lookup all records associated with C{name}.
"""
def lookupZone(name, timeout = 10):
"""
Perform a zone transfer for the given C{name}.
"""
class IReactorArbitrary(Interface):
"""
This interface is redundant with L{IReactorFDSet} and is deprecated.
"""
deprecatedModuleAttribute(
Version("Twisted", 10, 1, 0),
"See IReactorFDSet.",
__name__,
"IReactorArbitrary")
def listenWith(portType, *args, **kw):
"""
Start an instance of the given C{portType} listening.
@type portType: type which implements L{IListeningPort}
@param portType: The object given by C{portType(*args, **kw)} will be
started listening.
@return: an object which provides L{IListeningPort}.
"""
def connectWith(connectorType, *args, **kw):
"""
Start an instance of the given C{connectorType} connecting.
@type connectorType: type which implements L{IConnector}
@param connectorType: The object given by C{connectorType(*args, **kw)}
will be started connecting.
@return: An object which provides L{IConnector}.
"""
# Alias for IReactorArbitrary so that internal Twisted code can continue to
# provide the interface without emitting a deprecation warning. This can be
# removed when IReactorArbitrary is removed.
_IReactorArbitrary = IReactorArbitrary
class IReactorTCP(Interface):
def listenTCP(port, factory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param backlog: size of the listen queue
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses. To bind to all IPv4 and IPv6
addresses, you must call this method twice.
@return: an object that provides L{IListeningPort}.
@raise CannotListenError: as defined here
L{twisted.internet.error.CannotListenError},
if it cannot listen on this port (e.g., it
cannot bind to the required port number)
"""
def connectTCP(host, port, factory, timeout=30, bindAddress=None):
"""
Connect a TCP client.
@param host: a host name
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind
to, or None.
@return: An object which provides L{IConnector}. This connector will
call various callbacks on the factory when a connection is
made, failed, or lost - see
L{ClientFactory<twisted.internet.protocol.ClientFactory>}
docs for details.
"""
class IReactorSSL(Interface):
def connectSSL(host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""
Connect a client Protocol to a remote SSL socket.
@param host: a host name
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ClientContextFactory} object.
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind to,
or C{None}.
@return: An object which provides L{IConnector}.
"""
def listenSSL(port, factory, contextFactory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
The connection is a SSL one, using contexts created by the context
factory.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ContextFactory} instance
@param backlog: size of the listen queue
@param interface: the hostname to bind to, defaults to '' (all)
"""
class IReactorUNIX(Interface):
"""
UNIX socket methods.
"""
def connectUNIX(address, factory, timeout=30, checkPID=0):
"""
Connect a client protocol to a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the connection
has failed.
@param checkPID: if True, check for a pid file to verify that a server
is listening. If C{address} is a Linux abstract namespace path,
this must be C{False}.
@return: An object which provides L{IConnector}.
"""
def listenUNIX(address, factory, backlog=50, mode=0666, wantPID=0):
"""
Listen on a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.Factory} instance.
@param backlog: number of connections to allow in backlog.
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param wantPID: if True, create a pidfile for the socket. If C{address}
is a Linux abstract namespace path, this must be C{False}.
@return: An object which provides L{IListeningPort}.
"""
class IReactorUNIXDatagram(Interface):
"""
Datagram UNIX socket methods.
"""
def connectUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0666, bindAddress=None):
"""
Connect a client protocol to a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.ConnectedDatagramProtocol} instance
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param bindAddress: address to bind to
@return: An object which provides L{IConnector}.
"""
def listenUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0666):
"""
Listen on a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.DatagramProtocol} instance.
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@return: An object which provides L{IListeningPort}.
"""
class IReactorWin32Events(Interface):
"""
Win32 Event API methods
@since: 10.2
"""
def addEvent(event, fd, action):
"""
Add a new win32 event to the event loop.
@param event: a Win32 event object created using win32event.CreateEvent()
@param fd: an instance of L{twisted.internet.abstract.FileDescriptor}
@param action: a string that is a method name of the fd instance.
This method is called in response to the event.
@return: None
"""
def removeEvent(event):
"""
Remove an event.
@param event: a Win32 event object added using L{IReactorWin32Events.addEvent}
@return: None
"""
class IReactorUDP(Interface):
"""
UDP socket methods.
"""
def listenUDP(port, protocol, interface='', maxPacketSize=8192):
"""
Connects a given DatagramProtocol to the given numeric UDP port.
@return: object which provides L{IListeningPort}.
"""
class IReactorMulticast(Interface):
"""
UDP socket methods that support multicast.
IMPORTANT: This is an experimental new interface. It may change
    without backwards compatibility. Suggestions are welcome.
"""
def listenMulticast(port, protocol, interface='', maxPacketSize=8192,
listenMultiple=False):
"""
Connects a given
L{DatagramProtocol<twisted.internet.protocol.DatagramProtocol>} to the
given numeric UDP port.
@param listenMultiple: If set to True, allows multiple sockets to
bind to the same address and port number at the same time.
@type listenMultiple: C{bool}
@returns: An object which provides L{IListeningPort}.
@see: L{twisted.internet.interfaces.IMulticastTransport}
@see: U{http://twistedmatrix.com/documents/current/core/howto/udp.html}
"""
class IReactorSocket(Interface):
"""
Methods which allow a reactor to use externally created sockets.
For example, to use C{adoptStreamPort} to implement behavior equivalent
to that of L{IReactorTCP.listenTCP}, you might write code like this::
from socket import SOMAXCONN, AF_INET, SOCK_STREAM, socket
portSocket = socket(AF_INET, SOCK_STREAM)
# Set FD_CLOEXEC on port, left as an exercise. Then make it into a
# non-blocking listening port:
portSocket.setblocking(False)
portSocket.bind(('192.168.1.2', 12345))
portSocket.listen(SOMAXCONN)
# Now have the reactor use it as a TCP port
port = reactor.adoptStreamPort(
portSocket.fileno(), AF_INET, YourFactory())
# portSocket itself is no longer necessary, and needs to be cleaned
# up by us.
portSocket.close()
# Whenever the server is no longer needed, stop it as usual.
stoppedDeferred = port.stopListening()
Another potential use is to inherit a listening descriptor from a parent
process (for example, systemd or launchd), or to receive one over a UNIX
domain socket.
Some plans for extending this interface exist. See:
- U{http://twistedmatrix.com/trac/ticket/5570}: established connections
- U{http://twistedmatrix.com/trac/ticket/5573}: AF_UNIX ports
- U{http://twistedmatrix.com/trac/ticket/5574}: SOCK_DGRAM sockets
"""
def adoptStreamPort(fileDescriptor, addressFamily, factory):
"""
Add an existing listening I{SOCK_STREAM} socket to the reactor to
monitor for new connections to accept and handle.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptStreamPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create new
protocols to handle connections accepted via this socket.
@return: An object providing L{IListeningPort}.
@raise UnsupportedAddressFamily: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
        @raise UnsupportedSocketType: If the given socket type is not supported
            by this reactor, or not supported with the given address family.
"""
class IReactorProcess(Interface):
def spawnProcess(processProtocol, executable, args=(), env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn a process, with a process protocol.
@type processProtocol: L{IProcessProtocol} provider
@param processProtocol: An object which will be notified of all
events related to the created process.
@param executable: the file name to spawn - the full path should be
used.
@param args: the command line arguments to pass to the process; a
sequence of strings. The first string should be the
executable's name.
@type env: a C{dict} mapping C{str} to C{str}, or C{None}.
@param env: the environment variables to pass to the child process. The
resulting behavior varies between platforms. If
- C{env} is not set:
- On POSIX: pass an empty environment.
- On Windows: pass C{os.environ}.
- C{env} is C{None}:
- On POSIX: pass C{os.environ}.
- On Windows: pass C{os.environ}.
- C{env} is a C{dict}:
- On POSIX: pass the key/value pairs in C{env} as the
complete environment.
- On Windows: update C{os.environ} with the key/value
pairs in the C{dict} before passing it. As a
consequence of U{bug #1640
<http://twistedmatrix.com/trac/ticket/1640>}, passing
keys with empty values in an effort to unset
environment variables I{won't} unset them.
@param path: the path to run the subprocess in - defaults to the
current directory.
@param uid: user ID to run the subprocess as. (Only available on
POSIX systems.)
@param gid: group ID to run the subprocess as. (Only available on
POSIX systems.)
@param usePTY: if true, run this process in a pseudo-terminal.
optionally a tuple of C{(masterfd, slavefd, ttyname)},
in which case use those file descriptors.
(Not available on all systems.)
@param childFDs: A dictionary mapping file descriptors in the new child
process to an integer or to the string 'r' or 'w'.
If the value is an integer, it specifies a file
descriptor in the parent process which will be mapped
to a file descriptor (specified by the key) in the
child process. This is useful for things like inetd
and shell-like file redirection.
If it is the string 'r', a pipe will be created and
attached to the child at that file descriptor: the
child will be able to write to that file descriptor
and the parent will receive read notification via the
L{IProcessProtocol.childDataReceived} callback. This
is useful for the child's stdout and stderr.
If it is the string 'w', similar setup to the previous
case will occur, with the pipe being readable by the
child instead of writeable. The parent process can
write to that file descriptor using
L{IProcessTransport.writeToChild}. This is useful for
the child's stdin.
If childFDs is not passed, the default behaviour is to
use a mapping that opens the usual stdin/stdout/stderr
pipes.
@see: L{twisted.internet.protocol.ProcessProtocol}
@return: An object which provides L{IProcessTransport}.
@raise OSError: Raised with errno C{EAGAIN} or C{ENOMEM} if there are
insufficient system resources to create a new process.
"""
class IReactorTime(Interface):
"""
Time methods that a Reactor should implement.
"""
def seconds():
"""
Get the current time in seconds.
@return: A number-like object of some sort.
"""
def callLater(delay, callable, *args, **kw):
"""
Call a function later.
@type delay: C{float}
@param delay: the number of seconds to wait.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: An object which provides L{IDelayedCall} and can be used to
cancel the scheduled call, by calling its C{cancel()} method.
It also may be rescheduled by calling its C{delay()} or
C{reset()} methods.
"""
def getDelayedCalls():
"""
Retrieve all currently scheduled delayed calls.
@return: A tuple of all L{IDelayedCall} providers representing all
currently scheduled calls. This is everything that has been
returned by C{callLater} but not yet called or canceled.
"""
class IDelayedCall(Interface):
"""
A scheduled call.
There are probably other useful methods we can add to this interface;
suggestions are welcome.
"""
def getTime():
"""
Get time when delayed call will happen.
@return: time in seconds since epoch (a float).
"""
def cancel():
"""
Cancel the scheduled call.
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def delay(secondsLater):
"""
Delay the scheduled call.
@param secondsLater: how many seconds from its current firing time to delay
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def reset(secondsFromNow):
"""
Reset the scheduled call's timer.
@param secondsFromNow: how many seconds from now it should fire,
equivalent to C{.cancel()} and then doing another
C{reactor.callLater(secondsLater, ...)}
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def active():
"""
@return: True if this call is still active, False if it has been
called or cancelled.
"""
class IReactorThreads(Interface):
"""
Dispatch methods to be run in threads.
Internally, this should use a thread pool and dispatch methods to them.
"""
def getThreadPool():
"""
Return the threadpool used by L{callInThread}. Create it first if
necessary.
@rtype: L{twisted.python.threadpool.ThreadPool}
"""
def callInThread(callable, *args, **kwargs):
"""
Run the callable object in a separate thread.
"""
def callFromThread(callable, *args, **kw):
"""
Cause a function to be executed by the reactor thread.
Use this method when you want to run a function in the reactor's thread
from another thread. Calling L{callFromThread} should wake up the main
thread (where L{reactor.run()<reactor.run>} is executing) and run the
given callable in that thread.
If you're writing a multi-threaded application the C{callable} may need
to be thread safe, but this method doesn't require it as such. If you
want to call a function in the next mainloop iteration, but you're in
the same thread, use L{callLater} with a delay of 0.
"""
def suggestThreadPoolSize(size):
"""
Suggest the size of the internal threadpool used to dispatch functions
passed to L{callInThread}.
"""
class IReactorCore(Interface):
"""
Core methods that a Reactor must implement.
"""
running = Attribute(
"A C{bool} which is C{True} from I{during startup} to "
"I{during shutdown} and C{False} the rest of the time.")
def resolve(name, timeout=10):
"""
Return a L{twisted.internet.defer.Deferred} that will resolve a hostname.
"""
def run():
"""
Fire 'startup' System Events, move the reactor to the 'running'
state, then run the main loop until it is stopped with C{stop()} or
C{crash()}.
"""
def stop():
"""
Fire 'shutdown' System Events, which will move the reactor to the
'stopped' state and cause C{reactor.run()} to exit.
"""
def crash():
"""
Stop the main loop *immediately*, without firing any system events.
This is named as it is because this is an extremely "rude" thing to do;
it is possible to lose data and put your system in an inconsistent
state by calling this. However, it is necessary, as sometimes a system
can become wedged in a pre-shutdown call.
"""
def iterate(delay=0):
"""
Run the main loop's I/O polling function for a period of time.
This is most useful in applications where the UI is being drawn "as
fast as possible", such as games. All pending L{IDelayedCall}s will
be called.
The reactor must have been started (via the C{run()} method) prior to
any invocations of this method. It must also be stopped manually
after the last call to this method (via the C{stop()} method). This
method is not re-entrant: you must not call it recursively; in
particular, you must not call it while the reactor is running.
"""
def fireSystemEvent(eventType):
"""
Fire a system-wide event.
System-wide events are things like 'startup', 'shutdown', and
'persist'.
"""
def addSystemEventTrigger(phase, eventType, callable, *args, **kw):
"""
Add a function to be called when a system event occurs.
Each "system event" in Twisted, such as 'startup', 'shutdown', and
'persist', has 3 phases: 'before', 'during', and 'after' (in that
order, of course). These events will be fired internally by the
Reactor.
An implementor of this interface must only implement those events
described here.
Callbacks registered for the "before" phase may return either None or a
Deferred. The "during" phase will not execute until all of the
Deferreds from the "before" phase have fired.
Once the "during" phase is running, all of the remaining triggers must
execute; their return values must be ignored.
@param phase: a time to call the event -- either the string 'before',
'after', or 'during', describing when to call it
relative to the event's execution.
@param eventType: this is a string describing the type of event.
@param callable: the object to call before shutdown.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: an ID that can be used to remove this call with
removeSystemEventTrigger.
"""
def removeSystemEventTrigger(triggerID):
"""
Removes a trigger added with addSystemEventTrigger.
@param triggerID: a value returned from addSystemEventTrigger.
@raise KeyError: If there is no system event trigger for the given
C{triggerID}.
@raise ValueError: If there is no system event trigger for the given
C{triggerID}.
@raise TypeError: If there is no system event trigger for the given
C{triggerID}.
"""
def callWhenRunning(callable, *args, **kw):
"""
Call a function when the reactor is running.
If the reactor has not started, the callable will be scheduled
to run when it does start. Otherwise, the callable will be invoked
immediately.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: None if the callable was invoked, otherwise a system
event id for the scheduled call.
"""
class IReactorPluggableResolver(Interface):
"""
A reactor with a pluggable name resolver interface.
"""
def installResolver(resolver):
"""
Set the internal resolver to use to for name lookups.
@type resolver: An object implementing the L{IResolverSimple} interface
@param resolver: The new resolver to use.
@return: The previously installed resolver.
"""
class IReactorDaemonize(Interface):
"""
A reactor which provides hooks that need to be called before and after
daemonization.
Notes:
- This interface SHOULD NOT be called by applications.
- This interface should only be implemented by reactors as a workaround
(in particular, it's implemented currently only by kqueue()).
For details please see the comments on ticket #1918.
"""
def beforeDaemonize():
"""
Hook to be called immediately before daemonization. No reactor methods
may be called until L{afterDaemonize} is called.
@return: C{None}.
"""
def afterDaemonize():
"""
Hook to be called immediately after daemonization. This may only be
called after L{beforeDaemonize} had been called previously.
@return: C{None}.
"""
class IReactorFDSet(Interface):
"""
Implement me to be able to use L{IFileDescriptor} type resources.
This assumes that your main-loop uses UNIX-style numeric file descriptors
(or at least similarly opaque IDs returned from a .fileno() method)
"""
def addReader(reader):
"""
I add reader to the set of file descriptors to get read events for.
@param reader: An L{IReadDescriptor} provider that will be checked for
read events until it is removed from the reactor with
L{removeReader}.
@return: C{None}.
"""
def addWriter(writer):
"""
I add writer to the set of file descriptors to get write events for.
@param writer: An L{IWriteDescriptor} provider that will be checked for
write events until it is removed from the reactor with
L{removeWriter}.
@return: C{None}.
"""
def removeReader(reader):
"""
Removes an object previously added with L{addReader}.
@return: C{None}.
"""
def removeWriter(writer):
"""
Removes an object previously added with L{addWriter}.
@return: C{None}.
"""
def removeAll():
"""
Remove all readers and writers.
Should not remove reactor internal reactor connections (like a waker).
@return: A list of L{IReadDescriptor} and L{IWriteDescriptor} providers
which were removed.
"""
def getReaders():
"""
Return the list of file descriptors currently monitored for input
events by the reactor.
@return: the list of file descriptors monitored for input events.
@rtype: C{list} of C{IReadDescriptor}
"""
def getWriters():
"""
        Return the list of file descriptors currently monitored for output events
by the reactor.
@return: the list of file descriptors monitored for output events.
@rtype: C{list} of C{IWriteDescriptor}
"""
class IListeningPort(Interface):
"""
A listening port.
"""
def startListening():
"""
Start listening on this port.
@raise CannotListenError: If it cannot listen on this port (e.g., it is
a TCP port and it cannot bind to the required
port number).
"""
def stopListening():
"""
Stop listening on this port.
If it does not complete immediately, will return Deferred that fires
upon completion.
"""
def getHost():
"""
Get the host that this port is listening for.
@return: An L{IAddress} provider.
"""
class ILoggingContext(Interface):
"""
Give context information that will be used to log events generated by
this item.
"""
def logPrefix():
"""
@return: Prefix used during log formatting to indicate context.
@rtype: C{str}
"""
class IFileDescriptor(ILoggingContext):
"""
An interface representing a UNIX-style numeric file descriptor.
"""
def fileno():
"""
@raise: If the descriptor no longer has a valid file descriptor
number associated with it.
@return: The platform-specified representation of a file descriptor
number. Or C{-1} if the descriptor no longer has a valid file
descriptor number associated with it. As long as the descriptor
is valid, calls to this method on a particular instance must
return the same value.
"""
def connectionLost(reason):
"""
Called when the connection was lost.
This is called when the connection on a selectable object has been
lost. It will be called whether the connection was closed explicitly,
an exception occurred in an event handler, or the other end of the
connection closed it first.
See also L{IHalfCloseableDescriptor} if your descriptor wants to be
notified separately of the two halves of the connection being closed.
@param reason: A failure instance indicating the reason why the
connection was lost. L{error.ConnectionLost} and
L{error.ConnectionDone} are of special note, but the
failure may be of other classes as well.
"""
class IReadDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can read.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doRead():
"""
Some data is available for reading on your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
C{None}.
"""
class IWriteDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can write.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doWrite():
"""
Some data can be written to your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
C{None}.
"""
class IReadWriteDescriptor(IReadDescriptor, IWriteDescriptor):
"""
An L{IFileDescriptor} that can both read and write.
"""
class IHalfCloseableDescriptor(Interface):
"""
A descriptor that can be half-closed.
"""
def writeConnectionLost(reason):
"""
Indicates write connection was lost.
"""
def readConnectionLost(reason):
"""
Indicates read connection was lost.
"""
class ISystemHandle(Interface):
"""
An object that wraps a networking OS-specific handle.
"""
def getHandle():
"""
Return a system- and reactor-specific handle.
This might be a socket.socket() object, or some other type of
object, depending on which reactor is being used. Use and
manipulate at your own risk.
This might be used in cases where you want to set specific
options not exposed by the Twisted APIs.
"""
class IConsumer(Interface):
"""
A consumer consumes data from a producer.
"""
def registerProducer(producer, streaming):
"""
Register to receive data from a producer.
This sets self to be a consumer for a producer. When this object runs
out of data (as when a send(2) call on a socket succeeds in moving the
last data from a userspace buffer into a kernelspace buffer), it will
ask the producer to resumeProducing().
For L{IPullProducer} providers, C{resumeProducing} will be called once
each time data is required.
For L{IPushProducer} providers, C{pauseProducing} will be called
whenever the write buffer fills up and C{resumeProducing} will only be
called when it empties.
@type producer: L{IProducer} provider
@type streaming: C{bool}
@param streaming: C{True} if C{producer} provides L{IPushProducer},
C{False} if C{producer} provides L{IPullProducer}.
@raise RuntimeError: If a producer is already registered.
@return: C{None}
"""
def unregisterProducer():
"""
Stop consuming data from a producer, without disconnecting.
"""
def write(data):
"""
The producer will write data by calling this method.
The implementation must be non-blocking and perform whatever
buffering is necessary. If the producer has provided enough data
for now and it is a L{IPushProducer}, the consumer may call its
C{pauseProducing} method.
"""
deprecatedModuleAttribute(Version("Twisted", 11, 1, 0),
"Please use IConsumer (and IConsumer.unregisterProducer) instead.",
__name__, "IFinishableConsumer")
class IFinishableConsumer(IConsumer):
"""
A Consumer for producers that finish. This interface offers no advantages
over L{IConsumer} and is deprecated. Please use
L{IConsumer.unregisterProducer} instead of L{IFinishableConsumer.finish}.
"""
def finish():
"""
The producer has finished producing. This method is deprecated.
Please use L{IConsumer.unregisterProducer} instead.
"""
class IProducer(Interface):
"""
A producer produces data for a consumer.
    Typically producing is done by calling the write method of a class
implementing L{IConsumer}.
"""
def stopProducing():
"""
Stop producing data.
This tells a producer that its consumer has died, so it must stop
producing data for good.
"""
class IPushProducer(IProducer):
"""
    A push producer, also known as a streaming producer, is expected to
produce (write to this consumer) data on a continuous basis, unless
it has been paused. A paused push producer will resume producing
after its resumeProducing() method is called. For a push producer
which is not pauseable, these functions may be noops.
"""
def pauseProducing():
"""
Pause producing data.
Tells a producer that it has produced too much data to process for
the time being, and to stop until resumeProducing() is called.
"""
def resumeProducing():
"""
Resume producing data.
This tells a producer to re-add itself to the main loop and produce
more data for its consumer.
"""
class IPullProducer(IProducer):
"""
A pull producer, also known as a non-streaming producer, is
expected to produce data each time resumeProducing() is called.
"""
def resumeProducing():
"""
Produce data for the consumer a single time.
This tells a producer to produce data for the consumer once
(not repeatedly, once only). Typically this will be done
by calling the consumer's write() method a single time with
produced data.
"""
class IProtocol(Interface):
def dataReceived(data):
"""
Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
def connectionLost(reason):
"""
Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed. The C{reason}
Failure wraps a L{twisted.internet.error.ConnectionDone} or
L{twisted.internet.error.ConnectionLost} instance (or a subclass
of one of those).
@type reason: L{twisted.python.failure.Failure}
"""
def makeConnection(transport):
"""
Make a connection to a transport and a server.
"""
def connectionMade():
"""
Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
class IProcessProtocol(Interface):
"""
Interface for process-related event handlers.
"""
def makeConnection(process):
"""
Called when the process has been created.
@type process: L{IProcessTransport} provider
@param process: An object representing the process which has been
created and associated with this protocol.
"""
def childDataReceived(childFD, data):
"""
Called when data arrives from the child process.
@type childFD: C{int}
@param childFD: The file descriptor from which the data was
received.
@type data: C{str}
@param data: The data read from the child's file descriptor.
"""
def childConnectionLost(childFD):
"""
Called when a file descriptor associated with the child process is
closed.
@type childFD: C{int}
@param childFD: The file descriptor which was closed.
"""
def processExited(reason):
"""
Called when the child process exits.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
@since: 8.2
"""
def processEnded(reason):
"""
Called when the child process exits and all file descriptors associated
with it have been closed.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
"""
class IHalfCloseableProtocol(Interface):
"""
Implemented to indicate they want notification of half-closes.
TCP supports the notion of half-closing the connection, e.g.
closing the write side but still not stopping reading. A protocol
that implements this interface will be notified of such events,
instead of having connectionLost called.
"""
def readConnectionLost():
"""
Notification of the read connection being closed.
        This indicates the peer did a half-close of the write side. It is now
        the responsibility of this protocol to call
loseConnection(). In addition, the protocol MUST make sure a
reference to it still exists (i.e. by doing a callLater with
one of its methods, etc.) as the reactor will only have a
reference to it if it is writing.
If the protocol does not do so, it might get garbage collected
without the connectionLost method ever being called.
"""
def writeConnectionLost():
"""
Notification of the write connection being closed.
This will never be called for TCP connections as TCP does not
support notification of this type of half-close.
"""
class IFileDescriptorReceiver(Interface):
"""
Protocols may implement L{IFileDescriptorReceiver} to receive file
descriptors sent to them. This is useful in conjunction with
L{IUNIXTransport}, which allows file descriptors to be sent between
processes on a single host.
"""
def fileDescriptorReceived(descriptor):
"""
Called when a file descriptor is received over the connection.
@param descriptor: The descriptor which was received.
@type descriptor: C{int}
@return: C{None}
"""
class IProtocolFactory(Interface):
"""
Interface for protocol factories.
"""
def buildProtocol(addr):
"""
Called when a connection has been established to addr.
If None is returned, the connection is assumed to have been refused,
and the Port will close the connection.
@type addr: (host, port)
@param addr: The address of the newly-established connection
@return: None if the connection was refused, otherwise an object
providing L{IProtocol}.
"""
def doStart():
"""
Called every time this is connected to a Port or Connector.
"""
def doStop():
"""
Called every time this is unconnected from a Port or Connector.
"""
class ITransport(Interface):
"""
I am a transport for bytes.
I represent (and wrap) the physical connection and synchronicity
of the framework which is talking to the network. I make no
representations about whether calls to me will happen immediately
or require returning to a control loop, or whether they will happen
in the same or another thread. Consider methods of this class
(aside from getPeer) to be 'thrown over the wall', to happen at some
indeterminate time.
"""
def write(data):
"""
Write some data to the physical connection, in sequence, in a
non-blocking fashion.
If possible, make sure that it is all written. No data will
ever be lost, although (obviously) the connection may be closed
before it all gets through.
"""
def writeSequence(data):
"""
Write a list of strings to the physical connection.
If possible, make sure that all of the data is written to
the socket at once, without first copying it all into a
single string.
"""
def loseConnection():
"""
Close my connection, after writing all pending data.
Note that if there is a registered producer on a transport it
will not be closed until the producer has been unregistered.
"""
def getPeer():
"""
Get the remote address of this connection.
Treat this method with caution. It is the unfortunate result of the
CGI and Jabber standards, but should not be considered reliable for
the usual host of reasons; port forwarding, proxying, firewalls, IP
masquerading, etc.
@return: An L{IAddress} provider.
"""
def getHost():
"""
Similar to getPeer, but returns an address describing this side of the
connection.
@return: An L{IAddress} provider.
"""
class ITCPTransport(ITransport):
"""
A TCP based transport.
"""
def loseWriteConnection():
"""
Half-close the write side of a TCP connection.
If the protocol instance this is attached to provides
IHalfCloseableProtocol, it will get notified when the operation is
done. When closing write connection, as with loseConnection this will
only happen when buffer has emptied and there is no registered
producer.
"""
def abortConnection():
"""
Close the connection abruptly.
Discards any buffered data, stops any registered producer,
and, if possible, notifies the other end of the unclean
closure.
@since: 11.1
"""
def getTcpNoDelay():
"""
Return if C{TCP_NODELAY} is enabled.
"""
def setTcpNoDelay(enabled):
"""
Enable/disable C{TCP_NODELAY}.
Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are
sent sooner, possibly at the expense of overall throughput.
"""
def getTcpKeepAlive():
"""
Return if C{SO_KEEPALIVE} is enabled.
"""
def setTcpKeepAlive(enabled):
"""
Enable/disable C{SO_KEEPALIVE}.
Enabling C{SO_KEEPALIVE} sends packets periodically when the connection
is otherwise idle, usually once every two hours. They are intended
to allow detection of lost peers in a non-infinite amount of time.
"""
def getHost():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
def getPeer():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
class IUNIXTransport(ITransport):
"""
Transport for stream-oriented unix domain connections.
"""
def sendFileDescriptor(descriptor):
"""
Send a duplicate of this (file, socket, pipe, etc) descriptor to the
other end of this connection.
The send is non-blocking and will be queued if it cannot be performed
immediately. The send will be processed in order with respect to other
C{sendFileDescriptor} calls on this transport, but not necessarily with
respect to C{write} calls on this transport. The send can only be
processed if there are also bytes in the normal connection-oriented send
buffer (ie, you must call C{write} at least as many times as you call
C{sendFileDescriptor}).
@param descriptor: An C{int} giving a valid file descriptor in this
process. Note that a I{file descriptor} may actually refer to a
socket, a pipe, or anything else POSIX tries to treat in the same
way as a file.
@return: C{None}
"""
class ITLSTransport(ITCPTransport):
"""
A TCP transport that supports switching to TLS midstream.
Once TLS mode is started the transport will implement L{ISSLTransport}.
"""
def startTLS(contextFactory):
"""
Initiate TLS negotiation.
@param contextFactory: A context factory (see L{ssl.py<twisted.internet.ssl>})
"""
class ISSLTransport(ITCPTransport):
"""
A SSL/TLS based transport.
"""
def getPeerCertificate():
"""
Return an object with the peer's certificate info.
"""
class IProcessTransport(ITransport):
"""
A process transport.
"""
pid = Attribute(
"From before L{IProcessProtocol.makeConnection} is called to before "
"L{IProcessProtocol.processEnded} is called, C{pid} is an L{int} "
"giving the platform process ID of this process. C{pid} is L{None} "
"at all other times.")
def closeStdin():
"""
Close stdin after all data has been written out.
"""
def closeStdout():
"""
Close stdout.
"""
def closeStderr():
"""
Close stderr.
"""
def closeChildFD(descriptor):
"""
Close a file descriptor which is connected to the child process, identified
by its FD in the child process.
"""
def writeToChild(childFD, data):
"""
Similar to L{ITransport.write} but also allows the file descriptor in
the child process which will receive the bytes to be specified.
@type childFD: C{int}
@param childFD: The file descriptor to which to write.
@type data: C{str}
@param data: The bytes to write.
@return: C{None}
@raise KeyError: If C{childFD} is not a file descriptor that was mapped
in the child when L{IReactorProcess.spawnProcess} was used to create
it.
"""
def loseConnection():
"""
Close stdin, stderr and stdout.
"""
def signalProcess(signalID):
"""
Send a signal to the process.
@param signalID: can be
- one of C{"KILL"}, C{"TERM"}, or C{"INT"}.
These will be implemented in a
cross-platform manner, and so should be used
if possible.
- an integer, where it represents a POSIX
signal ID.
@raise twisted.internet.error.ProcessExitedAlready: The process has
already exited.
"""
class IServiceCollection(Interface):
"""
An object which provides access to a collection of services.
"""
def getServiceNamed(serviceName):
"""
Retrieve the named service from this application.
Raise a C{KeyError} if there is no such service name.
"""
def addService(service):
"""
Add a service to this collection.
"""
def removeService(service):
"""
Remove a service from this collection.
"""
class IUDPTransport(Interface):
"""
Transport for UDP DatagramProtocols.
"""
def write(packet, addr=None):
"""
Write packet to given address.
@param addr: a tuple of (ip, port). For connected transports must
be the address the transport is connected to, or None.
In non-connected mode this is mandatory.
@raise twisted.internet.error.MessageLengthError: C{packet} was too
long.
"""
def connect(host, port):
"""
Connect the transport to an address.
This changes it to connected mode. Datagrams can only be sent to
this address, and will only be received from this address. In addition
the protocol's connectionRefused method might get called if destination
is not receiving datagrams.
@param host: an IP address, not a domain name ('127.0.0.1', not 'localhost')
@param port: port to connect to.
"""
def getHost():
"""
Returns L{IPv4Address}.
"""
def stopListening():
"""
Stop listening on this port.
If it does not complete immediately, will return L{Deferred} that fires
upon completion.
"""
class IUNIXDatagramTransport(Interface):
"""
Transport for UDP PacketProtocols.
"""
def write(packet, address):
"""
Write packet to given address.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
class IUNIXDatagramConnectedTransport(Interface):
"""
Transport for UDP ConnectedPacketProtocols.
"""
def write(packet):
"""
Write packet to address we are connected to.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
def getPeer():
"""
Returns L{UNIXAddress}.
"""
class IMulticastTransport(Interface):
"""
Additional functionality for multicast UDP.
"""
def getOutgoingInterface():
"""
Return interface of outgoing multicast packets.
"""
def setOutgoingInterface(addr):
"""
Set interface for outgoing multicast packets.
Returns Deferred of success.
"""
def getLoopbackMode():
"""
Return if loopback mode is enabled.
"""
def setLoopbackMode(mode):
"""
Set if loopback mode is enabled.
"""
def getTTL():
"""
Get time to live for multicast packets.
"""
def setTTL(ttl):
"""
Set time to live on multicast packets.
"""
def joinGroup(addr, interface=""):
"""
Join a multicast group. Returns L{Deferred} of success or failure.
If an error occurs, the returned L{Deferred} will fail with
L{error.MulticastJoinError}.
"""
def leaveGroup(addr, interface=""):
"""
Leave multicast group, return L{Deferred} of success.
"""
class IStreamClientEndpoint(Interface):
"""
A stream client endpoint is a place that L{ClientFactory} can connect to.
For example, a remote TCP host/port pair would be a TCP client endpoint.
@since: 10.1
"""
def connect(protocolFactory):
"""
Connect the C{protocolFactory} to the location specified by this
L{IStreamClientEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
        @return: A L{Deferred} that results in an L{IProtocol} upon successful
            connection, otherwise a L{ConnectError}
"""
class IStreamServerEndpoint(Interface):
"""
A stream server endpoint is a place that a L{Factory} can listen for
incoming connections.
@since: 10.1
"""
def listen(protocolFactory):
"""
Listen with C{protocolFactory} at the location specified by this
L{IStreamServerEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
        @return: A L{Deferred} that results in an L{IListeningPort} or a
            L{CannotListenError}
"""
class IStreamServerEndpointStringParser(Interface):
"""
An L{IStreamServerEndpointStringParser} is like an
L{IStreamClientEndpointStringParser}, except for L{IStreamServerEndpoint}s
instead of clients. It integrates with L{endpoints.serverFromString} in
much the same way.
"""
prefix = Attribute(
"""
@see: L{IStreamClientEndpointStringParser.prefix}
"""
)
def parseStreamServer(reactor, *args, **kwargs):
"""
Parse a stream server endpoint from a reactor and string-only arguments
and keyword arguments.
@see: L{IStreamClientEndpointStringParser.parseStreamClient}
@return: a stream server endpoint
@rtype: L{IStreamServerEndpoint}
"""
class IStreamClientEndpointStringParser(Interface):
"""
An L{IStreamClientEndpointStringParser} is a parser which can convert
a set of string C{*args} and C{**kwargs} into an L{IStreamClientEndpoint}
provider.
This interface is really only useful in the context of the plugin system
for L{endpoints.clientFromString}. See the document entitled "I{The
Twisted Plugin System}" for more details on how to write a plugin.
If you place an L{IStreamClientEndpointStringParser} plugin in the
C{twisted.plugins} package, that plugin's C{parseStreamClient} method will
be used to produce endpoints for any description string that begins with
the result of that L{IStreamClientEndpointStringParser}'s prefix attribute.
"""
prefix = Attribute(
"""
A C{str}, the description prefix to respond to. For example, an
L{IStreamClientEndpointStringParser} plugin which had C{"foo"} for its
C{prefix} attribute would be called for endpoint descriptions like
C{"foo:bar:baz"} or C{"foo:"}.
"""
)
def parseStreamClient(*args, **kwargs):
"""
This method is invoked by L{endpoints.clientFromString}, if the type of
endpoint matches the return value from this
L{IStreamClientEndpointStringParser}'s C{prefix} method.
@param args: The string arguments, minus the endpoint type, in the
endpoint description string, parsed according to the rules
described in L{endpoints.quoteStringArgument}. For example, if the
description were C{"my-type:foo:bar:baz=qux"}, C{args} would be
C{('foo','bar')}
@param kwargs: The string arguments from the endpoint description
passed as keyword arguments. For example, if the description were
C{"my-type:foo:bar:baz=qux"}, C{kwargs} would be
C{dict(baz='qux')}.
@return: a client endpoint
@rtype: L{IStreamClientEndpoint}
"""
| {
"content_hash": "cdfddd0b6cbcb97d5bdd405c892baabf",
"timestamp": "",
"source": "github",
"line_count": 2046,
"max_line_length": 96,
"avg_line_length": 30.71065493646139,
"alnum_prop": 0.619123404526212,
"repo_name": "Varriount/Colliberation",
"id": "aac634fe40a8584578295a542fe5830d6246b71a",
"size": "62907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/twisted/internet/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "509005"
},
{
"name": "D",
"bytes": "29"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "10503398"
},
{
"name": "Shell",
"bytes": "1512"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0002_auto_20170714_1111'),
]
operations = [
migrations.AddField(
model_name='bookmark',
name='deleted_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='bookmark',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to=settings.AUTH_USER_MODEL),
),
]
| {
"content_hash": "0515a8ebd1803817f0b3807d2decccf2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 136,
"avg_line_length": 28.04,
"alnum_prop": 0.6276747503566333,
"repo_name": "kennethlove/django_bookmarks",
"id": "0c658857db6a7b5fac68c42fde8cf25f917beb60",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj_bookmarks/bookmarks/migrations/0003_auto_20170721_1052.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9316"
},
{
"name": "JavaScript",
"bytes": "594"
},
{
"name": "Python",
"bytes": "25493"
}
],
"symlink_target": ""
} |
import os
import sys
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
Base,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
#with transaction.manager:
# model = MyModel(name='one', value=1)
# DBSession.add(model)
| {
"content_hash": "e79387cf2ac3a1dffe97c17c57fd767d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 59,
"avg_line_length": 23.07894736842105,
"alnum_prop": 0.645381984036488,
"repo_name": "demophoon/sams",
"id": "ace097bcd6fca6de4bd0df36190533c779a374db",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sams/scripts/initializedb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8217"
},
{
"name": "JavaScript",
"bytes": "45906"
},
{
"name": "Python",
"bytes": "19303"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cellsamples', '0004'),
]
operations = [
migrations.AlterModelOptions(
name='celltype',
options={'ordering': ('species', 'cell_type'), 'verbose_name': 'Cell Type'},
),
migrations.RemoveField(
model_name='cellsample',
name='cell_source',
),
migrations.AlterUniqueTogether(
name='celltype',
unique_together=set([('cell_type', 'species', 'organ')]),
),
migrations.RemoveField(
model_name='celltype',
name='cell_subtype',
),
]
| {
"content_hash": "899cb48f29a5fac68221479cc7e320f8",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 88,
"avg_line_length": 26.25925925925926,
"alnum_prop": 0.5317348377997179,
"repo_name": "UPDDI/mps-database-server",
"id": "8c9ab6efe6a758130a4bd4e831d3fc089ad57f61",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cellsamples/migrations/0005.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14194"
},
{
"name": "HTML",
"bytes": "1128291"
},
{
"name": "JavaScript",
"bytes": "701549"
},
{
"name": "Python",
"bytes": "1735408"
},
{
"name": "Shell",
"bytes": "1535"
},
{
"name": "TSQL",
"bytes": "41508"
}
],
"symlink_target": ""
} |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['PolyTrend'] , ['Seasonal_Minute'] , ['MLP'] ); | {
"content_hash": "77cacec92104ddc61f6dd0a544c098bf",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 81,
"avg_line_length": 38.5,
"alnum_prop": 0.7012987012987013,
"repo_name": "antoinecarme/pyaf",
"id": "f189d7e2a2f09e54417bf8a8e500cf3343011b1d",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_Seasonal_Minute_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
try:
from httplib import BAD_REQUEST, FORBIDDEN, \
INTERNAL_SERVER_ERROR, ACCEPTED
except ImportError:
from http.client import BAD_REQUEST, FORBIDDEN, \
INTERNAL_SERVER_ERROR, ACCEPTED
from routes import route
from api import APIBaseHandler, EntityBuilder
import random
import time
from importlib import import_module
from constants import DEVICE_TYPE_IOS, DEVICE_TYPE_ANDROID, DEVICE_TYPE_WNS, \
DEVICE_TYPE_MPNS, DEVICE_TYPE_SMS
from pushservices.gcm import GCMUpdateRegIDsException, \
GCMInvalidRegistrationException, GCMNotRegisteredException, GCMException
import logging
_logger = logging.getLogger(__name__)
@route(r"/api/v2/push[\/]?")
class PushHandler(APIBaseHandler):
def validate_data(self, data):
data.setdefault('channel', 'default')
data.setdefault('sound', None)
data.setdefault('badge', None)
data.setdefault('extra', {})
return data
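    # A hedged example of the JSON body this handler accepts (field names taken
    # from the code below; the values themselves are hypothetical):
    #
    #   {"token": "abcdef0123456789", "device": "ios", "channel": "default",
    #    "alert": "Hello", "badge": 1, "sound": "default", "extra": {}}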
def get_apns_conn(self):
        if self.app['shortname'] not in self.apnsconnections:
self.send_response(INTERNAL_SERVER_ERROR, dict(error="APNs is offline"))
return
count = len(self.apnsconnections[self.app['shortname']])
# Find an APNS instance
random.seed(time.time())
instanceid = random.randint(0, count - 1)
return self.apnsconnections[self.app['shortname']][instanceid]
    def post(self):
        """ Send notifications """
        try:
if not self.can("send_notification"):
self.send_response(FORBIDDEN, dict(error="No permission to send notification"))
return
# if request body is json entity
data = self.json_decode(self.request.body)
data = self.validate_data(data)
# Hook
if 'extra' in data:
if 'processor' in data['extra']:
try:
proc = import_module('hooks.' + data['extra']['processor'])
data = proc.process_pushnotification_payload(data)
except Exception as ex:
self.send_response(BAD_REQUEST, dict(error=str(ex)))
if not self.token:
self.token = data.get('token', None)
# application specific data
extra = data.get('extra', {})
            _logger.info('push with extra data: %s', extra)
device = data.get('device', DEVICE_TYPE_IOS).lower()
_logger.info('push for device: %s',device)
channel = data.get('channel', 'default')
_logger.info('push for channel: %s',channel)
token = self.db.tokens.find_one({'token': self.token})
if not token:
token = EntityBuilder.build_token(self.token, device, self.appname, channel)
if not self.can("create_token"):
self.send_response(BAD_REQUEST, dict(error="Unknow token and you have no permission to create"))
return
try:
# TODO check permission to insert
self.db.tokens.insert(token, safe=True)
except Exception as ex:
self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
if device == DEVICE_TYPE_SMS:
data.setdefault('sms', {})
data['sms'].setdefault('to', data.get('token', ''))
data['sms'].setdefault('message', data.get('message', ''))
sms = self.smsconnections[self.app['shortname']][0]
sms.process(token=data['token'], alert=data['alert'], extra=extra, sms=data['sms'])
self.send_response(ACCEPTED)
elif device == DEVICE_TYPE_IOS:
                # Use the splitlines trick to remove line endings (iOS only).
if type(data['alert']) is not dict:
alert = ''.join(data['alert'].splitlines())
else:
alert = data['alert']
data.setdefault('apns', {})
data['apns'].setdefault('badge', data.get('badge', None))
data['apns'].setdefault('sound', data.get('sound', None))
data['apns'].setdefault('custom', data.get('custom', None))
_logger.info('push for ios data: %s',data)
_logger.info('push for ios extra: %s',extra)
self.get_apns_conn().process(token=self.token, alert=alert, extra=extra, apns=data['apns'])
self.send_response(ACCEPTED)
elif device == DEVICE_TYPE_ANDROID:
data.setdefault('gcm', {})
try:
gcm = self.gcmconnections[self.app['shortname']][0]
_logger.info('push for android data: %s',data)
response = gcm.process(token=[self.token], alert=data['alert'], extra=data['extra'], gcm=data['gcm'])
responsedata = response.json()
if responsedata['failure'] == 0:
self.send_response(ACCEPTED)
except GCMUpdateRegIDsException as ex:
self.send_response(ACCEPTED)
except GCMInvalidRegistrationException as ex:
self.send_response(BAD_REQUEST, dict(error=str(ex), regids=ex.regids))
except GCMNotRegisteredException as ex:
self.send_response(BAD_REQUEST, dict(error=str(ex), regids=ex.regids))
except GCMException as ex:
self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
elif device == DEVICE_TYPE_WNS:
data.setdefault('wns', {})
wns = self.wnsconnections[self.app['shortname']][0]
wns.process(token=data['token'], alert=data['alert'], extra=extra, wns=data['wns'])
self.send_response(ACCEPTED)
elif device == DEVICE_TYPE_MPNS:
data.setdefault('mpns', {})
mpns = self.mpnsconnections[self.app['shortname']][0]
mpns.process(token=data['token'], alert=data['alert'], extra=extra, mpns=data['mpns'])
self.send_response(ACCEPTED)
else:
self.send_response(BAD_REQUEST, dict(error='Invalid device type'))
logmessage = 'Message length: %s, Access key: %s' %(len(data['alert']), self.appkey)
self.add_to_log('%s notification' % self.appname, logmessage)
except Exception as ex:
import traceback
traceback.print_exc()
self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
| {
"content_hash": "5459c45c03d4499b86aa48a61690a5d4",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 121,
"avg_line_length": 47.87769784172662,
"alnum_prop": 0.5619834710743802,
"repo_name": "imolainformatica/airnotifier",
"id": "0df1bb73e7a71ce3c43142577b6f33ac04c96cb7",
"size": "8210",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/push.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "179"
},
{
"name": "HTML",
"bytes": "39261"
},
{
"name": "Makefile",
"bytes": "58"
},
{
"name": "Python",
"bytes": "167810"
},
{
"name": "Shell",
"bytes": "850"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/redeem/poke_candy.proto',
package='pogoprotos.data.redeem',
syntax='proto3',
serialized_pb=_b('\n\'pogoprotos/data/redeem/poke_candy.proto\x12\x16pogoprotos.data.redeem\"4\n\tPokeCandy\x12\x12\n\npokemon_id\x18\x01 \x01(\x06\x12\x13\n\x0b\x63\x61ndy_count\x18\x02 \x01(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_POKECANDY = _descriptor.Descriptor(
name='PokeCandy',
full_name='pogoprotos.data.redeem.PokeCandy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='pogoprotos.data.redeem.PokeCandy.pokemon_id', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='candy_count', full_name='pogoprotos.data.redeem.PokeCandy.candy_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=119,
)
DESCRIPTOR.message_types_by_name['PokeCandy'] = _POKECANDY
PokeCandy = _reflection.GeneratedProtocolMessageType('PokeCandy', (_message.Message,), dict(
DESCRIPTOR = _POKECANDY,
__module__ = 'pogoprotos.data.redeem.poke_candy_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.redeem.PokeCandy)
))
_sym_db.RegisterMessage(PokeCandy)
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "574e89fad049bee094cbd29b954b0f52",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 214,
"avg_line_length": 31.71232876712329,
"alnum_prop": 0.7235421166306696,
"repo_name": "bellowsj/aiopogo",
"id": "fa1a9399c906a26a77fc998431e88df5312e3359",
"size": "2425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiopogo/pogoprotos/data/redeem/poke_candy_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62068"
}
],
"symlink_target": ""
} |
import logging
import urllib2
import socket
import time
import re
import lighter.util as util
class Graphite(object):
def __init__(self, address, url, tags=[]):
"""
@param address hostname:port where the Graphite listens for the plaintext protocol, usually port 2003
@param url URL where Graphite's API is, usually port 80
"""
self._address = address
self._url = url
self._tags = tags + ['source:lighter', 'type:change']
def notify(self, metricname, title, message, tags=[]):
if not title or not message:
            logging.warn('Graphite event requires both a title and a message')
return
merged_tags = list(tags) + self._tags
now = int(time.time())
logging.debug('Sending Graphite deployment event %s', message)
self._send(self._address, '%s 1 %s\n' % (metricname, now))
# For info on Graphite tags and filtering see
# https://github.com/grafana/grafana/issues/1474#issuecomment-105811191
self._call('/events/', {
'what': title,
'data': message,
'tags': ' '.join(self._mangle(tag) for tag in merged_tags),
'when': now
})
def _mangle(self, tag):
return re.sub('[\s,]', '_', tag.strip())
def _send(self, address, data):
if not self._address or not self._url:
logging.debug('Graphite is not enabled')
return
ip, port = address.split(':')
try:
logging.debug('Sending Graphite metric to %s:%s' % (ip, port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((ip, int(port)))
sock.send(data)
finally:
sock.close()
except (socket.error, ValueError) as e:
logging.warn('Graphite._send: ' + str(e))
def _call(self, endpoint, data):
if not self._address or not self._url:
logging.debug('Graphite is not enabled')
return
try:
url = self._url.rstrip('/') + endpoint
logging.debug('Calling Graphite endpoint %s', endpoint)
util.jsonRequest(url, data=data, method='POST')
except urllib2.URLError as e:
logging.warn('Graphite._call: ' + str(e))
return {}
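# --- Usage sketch (illustrative; not part of the original module) ---
# The endpoint addresses, metric name and tag values below are assumptions.
#
#   graphite = Graphite('graphite.example.com:2003', 'http://graphite.example.com',
#                       tags=['env:staging'])
#   graphite.notify('events.deploy.myservice', 'Deploy myservice',
#                   'Deployed myservice 1.2.3', tags=['service:myservice'])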
| {
"content_hash": "7109899a7770e417a6da5b2e70ea2496",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 113,
"avg_line_length": 34.21739130434783,
"alnum_prop": 0.5565438373570522,
"repo_name": "meltwater/lighter",
"id": "1353c5a844fab0ff21697dc7a0227b87bb7a9d64",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lighter/graphite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "272"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Python",
"bytes": "101880"
},
{
"name": "Shell",
"bytes": "3259"
}
],
"symlink_target": ""
} |
"""TensorFlow Lite tooling helper functionality.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
@@TocoConverter
@@toco_convert
@@toco_convert_protos
@@Interpreter
@@OpHint
@@convert_op_hints_to_stubs
@@build_toco_convert_protos
@@FLOAT
@@QUANTIZED_UINT8
@@TFLITE
@@GRAPHVIZ_DOT
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import PY3
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.contrib.lite.python import lite_constants as constants
from tensorflow.contrib.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.contrib.lite.python.convert import ConverterMode
from tensorflow.contrib.lite.python.convert import tensor_name as _tensor_name
from tensorflow.contrib.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.contrib.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.contrib.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.contrib.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.contrib.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.contrib.lite.python.convert_saved_model import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.contrib.lite.python.convert_saved_model import set_tensor_shapes as _set_tensor_shapes
from tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
from tensorflow.python.framework import graph_util as _tf_graph_util
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
class TocoConverter(object):
"""Convert a TensorFlow model into `output_format` using TOCO.
This is used to convert from a TensorFlow GraphDef or SavedModel into either a
TFLite FlatBuffer or graph visualization.
Attributes:
inference_type: Target data type of real-number arrays in the output file.
Must be `{FLOAT, QUANTIZED_UINT8}`. (default FLOAT)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization.
Must be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: Dict of strings representing input tensor names
mapped to tuple of floats representing the mean and standard deviation
of the training data (e.g., {"foo" : (0., 1.)}). Only need if
`inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default {})
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy).
(default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
converter_mode: Experimental flag, subject to change. ConverterMode
indicating which converter to use. (default ConverterMode.DEFAULT)
Example usage:
```python
# Converting a GraphDef from session.
converter = lite.TocoConverter.from_session(sess, in_tensors, out_tensors)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a GraphDef from file.
converter = lite.TocoConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a SavedModel.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Converting a tf.keras model.
converter = lite.TocoConverter.from_keras_model_file(keras_model)
tflite_model = converter.convert()
```
"""
def __init__(self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None):
"""Constructor for TocoConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` and `output_tensors` are None.
(default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
Raises:
ValueError: Invalid arguments.
"""
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self.inference_type = constants.FLOAT
self.inference_input_type = None
self.output_format = constants.TFLITE
self.quantized_input_stats = {}
self.default_ranges_stats = None
self.drop_control_dependency = True
self.reorder_across_fake_quant = False
self.change_concat_input_ranges = False
self.allow_custom_ops = False
self.post_training_quantize = False
self.dump_graphviz_dir = None
self.dump_graphviz_video = False
self.converter_mode = ConverterMode.DEFAULT
# Attributes are used by models that cannot be loaded into TensorFlow.
if not self._has_valid_tensors():
if not input_arrays_with_shape or not output_arrays:
raise ValueError(
"If input_tensors and output_tensors are None, both "
"input_arrays_with_shape and output_arrays must be defined.")
self._input_arrays_with_shape = input_arrays_with_shape
self._output_arrays = output_arrays
@classmethod
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TocoConverter class from a TensorFlow Session.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TocoConverter class.
"""
graph_def = _freeze_graph(sess, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
@classmethod
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TocoConverter class from a file containing a frozen GraphDef.
Args:
graph_def_file: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
Returns:
TocoConverter class.
Raises:
IOError:
File not found.
Unable to parse input file.
ValueError:
The graph is not frozen.
input_arrays or output_arrays contains an invalid tensor name.
input_shapes is not correctly defined when required
"""
with _ops.Graph().as_default():
with _session.Session() as sess:
# Read GraphDef from file.
if not _file_io.file_exists(graph_def_file):
raise IOError("File '{0}' does not exist.".format(graph_def_file))
with _file_io.FileIO(graph_def_file, "rb") as f:
file_content = f.read()
try:
graph_def = _graph_pb2.GraphDef()
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
try:
print("Ignore 'tcmalloc: large alloc' warnings.")
if not isinstance(file_content, str):
if PY3:
file_content = file_content.decode("utf-8")
else:
file_content = file_content.encode("utf-8")
graph_def = _graph_pb2.GraphDef()
_text_format.Merge(file_content, graph_def)
except (_text_format.ParseError, DecodeError):
raise IOError(
"Unable to parse input file '{}'.".format(graph_def_file))
# Handles models with custom TFLite ops that cannot be resolved in
# TensorFlow.
load_model_in_session = True
try:
_import_graph_def(graph_def, name="")
except _NotFoundError:
load_model_in_session = False
if load_model_in_session:
# Check if graph is frozen.
if not _is_frozen_graph(sess):
raise ValueError("Please freeze the graph using freeze_graph.py.")
# Get input and output tensors.
input_tensors = _get_tensors_from_tensor_names(
sess.graph, input_arrays)
output_tensors = _get_tensors_from_tensor_names(
sess.graph, output_arrays)
_set_tensor_shapes(input_tensors, input_shapes)
return cls(sess.graph_def, input_tensors, output_tensors)
else:
if not input_shapes:
raise ValueError("input_shapes must be defined for this model.")
if set(input_arrays) != set(input_shapes.keys()):
raise ValueError("input_shapes must contain a value for each item "
"in input_array.")
input_arrays_with_shape = [
(name, input_shapes[name]) for name in input_arrays
]
return cls(
graph_def,
input_tensors=None,
output_tensors=None,
input_arrays_with_shape=input_arrays_with_shape,
output_arrays=output_arrays)
@classmethod
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TocoConverter class from a SavedModel.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default set("serve"))
signature_key: Key identifying SignatureDef containing inputs and outputs.
(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)
Returns:
TocoConverter class.
"""
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key)
return cls(
graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
@classmethod
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None):
"""Creates a TocoConverter class from a tf.keras model file.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
Returns:
TocoConverter class.
"""
_keras.backend.clear_session()
_keras.backend.set_learning_phase(False)
keras_model = _keras.models.load_model(model_file)
sess = _keras.backend.get_session()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
ConverterMode option is unsupported for the model.
"""
# Checks dimensions in input tensor.
if self._has_valid_tensors():
for tensor in self._input_tensors:
if not tensor.get_shape():
raise ValueError("Provide an input shape for input array "
"'{0}'.".format(_tensor_name(tensor)))
shape = tensor.get_shape().as_list()
if None in shape[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(_tensor_name(tensor), shape))
elif shape[0] is None:
self._set_batch_size(batch_size=1)
# Get quantization stats. Ensures there is one stat per name if the stats
# are specified.
if self.quantized_input_stats:
quantized_stats = []
invalid_stats = []
for name in self.get_input_arrays():
if name in self.quantized_input_stats:
quantized_stats.append(self.quantized_input_stats[name])
else:
invalid_stats.append(name)
if invalid_stats:
raise ValueError("Quantization input stats are not available for input "
"tensors '{0}'.".format(",".join(invalid_stats)))
else:
quantized_stats = None
converter_kwargs = {
"inference_type": self.inference_type,
"inference_input_type": self.inference_input_type,
"input_format": constants.TENSORFLOW_GRAPHDEF,
"output_format": self.output_format,
"quantized_input_stats": quantized_stats,
"default_ranges_stats": self.default_ranges_stats,
"drop_control_dependency": self.drop_control_dependency,
"reorder_across_fake_quant": self.reorder_across_fake_quant,
"change_concat_input_ranges": self.change_concat_input_ranges,
"allow_custom_ops": self.allow_custom_ops,
"post_training_quantize": self.post_training_quantize,
"dump_graphviz_dir": self.dump_graphviz_dir,
"dump_graphviz_video": self.dump_graphviz_video
}
# Converts model.
if self._has_valid_tensors():
converter_kwargs["converter_mode"] = self.converter_mode
result = _toco_convert_impl(
input_data=self._graph_def,
input_tensors=self._input_tensors,
output_tensors=self._output_tensors,
**converter_kwargs)
else:
# Graphs without valid tensors cannot be loaded into tf.Session since they
# contain TFLite operation(s) that cannot be resolved in TensorFlow.
if self.converter_mode != ConverterMode.DEFAULT:
raise ValueError("This model can only be converted with the default "
"converter.")
result = _toco_convert_graph_def(
input_data=self._graph_def,
input_arrays_with_shape=self._input_arrays_with_shape,
output_arrays=self._output_arrays,
**converter_kwargs)
return result
def get_input_arrays(self):
"""Returns a list of the names of the input tensors.
Returns:
List of strings.
"""
if self._has_valid_tensors():
return [_tensor_name(tensor) for tensor in self._input_tensors]
else:
return [name for name, _ in self._input_arrays_with_shape]
def _has_valid_tensors(self):
"""Checks if the input and output tensors have been initialized.
Returns:
Bool.
"""
return self._input_tensors and self._output_tensors
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
Args:
batch_size: Batch size for the model. Replaces the first dimension of an
input size array if undefined. (default 1)
Raises:
ValueError: input_tensor is not defined.
"""
if not self._has_valid_tensors():
raise ValueError("The batch size cannot be set for this model. Please "
"use input_shapes parameter.")
for tensor in self._input_tensors:
shape = tensor.get_shape().as_list()
shape[0] = batch_size
tensor.set_shape(shape)
def _is_frozen_graph(sess):
"""Determines if the graph is frozen.
Determines if a graph has previously been frozen by checking for any
operations of type Variable*. If variables are found, the graph is not frozen.
Args:
sess: TensorFlow Session.
Returns:
Bool.
"""
for op in sess.graph.get_operations():
if op.type.startswith("Variable") or op.type.endswith("VariableOp"):
return False
return True
def _freeze_graph(sess, output_tensors):
"""Returns a frozen GraphDef.
Freezes a graph with Variables in it. Otherwise the existing GraphDef is
returned.
Args:
sess: TensorFlow Session.
output_tensors: List of output tensors (only .name is used from this).
Returns:
Frozen GraphDef.
"""
if not _is_frozen_graph(sess):
output_arrays = [_tensor_name(tensor) for tensor in output_tensors]
return _tf_graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_arrays)
else:
return sess.graph_def
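# --- Configuration sketch (illustrative; not part of the original module) ---
# Shows how the quantization attributes documented on TocoConverter might be set
# for a frozen graph; the file name, tensor names, shapes and stats below are
# assumptions.
#
#   converter = TocoConverter.from_frozen_graph(
#       "frozen_graph.pb", ["input"], ["output"],
#       input_shapes={"input": [1, 224, 224, 3]})
#   converter.inference_type = constants.QUANTIZED_UINT8
#   converter.quantized_input_stats = {"input": (127.5, 127.5)}  # (mean, std_dev)
#   converter.default_ranges_stats = (0, 6)
#   tflite_model = converter.convert()
#   open("quantized_model.tflite", "wb").write(tflite_model)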
| {
"content_hash": "ec7074a030fc69150c09859c3daf320b",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 126,
"avg_line_length": 41.15370018975332,
"alnum_prop": 0.6711084470675027,
"repo_name": "xodus7/tensorflow",
"id": "2be24455d83e2ee9ac0f005ce5d4316d456b12a4",
"size": "22377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/lite/python/lite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "340946"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48861698"
},
{
"name": "CMake",
"bytes": "195699"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1240309"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834061"
},
{
"name": "Jupyter Notebook",
"bytes": "2604756"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40952138"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "459258"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from google.appengine.api.users import create_logout_url, get_current_user
from gaecookie.decorator import no_csrf
from gaepermission import facade
from gaepermission.decorator import login_required
from tekton.gae.middleware.redirect import RedirectResponse
from gaepermission.decorator import login_not_required
@login_not_required
@no_csrf
def index(_resp):
facade.logout(_resp).execute()
redirect_url = '/'
google_user = get_current_user()
if google_user:
redirect_url = create_logout_url(redirect_url)
return RedirectResponse(redirect_url) | {
"content_hash": "a999f8f5e44ae1bfd381c9838cba5f1b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 35.05555555555556,
"alnum_prop": 0.7765451664025357,
"repo_name": "erikabarros/naguil",
"id": "00b3c7b4dc3d72b7517278da20241d0a90fcfbb7",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/routes/logout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "170554"
},
{
"name": "HTML",
"bytes": "48432"
},
{
"name": "JavaScript",
"bytes": "18917"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "110519"
},
{
"name": "Shell",
"bytes": "2917"
}
],
"symlink_target": ""
} |
""" Script to generate a json file containing book names """
import json
import urllib.request
import re
RESULT_JSON_NAME = "books.json"
# Get catalog.json
URL_CAT = "https://api.unfoldingword.org/uw/txt/2/catalog.json"
response_cat = urllib.request.urlopen(URL_CAT)
DATA = json.loads(response_cat.read().decode('utf-8'))
OUTPUT = []
LANGS = DATA["cat"][0]["langs"]
# just a pointer, since the full path is dynamic-ish
langIdx = []
for idx in range(len(LANGS)):
if LANGS[idx]['lc'] == 'en':
langIdx = LANGS[idx]
# skip obs for now, loop over all books
for x in range(0, 66):
# gives book name and order (the books are stored out of order in the json)
# Having to navigate through DATA a bit because the location of English may move as more languages are added
# Using English because the list of books may not be complete in other languages
# 0 index after cat is for Bible, 1 contains data for obs
# 0 index after vers is for ULB, though UDB would also be fine for this case
slug = langIdx["vers"][0]["toc"][x]["slug"]
name = langIdx["vers"][0]["toc"][x]["title"]
print(slug)
# sort+1 so that 0 can be for OBS in the future
number = x + 1
# anthology designates what higher collection a book is a part of
anthology = 'ot'
# door43 convention skips number 40. Makes sense to change the sort to be book number
if number > 39:
number = number + 1
anthology = 'nt'
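    # Illustrative walk-through: Malachi (x=38) keeps num 39 and stays in the OT,
    # while Matthew (x=39) becomes num 41 in the NT because slot 40 is skipped.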
# create a dictionary to store the book's data
book = {}
book['slug'] = slug
book['name'] = name
book['num'] = number
book['anth'] = anthology
# add to the list of books
OUTPUT.append(book)
# output all book data to a json file
with open(RESULT_JSON_NAME, 'w') as outfile:
json.dump(OUTPUT, outfile)
| {
"content_hash": "6fd51ed83f78f5cd91d7f8b681eb6c33",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 112,
"avg_line_length": 32.6,
"alnum_prop": 0.664249860568879,
"repo_name": "WycliffeAssociates/translationRecorder",
"id": "4633d774098c3e90aeba87073acd04e2911857e7",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/export-with-user",
"path": "translationRecorder/app/src/scripts/get_books.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6022"
},
{
"name": "HTML",
"bytes": "68998"
},
{
"name": "Java",
"bytes": "1144993"
},
{
"name": "JavaScript",
"bytes": "814"
},
{
"name": "Kotlin",
"bytes": "29304"
},
{
"name": "Python",
"bytes": "11904"
}
],
"symlink_target": ""
} |
from src.utility import HTTPClient,HmcHeaders,HMCClientLogger
import xml.etree.ElementTree as etree
import os
log = HMCClientLogger.HMCClientLogger(__name__)
from src.common.JobStatus import *
ROOT = 'ManagedSystem'
CONTENT_TYPE ='application/vnd.ibm.powervm.web+xml; type=JobRequest'
class PowerOnManagedSystem(JobStatus):
"""
    Power on the selected ManagedSystem if it is not already in an operating
    state; otherwise an error is shown.
"""
def __init__(self):
"""
initializes root and content-type
"""
self.content_type = CONTENT_TYPE
self.root = ROOT
def poweron_ManagedSystem(self, ip, managedsystem_uuid, x_api_session):
"""
Args:
ip:ip address of hmc
managedsystem_uuid:UUID of managedsystem
x_api_session:session to be used
"""
super().__init__(ip, self.root, self.content_type, x_api_session)
log.log_debug("power on managed system started")
headers_object=HmcHeaders.HmcHeaders("web")
namespace=headers_object.ns["xmlns"]
directory = os.path.dirname(__file__)
        inputpayload = open(os.path.join(directory, "data", "poweron_managedsystem.xml"), "r")
request_object=HTTPClient.HTTPClient('uom',ip,self.root,self.content_type,session_id=x_api_session)
request_object.HTTPPut(payload=inputpayload,append=str(managedsystem_uuid)+"/do/PowerOn")
log.log_debug(request_object.response)
if request_object.response_b:
self.get_job_status(request_object)
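# --- Usage sketch (illustrative; not part of the original module) ---
# The HMC address, managed-system UUID and session token below are placeholders.
#
#   power_on = PowerOnManagedSystem()
#   power_on.poweron_ManagedSystem('9.1.2.3',
#                                  'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
#                                  '<x-api-session-token>')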
| {
"content_hash": "00ab3b0f442e5d0f39971f70beb4004a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 107,
"avg_line_length": 38.225,
"alnum_prop": 0.6664486592544147,
"repo_name": "PowerHMC/HmcRestClient",
"id": "fecbe7a473bcd65b5c2f163186e090812329133b",
"size": "2162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/managed_system/PowerOnManagedSystem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13780311"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from six.moves import xrange
try:
from lmfit import Parameters, Parameter
LMFIT_PARAMETERS_INSTALLED = True
except ImportError:
LMFIT_PARAMETERS_INSTALLED = False
class ParinfoList(list):
"""
Store a list of model parameter values and their associated metadata (name,
error, order, limits, etc.) in a class-friendly manner
"""
def __init__(self, *args, **kwargs):
"""
Create a parinfolist from either a list of parameters
or a list or a tuple
Parameters
----------
preserve_order : bool
Keep the p['n'] values for each parameter?
Default to false, so par #'s will go from 0 to n(pars)-1
"""
if LMFIT_PARAMETERS_INSTALLED:
list.__init__(self,[])
if len(args) == 1 and isinstance(args[0],Parameters):
self._from_Parameters(args[0])
self._dict = dict([(pp['parname'],pp) for pp in self])
return
list.__init__(self, *args)
preserve_order = kwargs.pop('preserve_order',False)
# re-order the parameters from 0 to n-1 unless told otherwise
if not preserve_order:
self._set_numbers()
self._check_names()
self._set_attributes()
self._dict = dict([(pp['parname'],pp) for pp in self])
def _set_numbers(self):
""" Set the parameters in order by their current order in the list """
for ii,pp in enumerate(self):
if pp.n != ii:
pp.n = ii
def __str__(self):
return "\n".join([repr(p) for p in self])
def _getter(attributename):
def getattribute(self):
return [v[attributename] for v in self]
return getattribute
def _setter(attributename):
def setattribute(self, values):
if len(values) == len(self):
for parinf,newval in zip(self,values):
parinf[attributename] = newval
else:
raise ValueError("Must have len(new values) = %i (was %i)" % (len(self),len(values)))
return setattribute
def keys(self):
""" Dictionary-like behavior """
return self.parnames
def items(self):
""" Dictionary-like behavior """
return zip(self.parnames, self[:])
#def values(self):
# """ Dictionary-like behavior """
# return [v['value'] for v in self]
names = property(fget=_getter('parname'), fset=_setter('parname'))
parnames=names
shortnames = property(fget=_getter('shortparname'), fset=_setter('shortparname'))
shortparnames=shortnames
values = property(fget=_getter('value'), fset=_setter('value'))
errors = property(fget=_getter('error'), fset=_setter('error'))
n = property(fget=_getter('n'), fset=_setter('n'))
order=n
fixed = property(fget=_getter('fixed'), fset=_setter('fixed'))
limits = property(fget=_getter('limits'), fset=_setter('limits'))
limited = property(fget=_getter('limited'), fset=_setter('limited'))
tied = property(fget=_getter('tied'), fset=_setter('tied'))
def __getitem__(self, key):
if type(key) in (int, slice):
return super(ParinfoList,self).__getitem__(key)
else:
return self._dict[key]
def __setitem__(self, key, val):
"""
DO NOT allow items to be replaced/overwritten,
instead use their own setters
"""
# if key already exists, use its setter
if key in self._dict or (type(key) is int and key < len(self)):
self[key] = val
elif type(key) is int:
# can't set a new list element this way
raise IndexError("Index %i out of range" % key)
elif isinstance(val,Parinfo):
# otherwise, add the item
self.__dict__[key] = val
else:
raise TypeError("Can only add Parinfo items to ParinfoLists")
def _set_attributes(self):
self.__dict__.update(dict([(pp['parname'],pp) for pp in self]))
def _check_names(self):
"""
Make sure all names are unique. If they're not, append #'s at the end
(but strip #'s first)
"""
name_counter = {}
names_stripped = [name.strip('0123456789') for name in self.names]
for ii,name in enumerate(names_stripped):
if names_stripped.count(name) > 1:
if name in name_counter:
name_counter[name] += 1
self[ii]['parname'] = self[ii]['parname'].strip('0123456789')+ "{0}".format(name_counter[name])
else:
name_counter[name] = 0
self[ii]['parname'] = self[ii]['parname'].strip('0123456789')+ "{0}".format(name_counter[name])
# remove un-numbered versions if numbered versions are now being used
if name in self.__dict__:
self.__dict__.pop(name)
def append(self, value, renumber=None):
"""
Append to the list. Will renumber the parameter if its number already
exists in the list unless renumber == False
"""
if hasattr(value,'n') and value.n in self.n and renumber is not False:
# indexed from 0, so len(self) = max(self.n)+1
value.n = len(self)
super(ParinfoList, self).append(value)
self._check_names()
self._set_attributes()
def as_Parameters(self):
"""
Convert a ParinfoList to an lmfit Parameters class
"""
if LMFIT_PARAMETERS_INSTALLED:
P = Parameters()
for par in self:
P.add(name=par.parname,
value=par.value,
vary=not(par.fixed),
                      expr=par.tied if par.tied != '' else None,
min=par.limits[0] if par.limited[0] else None,
max=par.limits[1] if par.limited[1] else None)
return P
def _from_Parameters(self, lmpars):
"""
Read from an lmfit Parameters instance
"""
if len(lmpars) == len(self):
for P in lmpars.values():
self[P.name].value = P.value
self[P.name].error = P.stderr
self[P.name].limits = (P.min,P.max)
self[P.name].limited = (P.min is not None,P.max is not None)
self[P.name].expr = '' if P.expr is None else P.expr
else:
for par in lmpars.values():
self.append(Parinfo(par))
def tableprint(self, item_length=15, numbered=True):
"""
Print data in table-friendly format
Parameters
----------
item_length : int
Number of characters per item printed
numbered : bool
Are the parameters numbered? In pyspeckit, they will always be,
but this was included for compatibility with generic fitters
"""
stripped_names = list(set([par.parname.strip("0123456789") for par in self]))
        nlines = len(self.n) // len(stripped_names)
strformat = "%" + str(item_length) + "s"
fltformat = "%" + str(item_length) + "g"
print(" ".join([strformat % name for name in stripped_names]))
if numbered:
for ii in xrange(nlines):
print(" ".join([fltformat % (self[name+"%i" % ii].value) for
name in stripped_names]))
else:
print(" ".join([fltformat % (self[name].value) for name in
stripped_names]))
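# --- Usage sketch (illustrative; not part of the original module) ---
# A ParinfoList behaves both like a list and like a dict keyed on parname;
# the parameter names and values below are assumptions.
#
#   pars = ParinfoList([Parinfo({'parname': 'AMPLITUDE', 'value': 1.5}),
#                       Parinfo({'parname': 'WIDTH', 'value': 0.3,
#                                'limits': (0, 1), 'limited': (True, True)})])
#   pars['WIDTH'].value = 0.5    # dict-style access by parameter name
#   pars.values                  # -> [1.5, 0.5]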
class Parinfo(dict):
"""
A class for storing attributes of a fitted model parameter. It is based on
mpfit's parinfo dictionary, which is just a dictionary containing a few set
values. This implements them as 'gettable' attributes instead, but far
more importantly, includes sanity checks when setting values.
Attributes
----------
value: number
The value of the parameter. Arithmetic operations (*,/,+,-,**) will
use this value
error: number
The error on the value
n: int
The order of the parameter in the model or ParinfoList
fixed: bool
Can the value change? If False, error should be 0.
limits: (min,max)
The limits on the value of the parameter. Only applied
if limited
limited: (bool, bool)
Is the parameter value limited?
step: number
from MPFIT: the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is computed
automatically. Ignored when AUTODERIVATIVE=0.
scaleable: bool
Is the value scaled with the data? Important for normalization
procedures
tied: string
mpfit/lmift parameter. Allows you to specify arbitrary expressions for
how the parameter depends on other parameters
mpfit:
a string expression which "ties" the parameter to other free or
fixed parameters. Any expression involving constants and the
parameter array P are permitted. Example: if parameter 2 is always
to be twice parameter 1 then use the following: parinfo(2).tied =
'2 * p(1)'. Since they are totally constrained, tied parameters
are considered to be fixed; no errors are computed for them.
NOTE: the PARNAME can't be used in expressions.
parname: string
The parameter name
shortparname: string (tex)
A shortened version of the parameter name for plotting display
"""
def __init__(self, values=None, **kwargs):
dict.__init__(self, {'value':0.0, 'error':0.0, 'n':0, 'fixed':False,
'limits':(0.0,0.0), 'limited':(False,False),
'step':False, 'scaleable':False, 'tied':'',
'parname':'', 'shortparname':''}, **kwargs)
if LMFIT_PARAMETERS_INSTALLED:
if isinstance(values,Parameter):
self._from_Parameter(values)
self.__dict__ = self
return
if values is not None:
self.update(values)
self.__dict__ = self
def __repr__(self):
try:
reprint = "Param #%i %12s = %12g" % (self.n, self.parname, self.value)
if self.fixed:
reprint += " (fixed)"
elif self.error is not None:
reprint += " +/- %15g " % (self.error)
if any(self.limited):
lolim = "[%g," % self.limits[0] if self.limited[0] else "(-inf,"
uplim = "%g]" % self.limits[1] if self.limited[1] else "inf)"
myrange = lolim + uplim
reprint += " Range:%10s" % myrange
            if self.tied != '':
                reprint += " Tied: %s" % self.tied
            if self.shortparname != '':
                reprint += " Shortparname: %s" % self.shortparname
return reprint
except AttributeError:
return super(Parinfo,self).__repr__()
def __deepcopy__(self, memo):
copy = Parinfo(self)
copy.__dict__ = copy
return copy
def __copy__(self):
copy = Parinfo(self)
copy.__dict__ = copy
return copy
@property
def max(self):
return self.limits[1]
@max.setter
def max(self, value):
self.limits = (self.limits[0], value)
@property
def min(self):
return self.limits[0]
@min.setter
def min(self, value):
self.limits = (value, self.limits[1])
@property
def vary(self):
return not self.fixed
@vary.setter
def vary(self, value):
self.fixed = not value
@property
def expr(self):
return self.tied
@expr.setter
def expr(self, value):
self._check_OK('tied',value)
self.tied = value
def __setattr__(self, key, value):
# DEBUG print "Setting attribute %s = %s" % (key,value)
self._check_OK(key,value)
return super(Parinfo, self).__setattr__(key, value)
def __setitem__(self, key, value):
# DEBUG print "Setting item %s = %s" % (key,value)
self._check_OK(key,value)
return super(Parinfo, self).__setitem__(key, value)
def _check_OK(self,key,value):
# DEBUG print "Checking whether %s's value %s is OK" % (key,value)
if key == 'value':
if hasattr(self,'limited') and hasattr(self,'limits'):
if self.limited[0] and value < self.limits[0]:
raise ValueError('Set parameter value %r < limit value %r' % (value,self.limits[0]))
if self.limited[1] and value > self.limits[1]:
raise ValueError('Set parameter value %r > limit value %r' % (value,self.limits[1]))
if key in ('limits','limited'):
try:
if len(value) != 2:
raise ValueError("%s must be a 2-tuple" % key)
except TypeError: # if the input was scalar
raise ValueError("%s must be a 2-tuple" % key)
if key in ('parname','tied','shortparname'):
if type(value) is not str:
raise TypeError("%s must be a string" % key)
if key in ('fixed',):
try:
value = bool(value)
except:
raise ValueError("%s had value %s, which could not be converted to boolean" % (key,value))
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _from_Parameter(self, lmpar):
"""
Read a Parinfo instance from and lmfit Parameter
"""
self['limits'] = lmpar.min,lmpar.max
self['limited'] = (lmpar.min not in (None,False),lmpar.max not in (None,False))
self['value'] = lmpar.value
self['error'] = lmpar.stderr
self['parname'] = lmpar.name
self['fixed'] = not(lmpar.vary)
def _operation_wrapper(operation, reverse=False):
"""
        Perform an operation (addition, subtraction, multiplication, division, etc.)
"""
def ofunc(self, other):
""" Operation Function """
intypes = type(other),type(self.value)
try:
returnval = getattr(self.value,'__%s__' % operation)(other)
if type(returnval) not in intypes:
raise TypeError("return value had wrong type: %s" % str(type(returnval)))
else:
return returnval
except TypeError as err: # integers don't have defined operations with floats
#print err
#print "TypeError1: ",self, self.value, other
try:
if hasattr(other,'__r%s__' % operation):
#print "r",operation,": ",self, self.value, other
return getattr(other,'__r%s__' % operation)(self.value)
elif hasattr(other,'__%s__' % operation[1:]):
#print operation,": ",self, self.value, other
return getattr(other,'__%s__' % operation[1:])(self.value)
except:
raise TypeError("Neither side of the operation has a %s attribute!" % operation)
return ofunc
__add__ = _operation_wrapper('add')
__radd__ = _operation_wrapper('radd')
__sub__ = _operation_wrapper('sub')
__rsub__ = _operation_wrapper('rsub')
__mul__ = _operation_wrapper('mul')
__rmul__ = _operation_wrapper('rmul')
__div__ = _operation_wrapper('div')
__rdiv__ = _operation_wrapper('rdiv')
__pow__ = _operation_wrapper('pow')
__rpow__ = _operation_wrapper('rpow')
try:
def __array__(self):
import numpy as np
return np.array(self.value)
except ImportError:
pass
if __name__=="__main__":
import unittest
def check_failure(value):
""" Raise a ValueError if value not in (0,1) """
P = Parinfo()
P.value = 1
P.limited = (True,True)
P.limits = (0,1)
P.value = value
def check_tied(value):
P = Parinfo()
P.tied = value
def check_limits(value):
P = Parinfo()
P.limits = value
def check_index(key):
PL = ParinfoList([Parinfo({'parname':'HEIGHT'}),
Parinfo({'value':15,'parname':'AMPLITUDE'}),
Parinfo({'value':3,'parname':'WIDTH'}),
Parinfo({'value':4,'parname':'WIDTH'})])
return PL[key]
def check_set_list(values):
PL = ParinfoList([Parinfo({'parname':'HEIGHT'}),
Parinfo({'value':15,'parname':'AMPLITUDE'}),
Parinfo({'value':3,'parname':'WIDTH','limits':(0,5),'limited':(True,True)}),
Parinfo({'value':4,'parname':'WIDTH'})])
PL.shortparnames = ['a','b','c','d']
PL.values = values
return PL.values
class MyTestCase(unittest.TestCase):
def __init__(self, methodName='runTest', param=None):
super(MyTestCase, self).__init__(methodName)
self.param = param
def test_checks_value_fail(self):
check_failure(0.5)
self.assertRaises(ValueError, check_failure, 5)
self.assertRaises(ValueError, check_failure, -5)
def test_checks_tied_fail(self):
check_tied('p[0]')
self.assertRaises(TypeError, check_tied, 5)
self.assertRaises(TypeError, check_tied, (1,2,3))
def test_checks_limits_fail(self):
check_limits((1,2))
self.assertRaises(ValueError, check_limits, -5)
self.assertRaises(ValueError, check_limits, (1,2,3))
def test_indexing(self):
self.assertEqual(check_index(0), check_index('HEIGHT'))
self.assertEqual(check_index(1), check_index('AMPLITUDE'))
self.assertEqual(check_index(2), check_index('WIDTH0'))
def test_set_list(self):
self.assertEqual(check_set_list([1,2,3,4]),[1,2,3,4])
self.assertRaises(ValueError,check_set_list,[1,2,10,4])
def test_arithmetic(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5 == value+5
assert par-5 == value-5
assert par/5 == value/5
assert par*5 == value*5
def test_arithmetic2(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5 == value+5
assert par-5 == value-5
assert par/5 == value/5
assert par*5 == value*5
def test_arithmetic3(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5. == value+5.
assert par-5. == value-5.
assert par/5. == value/5.
assert par*5. == value*5.
def test_arithmetic4(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert par+5. == value+5.
assert par-5. == value-5.
assert par/5. == value/5.
assert par*5. == value*5.
def test_arithmetic5(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert 5.+par == 5.+value
assert 5.-par == 5.-value
assert 5./par == 5./value
assert 5.*par == 5.*value
def test_arithmetic6(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert 5.+par == 5.+value
assert 5.-par == 5.-value
assert 5./par == 5./value
assert 5.*par == 5.*value
def test_arithmetic7(self):
value = 25.
par = Parinfo({'parname':'TEST', 'value': value})
assert 5+par == 5+value
assert 5-par == 5-value
assert 5/par == 5/value
assert 5*par == 5*value
def test_arithmetic8(self):
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
assert 5+par == 5+value
assert 5-par == 5-value
assert 5/par == 5/value
assert 5*par == 5*value
def test_copy(self):
import copy
value = 25
par = Parinfo({'parname':'TEST', 'value': value})
parcopy = copy.copy(par)
assert parcopy.value == value
parcopy = copy.deepcopy(par)
assert parcopy.value == value
unittest.main()
| {
"content_hash": "3f450d265940b0ca600200367ece0d50",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 115,
"avg_line_length": 36.064625850340136,
"alnum_prop": 0.535980382910497,
"repo_name": "low-sky/pyspeckit",
"id": "3403eb29b60d57483ecd434fa007fbbd685aa1a9",
"size": "21206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspeckit/spectrum/parinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1245223"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from ionyweb.page_app.page_text.models import *
admin.site.register(PageApp_Text)
| {
"content_hash": "4c3cb46a5b8b8ce9a21c8f17d6868746",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 47,
"avg_line_length": 29,
"alnum_prop": 0.8103448275862069,
"repo_name": "makinacorpus/ionyweb",
"id": "36f197969e2461b7be91e01689d0b944439cd893",
"size": "140",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ionyweb/page_app/page_text/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "124754"
},
{
"name": "JavaScript",
"bytes": "260880"
},
{
"name": "Python",
"bytes": "1024305"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ref', '0009_auto_20221002_1238'),
]
operations = [
migrations.AlterField(
model_name='componentinstance',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='component_instances', to='ref.project', verbose_name='project'),
),
]
| {
"content_hash": "283210e095dc6277b79503c3f8ee1f32",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 159,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.6447638603696099,
"repo_name": "marcanpilami/MAGE",
"id": "0d8ff35e3378ce8b3d2692c0744605d56cceded1",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ref/migrations/0010_alter_componentinstance_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16033"
},
{
"name": "Dockerfile",
"bytes": "1730"
},
{
"name": "HTML",
"bytes": "88971"
},
{
"name": "JavaScript",
"bytes": "6024"
},
{
"name": "Python",
"bytes": "401724"
},
{
"name": "Shell",
"bytes": "20159"
}
],
"symlink_target": ""
} |
from ..base.utils.common import *
| {
"content_hash": "636cef6c99113a6988bda2039ff395cd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.7352941176470589,
"repo_name": "DataDog/integrations-core",
"id": "df1dd22b711d23d6daf07aaab855493034642b70",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog_checks_base/datadog_checks/utils/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import os
from kokki import Package, File, Template, Service
env.include_recipe("postgresql9")
Service("postgresql",
supports_restart = True,
supports_reload = True,
supports_status = True,
action = "nothing")
Package("postgresql-" + env.config.postgresql9.version,
notifies = [("stop", env.resources["Service"]["postgresql"], True)])
File("pg_hba.conf",
owner = "postgres",
group = "postgres",
mode = 0600,
path = os.path.join(env.config.postgresql9.config_dir, "pg_hba.conf"),
content = Template("postgresql9/pg_hba.conf.j2"),
notifies = [("reload", env.resources["Service"]["postgresql"])])
File("postgresql.conf",
owner = "postgres",
group = "postgres",
mode = 0666,
path = os.path.join(env.config.postgresql9.config_dir, "postgresql.conf"),
content = Template("postgresql9/postgresql.conf.j2"),
notifies = [("restart", env.resources["Service"]["postgresql"])])
| {
"content_hash": "1e5cbc50e9696ab6008f89fe5855201d",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 32.37931034482759,
"alnum_prop": 0.6645367412140575,
"repo_name": "samuel/kokki",
"id": "dfe761478fce8f30ce05751bb4b118380735fc9d",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kokki/cookbooks/postgresql9/recipes/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "211476"
}
],
"symlink_target": ""
} |
""" Download views for editorial app. """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponse
from django.utils import timezone
from django.views.generic import TemplateView , UpdateView, DetailView, FormView, View
from django.views.decorators.csrf import csrf_exempt
from cStringIO import StringIO
from zipfile import ZipFile
import datetime
import json
from braces.views import LoginRequiredMixin, FormMessagesMixin
from editorial.views import CustomUserTest
# from editorial.forms import StoryDownloadForm
from editorial.models import (
Story,
Facet,
ImageAsset,
DocumentAsset,
AudioAsset,
VideoAsset
)
#----------------------------------------------------------------------#
# Download View
#----------------------------------------------------------------------#
class StoryDownloadTemplateView(CustomUserTest, TemplateView):
"""Display form for a story download."""
template_name = 'editorial/story/story_download_form.html'
def test_user(self, user):
"""User must be member of an org."""
if user.organization:
return True
raise PermissionDenied()
def get_context_data(self, pk):
story = Story.objects.get(id=pk)
story_images = story.get_story_images()
story_documents = story.get_story_documents()
story_audio = story.get_story_audio()
story_video = story.get_story_video()
return {
'story': story,
'story_images': story_images,
'story_documents': story_documents,
'story_audio': story_audio,
'story_video': story_video,
}
# ACCESS: Any org user, or user from an organization that is in collaborate_with
# should be able to download a story
# Contractors should not be able to download
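# A rough sketch of what that collaborator check could look like in test_user
# (attribute names such as `story.organization` and `story.collaborate_with`
# are assumptions, not confirmed by this module):
#
#     def test_user(self, user):
#         story = get_object_or_404(Story, id=self.kwargs['pk'])
#         org = user.organization
#         if org and (org == story.organization
#                     or org in story.collaborate_with.all()):
#             return True
#         raise PermissionDenied()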
class StoryDownloadProcessView(CustomUserTest, View):
"""Create the download for a story and its facets."""
def test_user(self, user):
"""User must be member of an org."""
if user.organization:
return True
raise PermissionDenied()
def post(self, request, pk):
""" Process download form to collect objects and create download file."""
# get the story and associated facets no matter what options are selected
story_id = request.POST.get('story')
story = get_object_or_404(Story, id=pk)
story_txt = story.get_story_download()
select_all_images = story.get_story_images()
select_all_documents = story.get_story_documents()
select_all_audio = story.get_story_audio()
select_all_video = story.get_story_video()
image_txt = ""
document_txt = ""
audio_txt = ""
video_txt = ""
# Set up zip file
fp = StringIO()
z = ZipFile(fp, mode="w")
# Always Zip up story meta
z.writestr("story_details.txt", story_txt)
# ------------------------------ #
# IF SELECT ALL #
# ------------------------------ #
# if select_all is chosen, then all items will be downloaded
story_sa_id = request.POST.get('select_all')
if story_sa_id:
story = get_object_or_404(Story, id=story_sa_id)
if story_sa_id:
# Zip up all facets and assets including story metadata
for facet in story.facet_set.all():
z.writestr("{name}.txt".format(name=facet.name), facet.get_facet_download())
for image in select_all_images:
z.writestr("{image}.jpg".format(image=image.title), image.photo.read())
new_info = image.get_asset_download_info()
image_txt += new_info
for document in select_all_documents:
if document.asset_type == "PDF":
z.writestr("{document}.pdf".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "WORD DOC":
z.writestr("{document}.docx".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "TEXT":
z.writestr("{document}.txt".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "COMMA SEPARATED":
z.writestr("{document}.csv".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "EXCEL":
z.writestr("{document}.xls".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
for audiofile in select_all_audio:
if audiofile.asset_type == "MP3":
z.writestr("{audiofile}.mp3".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
if audiofile.asset_type == "WAV":
z.writestr("{audiofile}.wav".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
for video in select_all_video:
if video.asset_type == "YOUTUBE":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_youtube_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
if video.asset_type == "VIMEO":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_vimeo_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
# user can also select download all items associated with specific facets
# ------------------------------ #
# IF FACET ALL #
# ------------------------------ #
facet_sa_id = request.POST.getlist('facet_select_all')
if facet_sa_id:
for facet in facet_sa_id:
facet = get_object_or_404(Facet, id=facet)
# Zip up story meta, facet content and facet images
if facet:
z.writestr("{name}.txt".format(name=facet.name), facet.get_facet_download())
for image in facet.image_assets.all():
z.writestr("{image}.jpg".format(image=image.title), image.photo.read())
new_info = image.get_asset_download_info()
image_txt += new_info
for document in facet.document_assets.all():
if document.asset_type == "PDF":
z.writestr("{document}.pdf".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "WORD DOC":
z.writestr("{document}.docx".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "TEXT":
z.writestr("{document}.txt".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "COMMA SEPARATED":
z.writestr("{document}.csv".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "EXCEL":
z.writestr("{document}.xls".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
for audiofile in facet.audio_assets.all():
if audiofile.asset_type == "MP3":
z.writestr("{audiofile}.mp3".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
if audiofile.asset_type == "WAV":
z.writestr("{audiofile}.wav".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
for video in facet.video_assets.all():
if video.asset_type == "YOUTUBE":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_youtube_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
if video.asset_type == "VIMEO":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_vimeo_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
# if not select all OR facet select all, then user chooses the facet and the images
# ------------------------------ #
# IF FACET SPECIFIC #
# ------------------------------ #
facet_sp_id = request.POST.getlist('facet_specific_content')
if facet_sp_id:
for facet_id in facet_sp_id:
facet = get_object_or_404(Facet, id=facet_id)
z.writestr("{name}.txt".format(name=facet.name), facet.get_facet_download())
# ------------------------------ #
# IF SPECIFIC IMAGES #
# ------------------------------ #
# if not select all or by facet, then user chooses specific images
images = request.POST.getlist('images')
images = ImageAsset.objects.filter(pk__in=images)
if images:
for image in images:
z.writestr("{image}.jpg".format(image=image.title), image.photo.read())
new_info = image.get_asset_download_info()
image_txt += new_info
# ------------------------------ #
# IF SPECIFIC DOCUMENTS #
# ------------------------------ #
# if not select all or by facet, then user chooses specific documents
documents = request.POST.getlist('documents')
documents = DocumentAsset.objects.filter(pk__in=documents)
if documents:
for document in documents:
if document.asset_type == "PDF":
z.writestr("{document}.pdf".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "WORD DOC":
z.writestr("{document}.docx".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "TEXT":
z.writestr("{document}.txt".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "COMMA SEPARATED":
z.writestr("{document}.csv".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
if document.asset_type == "EXCEL":
z.writestr("{document}.xls".format(document=document.title), document.document.read())
new_info = document.get_asset_download_info()
document_txt += new_info
# ------------------------------ #
# IF SPECIFIC AUDIO #
# ------------------------------ #
# if not select all or by facet, then user chooses specific audiofiles
audiofiles = request.POST.getlist('audiofiles')
audiofiles = AudioAsset.objects.filter(pk__in=audiofiles)
if audiofiles:
for audiofile in audiofiles:
if audiofile.asset_type == "MP3":
z.writestr("{audiofile}.mp3".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
if audiofile.asset_type == "WAV":
z.writestr("{audiofile}.wav".format(audiofile=audiofile.title), audiofile.audio.read())
new_info = audiofile.get_asset_download_info()
audio_txt += new_info
# ------------------------------ #
# IF SPECIFIC VIDEO #
# ------------------------------ #
# if not select all or by facet, then user chooses specific video files
videos = request.POST.getlist('videofiles')
videos = VideoAsset.objects.filter(pk__in=videos)
if videos:
for video in videos:
if video.asset_type == "YOUTUBE":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_youtube_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
if video.asset_type == "VIMEO":
# text = video.link.encode('utf-8')
# title = video.title.encode('utf-8')
# z.writestr("{title}_vimeo_link.txt".format(title=title), text)
new_info = video.get_asset_download_info()
video_txt += new_info
# ------------------------------ #
# Create download #
# ------------------------------ #
#Take the final version of asset_txts and write it.
if image_txt:
z.writestr("image_details.txt", image_txt)
if document_txt:
z.writestr("document_details.txt", document_txt)
if audio_txt:
z.writestr("audio_details.txt", audio_txt)
if video_txt:
z.writestr("video_details.txt", video_txt)
z.close()
fp.seek(0)
response = HttpResponse(fp, content_type='application/zip')
fp.close()
return response
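# Note on the repeated asset_type branches above: the same type-to-extension
# mapping is written out several times. A sketch of a table-driven helper that
# could replace the document branches (extension strings assumed to match the
# branches above):
#
#     DOCUMENT_EXTENSIONS = {"PDF": "pdf", "WORD DOC": "docx", "TEXT": "txt",
#                            "COMMA SEPARATED": "csv", "EXCEL": "xls"}
#
#     def zip_document(z, document, document_txt):
#         ext = DOCUMENT_EXTENSIONS.get(document.asset_type)
#         if ext:
#             z.writestr("{0}.{1}".format(document.title, ext),
#                        document.document.read())
#             document_txt += document.get_asset_download_info()
#         return document_txt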
| {
"content_hash": "bea93d90e1e3353af5204b97f97f7247",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 111,
"avg_line_length": 47.68529411764706,
"alnum_prop": 0.5129834083759945,
"repo_name": "ProjectFacet/facet",
"id": "563a0f6a8a520d5c627631d208fc653a11d49f34",
"size": "16213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/editorial/views/downloads.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4350483"
},
{
"name": "HTML",
"bytes": "1677386"
},
{
"name": "JavaScript",
"bytes": "1120019"
},
{
"name": "Python",
"bytes": "804022"
},
{
"name": "Ruby",
"bytes": "225"
},
{
"name": "Shell",
"bytes": "889"
}
],
"symlink_target": ""
} |
"""
The script identifies new items in the website inventory and saves them to the
database. Items already in the database will be ignored. In-depth details for
items will be scrapped with another script.
Information captured about items:
- name
- imageUrl
- productUrl
@author: eriel marimon
@created: july 1, 2017
@updated: july 4, 2017
"""
import re
import json
import time
import os
import sys
new_modules = "%s/.." % (os.path.dirname(os.path.realpath(__file__)))
sys.path.append(new_modules)
from ProductRepository import ProductRepository
from PageIdentifier import BjsPageWizard
import BjsUtil
import GlobalUtil
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
CONSOLE_LOG_TRUE = True
LOGFILE = ("bjs-logs/%s.log" % (os.path.basename(__file__))).replace(".py","")
# CATEGORY_NAME_ELEMENT_XPATH = '//*[@id="listing-container"]/div[1]/section/header/h1'
# aug 17, 2017 : xpath update
CATEGORY_NAME_ELEMENT_XPATH = '//*[@id="listing-container"]/div[1]/div[1]/header/h1'
def get_items_from_store_website(driver, wizard, category, url):
driver.get(url)
# Try to wait for a page identifier to appear
try:
category_name_element = WebDriverWait(driver, 3).until(
EC.presence_of_element_located((By.XPATH, CATEGORY_NAME_ELEMENT_XPATH))
)
except Exception, e:
message = "Cant find category=%s at url=%s" % (category, url)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_ERROR, message, console_out=CONSOLE_LOG_TRUE)
return []
soup = BeautifulSoup(driver.page_source, "html.parser")
products = []
if category_name_element.text != category:
"""Log warning message that names mismatched"""
message = "Mismatch in category=%s and url=%s" % (category, url)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_WARN, message, console_out=CONSOLE_LOG_TRUE)
elif wizard.is_product_page(soup):
"""Get products on this page"""
message = "Category=%s is a product page with url=%s." % (category, url)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
return wizard.get_partial_products(soup)
elif wizard.is_categories_page(soup):
"""Get categories on this page and recurse"""
message = "Category=%s is a categories page with url=%s." % (category, url)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
category_map = wizard.map_categories_to_urls(soup)
products = []
for key in category_map:
products += get_items_from_store_website(driver, wizard, key, category_map[key])
return products
else:
message = "This shouldn't be happening, category=%s, url=%s" % (category, url)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_ERROR, message, console_out=CONSOLE_LOG_TRUE)
return []
def get_and_save_new_items(
bjs_main_product_category_name,
bjs_main_product_page):
message = "Getting TOP category=%s, url=%s" % (
bjs_main_product_page,
bjs_main_product_category_name)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
driver = webdriver.Firefox()
driver.get(bjs_main_product_page)
# CATEGORY_NAME_ELEMENT_XPATH = '//*[@id="listing-container"]/div[1]/section/header/h1'
# aug 17, 2017 : xpath update
CATEGORY_NAME_ELEMENT_XPATH = '//*[@id="listing-container"]/div[1]/div[1]/header/h1'
name_element = WebDriverWait(driver, 3).until(
EC.presence_of_element_located((By.XPATH, CATEGORY_NAME_ELEMENT_XPATH))
)
wizard = BjsPageWizard()
soup = BeautifulSoup(driver.page_source, "html.parser")
if wizard.is_categories_page(soup):
message = "Category=%s is a categories page with url=%s." % (
bjs_main_product_category_name,
bjs_main_product_page)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
category_map = wizard.map_categories_to_urls(soup)
else:
message = "Not categories on page, category=%s, url=%s" % (
bjs_main_product_category_name,
bjs_main_product_page)
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
raise Exception(message)
rest_connection = GlobalUtil.get_rest_env()
repository = ProductRepository(
rest_connection["domain"],
rest_connection["port"],
rest_connection["base_path"])
items_on_db = json.loads(repository.get_items().content)
existing_item_names = []
for item in items_on_db:
existing_item_names.append(item["name"])
items = []
new_items_responses = []
for key in category_map:
new_items = get_items_from_store_website(driver, wizard, key, category_map[key])
items += new_items
for i in new_items:
if i["name"] in existing_item_names:
message = "Item=%s already exists. Skipping." % (i["name"])
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
continue
resp = repository.create_new_item(i)
new_items_responses.append(resp.content)
message = "Saved new item=%s" % (i["name"])
GlobalUtil.log(LOGFILE, GlobalUtil.LOG_INFO, message, console_out=CONSOLE_LOG_TRUE)
return new_items_responses
def run():
bjs_main_product_page = "http://www.bjs.com/grocery-household--pet.category.3000000000000117223.2001244"
bjs_main_product_category_name = "Grocery, Household & Pet"
get_and_save_new_items(
bjs_main_product_category_name,
bjs_main_product_page)
##########
## main ##
##########
if __name__ == "__main__":
run()
| {
"content_hash": "819cd549748015136292eb0118ce13ee",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 108,
"avg_line_length": 32.97826086956522,
"alnum_prop": 0.6557350032959789,
"repo_name": "aldeano19/databucket",
"id": "76c23dc162425bc74c086c525de408a1ffa382d4",
"size": "6090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/bjs/ScrapeNewBjsProducts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57032"
},
{
"name": "HTML",
"bytes": "393176"
},
{
"name": "Java",
"bytes": "35425"
},
{
"name": "JavaScript",
"bytes": "103807"
},
{
"name": "Python",
"bytes": "55846"
},
{
"name": "Shell",
"bytes": "890"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2013 Jeremy Gillick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import json
import time
import pause
from time import sleep
from lendingclub import LendingClub, LendingClubError
from lendingclub.filters import *
from lcinvestor import util
from lcinvestor.settings import Settings
class AutoInvestor:
"""
Regularly check a LendingClub account for available cash and reinvest it
automatically.
"""
lc = None
authed = False
verbose = False
auto_execute = True
settings = None
loop = False
app_dir = None
# The file that the summary from the last investment is saved to
last_investment_file = 'last_investment.json'
def __init__(self, verbose=False, auto_execute=True):
"""
Create an AutoInvestor instance
- Set verbose to True if you want to see debugging logs
"""
self.verbose = verbose
self.auto_execute = auto_execute
self.logger = util.create_logger(verbose)
self.app_dir = util.get_app_directory()
self.lc = LendingClub()
# Set logger on lc
if self.verbose:
self.lc.set_logger(self.logger)
# Create settings object
self.settings = Settings(investor=self, settings_dir=self.app_dir, logger=self.logger, verbose=self.verbose)
self.settings.investor = self # create a link back to this instance
def version(self):
"""
Return the version number of the Lending Club Investor tool
"""
        return util.get_version()
def welcome_screen(self):
print "\n///--------------------------- $$$ ---------------------------\\\\\\"
print '| Welcome to the unofficial Lending Club investment tool |'
print " ---------------------------------------------------------------- \n"
def get_auth(self):
print 'To start, we need to log you into Lending Club (your password will never be saved)\n'
while True:
self.settings.get_auth_settings()
print '\nAuthenticating...'
try:
return self.authenticate()
except Exception as e:
print '\nLogin failed: {0}'.format(str(e.value))
print "Please try again\n"
def setup(self):
"""
Setup the investor to run
"""
if self.verbose:
print 'VERBOSE OUTPUT IS ON\n'
if not self.authed:
self.get_auth()
self.settings.select_profile()
print 'You have ${0} in your account, free to invest\n'.format(self.lc.get_cash_balance())
# Investment settings
print 'Now let\'s define what you want to do'
# Use the settings from last time
if self.settings.profile_loaded is not False:
summary = self.settings.show_summary()
if summary is False: # there was an error with saved settings
print '\nThere was an error with your saved settings. Please go through the prompts again.\n'
self.settings.get_investment_settings()
if util.prompt_yn('Would you like to use these settings?', 'y'):
self.settings.save() # to save the email that was just entered
else:
self.settings.get_investment_settings()
else:
self.settings.get_investment_settings()
# All ready to start running
print '\nThat\'s all we need. Now, as long as this is running, your account will be checked every {0} minutes and invested if enough funds are available.\n'.format(self.settings['frequency'])
def authenticate(self):
"""
Attempt to authenticate the user with the email/pass from the Settings object.
This is just a wrapper for LendingClub.authenticate()
Returns True or raises an exceptions
"""
self.authed = self.lc.authenticate(self.settings.auth['email'], self.settings.auth['pass'])
return self.authed
def run(self):
"""
Alias for investment_loop.
This is used by python-runner
"""
self.investment_loop()
def run_once(self):
"""
Try to invest, based on your settings, and then end the program.
"""
self.loop = False
# Make sure the site is available
attempts = 0
while not self.lc.is_site_available():
attempts += 1
if attempts % 5 == 0:
self.logger.warn('LendingClub is not responding. Trying again in 10 seconds...')
sleep(10)
# Invest
self.attempt_to_invest()
def stop(self):
"""
Called when the investment loop should end.
If the loop is currently attempting to invest cash, this will not be canceled.
"""
self.loop = False
self.logger.info("Stopping investor...")
def get_order_summary(self, portfolio):
"""
Log a summary of the investment portfolio which was ordered
"""
summary = 'Investment portfolio summary: {0} loan notes ('.format(portfolio['numberOfLoans'])
breakdown = []
for grade in ['a', 'aa', 'b', 'c', 'd', 'e', 'f', 'g']:
if portfolio[grade] > 0.0:
percent = int(round(portfolio[grade]))
breakdown.append('{0}:{1}%'.format(grade.upper(), percent))
if len(breakdown) > 0:
summary += ', '.join(breakdown)
summary += ')'
return summary
def attempt_to_invest(self):
"""
Attempt an investment if there is enough available cash and matching investment option
Returns true if money was invested
"""
# Authenticate
try:
self.authenticate()
self.logger.info('Authenticated')
except Exception as e:
self.logger.error('Could not authenticate: {0}'.format(e.value))
return False
# Try to invest
self.logger.info('Checking for funds to invest...')
try:
# Get current cash balance
cash = self.lc.get_investable_balance()
if cash > 0 and cash >= self.settings['min_cash']:
# Invest
self.logger.info(" $ $ $ $ $ $ $ $ $ $") # Create break in logs
try:
# Refresh saved filter
filters = self.settings['filters']
if type(filters) is SavedFilter:
filters.reload()
# Find investment portfolio, starting will all your cash,
# down to the minimum you're willing to invest
# No more than 10 searches
i = 0
portfolio = False
decrement = None
while portfolio is False and cash >= self.settings['min_cash'] and i < 10:
i += 1
# Try to find a portfolio
try:
self.logger.info('Searching for a portfolio for ${0}'.format(cash))
portfolio = self.lc.build_portfolio(cash,
max_per_note=self.settings['max_per_note'],
min_percent=self.settings['min_percent'],
max_percent=self.settings['max_percent'],
filters=filters,
do_not_clear_staging=True)
except LendingClubError as e:
pass
# Try a lower amount of cash to invest
if not portfolio:
self.logger.info('Could not find any matching portfolios for ${0}'.format(cash))
# Create decrement value that will search up to 5 more times
if decrement is None:
delta = cash - self.settings['min_cash']
if delta < 25:
break
elif delta <= 100:
decrement = 25
else:
decrement = delta / 4
# Just to be safe, shouldn't decrement in $10 increments
if decrement < 10:
break
# We are at our lowest
if cash <= self.settings['min_cash']:
break
# New amount to search for
cash -= decrement
if cash < self.settings['min_cash']:
cash = self.settings['min_cash']
else:
cash = util.nearest_25(cash)
if portfolio:
# Invest
assign_to = self.settings['portfolio']
order = self.lc.start_order()
order.add_batch(portfolio['loan_fractions'])
if self.auto_execute:
self.logger.info('Auto investing ${0} at {1}%...'.format(cash, portfolio['percentage']))
sleep(5) # last chance to cancel
order._Order__already_staged = True # Don't try this at home kids
order._Order__i_know_what_im_doing = True # Seriously, don't do it
order_id = order.execute(portfolio_name=assign_to)
else:
                            self.logger.info('Order staged but not completed; please go to the LendingClub website to complete the order. (see the "--no-auto-execute" command flag)')
return False
# Success! Show summary and save the order
summary = self.get_order_summary(portfolio)
self.logger.info(summary)
self.logger.info('Done\n')
self.save_last_investment(cash, portfolio, order_id, portfolio_name=assign_to)
else:
self.logger.warning('No investment portfolios matched your filters at this time -- Trying again in {2} minutes'.format(self.settings['min_percent'], self.settings['max_percent'], self.settings['frequency']))
except Exception as e:
self.logger.exception('Failed trying to invest: {0}'.format(str(e)))
else:
self.logger.info('Only ${0} available for investing (of your ${1} balance)'.format(cash, self.lc.get_cash_balance()))
return False
except Exception as e:
self.logger.error(str(e))
return False
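    # Worked example of the search schedule above (illustrative numbers): with
    # $500 investable and a $25 minimum, the first search uses the full $500;
    # each retry then lowers the amount by roughly a quarter of the $475 gap,
    # snapped to a $25 boundary by util.nearest_25, until the $25 floor or the
    # ten-search cap is reached.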
def save_last_investment(self, cash, portfolio, order_id, portfolio_name=None):
""""
Save a log of the last investment to the last_investment file
"""
try:
last_invested = {
'timestamp': int(time.time()),
'order_id': order_id,
'portfolio': portfolio_name,
'cash': cash,
'investment': portfolio
}
# Convert to JSON
json_out = json.dumps(last_invested)
self.logger.debug('Saving last investment file with JSON: {0}'.format(json_out))
# Save
file_path = os.path.join(self.app_dir, self.last_investment_file)
f = open(file_path, 'w')
f.write(json_out)
f.close()
except Exception as e:
self.logger.warning('Couldn\'t save the investment summary to file (this warning can be ignored). {0}'.format(str(e)))
def get_last_investment(self):
"""
Return the last investment summary that has been saved to the last_investment file
"""
try:
file_path = os.path.join(self.app_dir, self.last_investment_file)
if os.path.exists(file_path):
# Read file
f = open(file_path, 'r')
json_str = f.read()
f.close()
# Convert to dictionary and return
return json.loads(json_str)
except Exception as e:
self.logger.warning('Couldn\'t read the last investment file. {0}'.format(str(e)))
return None
def investment_loop(self):
"""
Start the investment loop
Check the account every so often (default is every 60 minutes) for funds to invest
The frequency is defined by the 'frequency' value in the ~/.lcinvestor/settings.yaml file
"""
self.loop = True
frequency = self.settings.user_settings['frequency']
while self.loop:
# Make sure the site is available (network could be reconnecting after sleep)
attempts = 0
while not self.lc.is_site_available() and self.loop:
attempts += 1
if attempts % 5 == 0:
self.logger.warn('LendingClub is not responding. Trying again in 10 seconds...')
sleep(10)
# Invest
self.attempt_to_invest()
pause.minutes(frequency)
class AutoInvestorError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| {
"content_hash": "05d08f64c5a741d3e1a68b84fa1e9832",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 231,
"avg_line_length": 37.87596899224806,
"alnum_prop": 0.5402510574430345,
"repo_name": "jgillick/LendingClubAutoInvestor",
"id": "3725b931ff677ffed05450550fa5db5d31c5857f",
"size": "14681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lcinvestor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "96"
},
{
"name": "Python",
"bytes": "71839"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0012_remove_appuser_name'),
]
operations = [
migrations.AddField(
model_name='appuser',
name='name',
field=models.CharField(default='user', max_length=128),
),
]
| {
"content_hash": "1d926451149b79669fdacb49e955e2a9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 21.72222222222222,
"alnum_prop": 0.5907928388746803,
"repo_name": "cffls/SmartTrainnerServer",
"id": "d6a65440a3ecd74fc227d89b4b8bdc56f567986a",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SmartTrainer/users/migrations/0013_appuser_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27539"
},
{
"name": "CSS",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "1763207"
},
{
"name": "Java",
"bytes": "41494"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Matlab",
"bytes": "306"
},
{
"name": "Python",
"bytes": "24041"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
# Create your models here.
@python_2_unicode_compatible
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
@python_2_unicode_compatible
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
class SeatPressure(models.Model):
minutes = models.DateTimeField()
average = models.FloatField()
seat_count = models.IntegerField()
def pressure_sensor_graph(self):
return datetime.datetime.now() - datetime.timedelta(days=1) <= \
            self.minutes < \
datetime.datetime.now()
| {
"content_hash": "24bad23b10f7deb988e950f8dfcb456f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 30.066666666666666,
"alnum_prop": 0.6984478935698448,
"repo_name": "youkidearitai/health_graph",
"id": "8e032fc69069f09d7ee1d05dfe4832de6732f5e7",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keep_health/health_graph/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "7411"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "15478"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(name='anthill',
version='0.2.0-dev',
description='Django apps for running a community website, developed for sunlightlabs.com',
author='James Turk',
author_email='[email protected]',
license='BSD',
url='http://github.com/sunlightlabs/anthill/',
packages=['anthill', 'anthill.events', 'anthill.events.templatetags',
'anthill.people', 'anthill.people.templatetags',
'anthill.projects', 'anthill.projects.templatetags'],
package_data={'anthill.projects': ['templates/projects/*.html']},
)
| {
"content_hash": "54924a157372a5a67b371292d403fba0",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 96,
"avg_line_length": 44.07142857142857,
"alnum_prop": 0.6628849270664505,
"repo_name": "robbie/anthill",
"id": "d246ea22ce8622417a38a5c8cfb0aaf885845387",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from parse import parse_line, convert_dms_to_float
# NATFIX is defined with many 1 column wide blank separator. We roll them in to a data field and rely on strip() to clean it up
NATFIX_RECORDS = ((None, 2),
("id", 6),
("latitude_string", 8),
("longitude_string", 9),
(None, 1),
("artcc_id", 5),
("state_code", 3),
("icao_code", 3),
("fix_navaid_type", 7))
def parse_natfix_line(line):
r = parse_line(line[:-1], NATFIX_RECORDS)
# add in lat/lon converted to a simple float
r['lat'] = convert_dms_to_float(r['latitude_string'])
r['lon'] = convert_dms_to_float(r['longitude_string'])
return r
def parse_natfix_file(fp):
# Skip the preamble two lines
assert fp.readline().strip() == "NATFIX"
fp.readline()
r = []
for line in fp:
# $ indicates end of file
if line[0] == '$':
break
# XXX(nelson): should probably use an iterator or do useful work instead of making a giant list
r.append(parse_natfix_line(line))
return r
if __name__ == '__main__':
path = '/Users/nelson/Downloads/56DySubscription_November_18__2010_-_January_13__2011/'
raw = open(path + 'NATFIX.txt')
natfixes = parse_natfix_file(raw)
print "%d records found in NATFIX.txt" % len(natfixes)
import unittest
# Test data from 56DySubscription_November_18__2010_-_January_13__2011
class NatfixTests(unittest.TestCase):
def test_bad_line(self):
"Test that bad input signals an error"
self.assertRaises(Exception, parse_natfix_line, "invalid input")
def test_natfix_line(self):
natfix = parse_natfix_line("I SUNOL 373620N 1214837W 'ZOA CA REP-PT \n")
for (expected, key) in (('SUNOL', 'id'),
('373620N', 'latitude_string'),
('1214837W', 'longitude_string'),
('ZOA', 'artcc_id'),
('CA', 'state_code'),
('REP-PT', 'fix_navaid_type')):
self.assertEqual(expected, natfix[key])
self.assertAlmostEqual(37.605555555, natfix['lat'])
self.assertAlmostEqual(-121.8102777777, natfix['lon'])
def test_natfix_file(self):
from StringIO import StringIO
test_file = StringIO("""NATFIX
'20101118
I 00A 400415N 0745601W 'ZDC PA ARPT
I 00AK 595122N 1514147W 'ZAN AK ARPT
$
""")
natfixes = parse_natfix_file(test_file)
self.assertEqual(2, len(natfixes))
self.assertEqual('ZDC', natfixes[0]["artcc_id"])
| {
"content_hash": "4127931a699a6d2e2036dc9c5122dbb1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 127,
"avg_line_length": 40.05555555555556,
"alnum_prop": 0.5256588072122053,
"repo_name": "adamfast/faddsdata",
"id": "cc9b4f7df8d2f1dfae84487cbd8e316e8b96164a",
"size": "2915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faddsdata/natfix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32354"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
app = Flask(__name__)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/')
@app.route('/restaurants/<int:restaurant_id>/')
def restaurantMenu(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id = restaurant.id)
return render_template('menu.html', restaurant=restaurant, items = items)
#Task 1: Create route for newMenuItem function here
@app.route('/restaurant/<int:restaurant_id>/new/')
def newMenuItem(restaurant_id):
return "page to create a new menu item. Task 1 complete!"
#Task 2: Create route for editMenuItem function here
@app.route('/restaurant/<int:restaurant_id>/<int:menu_id/edit/')
def editMenuItem(restaurant_id, menu_id)
return "page to edit a menu item. Task 2 complete!"
#Task 3: Create a route for deleteMenuItem function here
@app.route('/restaurant/<int:restaurant_id>/<int:menu_id>/delete/')
def deleteMenuItem(restaurant_id, menu_id):
return "page to delete a menu item. Task 3 complete!"
if __name__ == '__main__':
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
| {
"content_hash": "55ed37818d83e6d607e2930dae3469b2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 76,
"avg_line_length": 35.8974358974359,
"alnum_prop": 0.7357142857142858,
"repo_name": "tuanvu216/udacity-course",
"id": "04b6342d1f97c4976d71ec2cf700653398d14b60",
"size": "1400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "full_stack_foundations/full_stack_foundations_master/lesson_3/08_menu_template_solution/project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3736"
},
{
"name": "HTML",
"bytes": "143388"
},
{
"name": "JavaScript",
"bytes": "169689"
},
{
"name": "Jupyter Notebook",
"bytes": "3237655"
},
{
"name": "Python",
"bytes": "400129"
},
{
"name": "Ruby",
"bytes": "448"
},
{
"name": "Shell",
"bytes": "538"
}
],
"symlink_target": ""
} |
import time
from app.models import WeixinTextResponse
class AccessToken:
def __init__(self,wx):
self.weixin=wx
self._load_token()
def _load_token(self):
fp=open('token','r')
line=fp.readline()
token,expires_at=line.split('|')
if token=='' or time.time(expires_at)>time.time():
access_token=self.weixin.get_access_token()
self.token=access_token['access_token']
self.expires_at=access_token['access_token_expires_at']
self.save(access_token)
self.token=token
self.expires=expires_at
def get_token(self):
return self.token
def get_expires_at(self):
return self.expires_at
def _save(self,access_token):
if access_token is None:
return False
fp=open('token','w')
fp.seek(0)
fp.write('%s|%s' % (self.token,self.expires_at))
menu_stub={
'button':[
{
'name':u'介绍',
'sub_button':[
{
'type':'view',
'name':u'历史背景',
'url':'http://www.ganputang.com/weixin/history/'
},
{
'type':'click',
'name':u'冲泡方法',
'key':'menu_chongpao'
},
]
},
{
'type':'view',
'name':u'优惠',
'url':'http://www.ganputang.com/weixin/discount'
},
]
}
class WeixinTextMessageHandler:
def __init__(self,wechat):
self.wx=wechat
def make_response(self):
key=self.wx.message.content
response=WeixinTextResponse.query.filter(WeixinTextResponse.key==key).first()
return self.wx.response_text(content=u'抱歉,请输入问题序号' if response is None else response.response)
| {
"content_hash": "8bbb03928eabd78874cb2792c6d2fc77",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 96,
"avg_line_length": 21.33823529411765,
"alnum_prop": 0.6492074431426602,
"repo_name": "colaftc/webtool",
"id": "85f5a055127df47e7fe964e6e4b406cbefe81964",
"size": "1518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/weixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12208"
},
{
"name": "HTML",
"bytes": "16773"
},
{
"name": "JavaScript",
"bytes": "2571"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "258023"
},
{
"name": "Ruby",
"bytes": "861"
},
{
"name": "VimL",
"bytes": "401921"
}
],
"symlink_target": ""
} |
import numpy as np
import xgboost as xgb
import random
from itertools import izip
def evalerror(preds, dtrain):
labels = dtrain.get_label()
# return a pair metric_name, result
# since preds are margin(before logistic transformation, cutoff at 0)
return 'error', float(sum(labels != (preds > 0.0))) / len(labels)
def cv(feature_prefix, feature_name, params, num_round=1000, early_stopping_rounds=10, kfolder=10):
vals = []
for i in xrange(kfolder):
train_f = feature_prefix + '/Folder%d/' % i + \
feature_name + '.train.xgboost.4rank.txt'
test_f = feature_prefix + '/Folder%d/' % i + \
feature_name + '.test.xgboost.4rank.txt'
bst, eresult = train(train_f, test_f, params,
num_round, early_stopping_rounds, evaluate=True)
vals.append(eresult)
return vals
def train(train_f, test_f, params, num_round, early_stopping_rounds, evaluate=False, eval_f=None):
train_group_f = train_f.replace('.txt', '.txt.group')
dtrain = xgb.DMatrix(train_f)
dtest = xgb.DMatrix(test_f)
dtrain.set_group(np.loadtxt(train_group_f).astype(int))
if evaluate:
test_group_f = test_f.replace('.txt', '.txt.group')
dtest.set_group(np.loadtxt(test_group_f).astype(int))
else:
test_group_f = eval_f.replace('.txt', '.txt.group')
dval = xgb.DMatrix(eval_f)
dval.set_group(np.loadtxt(test_group_f).astype(int))
if evaluate:
watchlist = [(dtrain, 'train'), (dtest, 'valid')]
else:
watchlist = [(dtrain, 'train'), (dval, 'valid')]
bst = xgb.train(params, dtrain, num_round, watchlist, obj=None,
feval=None, early_stopping_rounds=early_stopping_rounds)
return bst, dtest if not evaluate else bst.eval(dtest)
def normed_by_group(preds, groups):
    """Min-max scale predictions using the global min/max across all
    predictions; groups with a single candidate are forced to 0."""
    min_v = np.min(preds)
    max_v = np.max(preds)
    print min_v, max_v
for lines in groups:
if len(lines) == 1:
preds[lines[0]] = 0
continue
tmp = preds[lines]
candidates = (tmp - min_v) / (max_v - min_v)
for i, l in enumerate(lines):
preds[l] = candidates[i]
return preds
def submit(bst, dtest, need_norm=False, kfolder=-1):
# make prediction
preds = bst.predict(dtest)
print preds.shape
groups = {}
with open('../data/0_raw/validate_nolabel.txt', 'r') as fp:
for i, line in enumerate(fp):
qid, uid = line.strip().split()
if qid in groups:
groups[qid].append(i)
else:
groups[qid] = [i]
if need_norm:
preds = normed_by_group(preds, groups.values())
fname = './submit/submit%d.csv' % kfolder if kfolder != -1 else './submit/submit.csv'
with open(fname, 'w') as fo:
fo.write('qid,uid,label\n')
with open('../data/0_raw/validate_nolabel.txt', 'r') as fp:
for i, line in enumerate(fp):
fo.write(line.strip().replace('\t', ',') +
',' + str(preds[i]) + '\n')
def submit_merge(kfolder):
data_infos = []
datas = []
for k in xrange(kfolder):
fname = './submit/submit%d.csv' % k
with open(fname, 'r') as fp:
for i, line in enumerate(fp):
if i == 0:
continue
q, u, label = line.strip().split(',')
if k == 0:
data_infos.append('%s,%s' % (q, u))
datas.append([float(label)])
else:
datas[i-1].append(float(label))
with open('./submit/submit.csv', 'w') as fo:
fo.write('qid,uid,label\n')
for info, labels in izip(data_infos, datas):
fo.write(info + ',' + str(np.mean(labels)) + '\n')
def gradsearch(feature_name='stat', kfolder=8, num_round=1000, early_stopping_rounds=20):
fo = open('gradsearch.%s.rs.txt' % feature_name, 'w')
min_child_weights = [1, 2, 5]
max_depths = [2, 3, 4, 5]
etas = [0.01, 0.05, 0.1]
max_delta_steps = [0, 1, 5, 10]
subsamples = [0.5, 0.7, 1]
colsample_bytrees = [0.5, 0.7, 1]
scale_pos_weights = [1, 5, 10]
tmp_len = len(etas) * len(max_delta_steps) * len(subsamples) * len(colsample_bytrees) * len(scale_pos_weights)
tmp_total_len = tmp_len * len(min_child_weights) * len(max_depths)
best_result = (0, )
for i, m1 in enumerate(min_child_weights):
for j, m2 in enumerate(max_depths):
fo.write('%d passed of %d\n\n' % (tmp_len * i * len(max_depths) + tmp_len * j, tmp_total_len))
for eta in etas:
for m3 in max_delta_steps:
for subsample in subsamples:
for colsample_bytree in colsample_bytrees:
for w in scale_pos_weights:
if random.randint(0, 9) != 0:
continue
params = {}
params['min_child_weight'] = m1
params['max_depth'] = m2
params['eta'] = eta
params['max_delta_step'] = m3
params['subsample'] = subsample
params['colsample_bytree'] = colsample_bytree
params['scale_pos_weight'] = w
params['silent'] = True
params['objective'] = 'reg:logistic'
# params['objective'] = 'rank:pairwise'
# params['objective'] = 'rank:ndcg'
params['eval_metric'] = ['ndcg@5-', 'ndcg@10-']
evals = cv('../feature/feature', feature_name, params,
num_round=num_round, early_stopping_rounds=early_stopping_rounds, kfolder=kfolder)
metrics = 0.
for eva in evals:
eva_tmp = eva.split('eval-ndcg@', 2)
ndcg_at5 = eva_tmp[1].strip().replace('5-:', '')
ndcg_at10 = eva_tmp[2].strip().replace('10-:', '')
metrics += (float(ndcg_at5) + float(ndcg_at10)) / 2
metrics /= len(evals)
if metrics > best_result[0]:
best_result = (metrics, m1, m2, eta, m3, subsample, colsample_bytree, w)
fo.write('%d %d %f %d %f %f %d\n' % (
m1, m2, eta, m3, subsample, colsample_bytree, w))
fo.write('\n'.join(evals) + '\n')
fo.write('average (ndcg@5 + ndcg@10)/2 %f\n\n' % metrics)
fo.flush()
    fo.write('the best params and result is\n(ndcg@5 + ndcg@10)/2 = %f\nparams is %d %d %f %d %f %f %d\n' % best_result)
fo.close()
feature_prefix = '../feature/feature'
feature_name = 'stat'
# feature_name = 'merge.stat_tags'
# feature_name = 'merge.stat_tags_ngram'
# gradsearch(feature_name=feature_name, kfolder=3)
params = {'min_child_weight': 5, 'max_depth': 3, 'eta': 0.1,
'max_delta_step': 5, 'subsample': 0.5, 'colsample_bytree': 1}
params['scale_pos_weight'] = 1
params['silent'] = True
params['objective'] = 'binary:logistic'
# params['objective'] = 'rank:pairwise'
# params['objective'] = 'rank:ndcg'
params['eval_metric'] = ['ndcg@5-', 'ndcg@10-']
kfolder = 8
for i in xrange(kfolder):
train_f = feature_prefix + '/Folder%d/' % i + feature_name + '.train.xgboost.4rank.txt'
test_f = feature_prefix + '/' + feature_name + '.test.xgboost.txt'
eval_f = feature_prefix + '/Folder%d/' % i + feature_name + '.test.xgboost.4rank.txt'
bst, dtest = train(train_f, test_f, params, 1000, 100, evaluate=False, eval_f=eval_f)
submit(bst, dtest, kfolder=i)
submit_merge(kfolder)
| {
"content_hash": "423c9681f705343a931403c817682155",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 125,
"avg_line_length": 42.98924731182796,
"alnum_prop": 0.5182591295647824,
"repo_name": "jasonwbw/JustAPlaceholder",
"id": "bc7e67ce05e3c39523a333c047377b6536b4aff1",
"size": "8021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/ranking.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "73129"
}
],
"symlink_target": ""
} |
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Sustainability"
prefix = "sustainability"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
GetCarbonFootprintSummary = Action("GetCarbonFootprintSummary")
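# Example (sketch) of using the action defined above with the awacs.aws policy
# helpers; the "*" resource is illustrative only:
#
#     from awacs.aws import Allow, Statement
#
#     statement = Statement(
#         Effect=Allow,
#         Action=[GetCarbonFootprintSummary],
#         Resource=["*"],
#     )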
| {
"content_hash": "135bb044c92292e8ba6d80d0b940838d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 88,
"avg_line_length": 26.9,
"alnum_prop": 0.6486988847583643,
"repo_name": "cloudtools/awacs",
"id": "8c0e29d6eebc6727b4155bd12cd080d7ad1ba929",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "awacs/sustainability.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "963483"
}
],
"symlink_target": ""
} |
import os
import pytest
import core.config
@pytest.fixture
def defaultConfig():
return core.config.Config([])
def test_module():
modules = ["module-1", "module-2", "module-3"]
cfg = core.config.Config(["-m"] + modules)
assert cfg.modules() == modules
def test_module_ordering_maintained():
modules = ["module-1", "module-5", "module-7"]
more_modules = ["module-0", "module-2", "aaa"]
cfg = core.config.Config(["-m"] + modules + ["-m"] + more_modules)
assert cfg.modules() == modules + more_modules
def test_default_interval(defaultConfig):
assert defaultConfig.interval() == 1
def test_interval():
interval = 4
cfg = core.config.Config(["-p", "interval={}".format(interval)])
assert cfg.interval() == interval
def test_floating_interval():
interval = 4.5
cfg = core.config.Config(["-p", "interval={}".format(interval)])
assert cfg.interval() == interval
def test_default_theme(defaultConfig):
assert defaultConfig.theme() == "default"
def test_theme():
theme_name = "sample-theme"
cfg = core.config.Config(["-t", theme_name])
assert cfg.theme() == theme_name
def test_default_iconset(defaultConfig):
assert defaultConfig.iconset() == "auto"
def test_iconset():
iconset_name = "random-iconset"
cfg = core.config.Config(["-i", iconset_name])
assert cfg.iconset() == iconset_name
def test_reverse(defaultConfig):
assert defaultConfig.reverse() == False
cfg = core.config.Config(["-r"])
assert cfg.reverse() == True
def test_logfile(defaultConfig):
assert defaultConfig.logfile() is None
logfile = "some-random-logfile"
cfg = core.config.Config(["-f", logfile])
assert cfg.logfile() == logfile
def test_all_modules():
modules = core.config.all_modules()
assert len(modules) > 0
for module in modules:
pyname = "{}.py".format(module)
base = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"..",
"bumblebee_status",
"modules",
)
)
assert os.path.exists(os.path.join(base, "contrib", pyname)) or os.path.exists(
os.path.join(base, "core", pyname)
)
def test_list_output(mocker):
mocker.patch("core.config.sys")
cfg = core.config.Config(["-l", "themes"])
cfg = core.config.Config(["-l", "modules"])
cfg = core.config.Config(["-l", "modules-rst"])
def test_missing_parameter():
cfg = core.config.Config(["-p", "test.key"])
assert cfg.get("test.key") == None
assert cfg.get("test.key", "no-value-set") == "no-value-set"
#
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| {
"content_hash": "978cb27843209517b72938bc7c5aa2a4",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 83,
"avg_line_length": 23.28813559322034,
"alnum_prop": 0.6102620087336245,
"repo_name": "tobi-wan-kenobi/bumblebee-status",
"id": "762c674f5eac70f3083d3ba3c154ef2c584f3de7",
"size": "2748",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/core/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "149"
},
{
"name": "Python",
"bytes": "629537"
},
{
"name": "Shell",
"bytes": "2431"
}
],
"symlink_target": ""
} |
import sys
from twisted.internet import reactor
from pysnmp.carrier.twisted.dgram.base import DgramTwistedTransport
from pysnmp.carrier import error
domainName = snmpLocalDomain = (1, 3, 6, 1, 2, 1, 100, 1, 13)
class UnixTwistedTransport(DgramTwistedTransport):
# AbstractTwistedTransport API
def openClientMode(self, iface=''):
try:
self._lport = reactor.connectUNIXDatagram(iface, self)
except Exception:
raise error.CarrierError(sys.exc_info()[1])
return self
def openServerMode(self, iface=None):
try:
self._lport = reactor.listenUNIXDatagram(iface, self)
except Exception:
raise error.CarrierError(sys.exc_info()[1])
return self
UnixTransport = UnixTwistedTransport
| {
"content_hash": "02cf37f27f8f8b79afbaee85a704a685",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 30.653846153846153,
"alnum_prop": 0.6737766624843162,
"repo_name": "xfguo/pysnmp",
"id": "57d73eb60a6e89faa6585f45fd9c03e37d27e947",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysnmp/carrier/twisted/dgram/unix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "804395"
}
],
"symlink_target": ""
} |
"""
pyexcel_io.plugins
~~~~~~~~~~~~~~~~~~~
factory for getting readers and writers
:copyright: (c) 2014-2022 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import pyexcel_io.utils as ioutils
import pyexcel_io.manager as manager
import pyexcel_io.constants as constants
import pyexcel_io.exceptions as exceptions
from lml.loader import scan_plugins_regex
from lml.plugin import PluginInfo, PluginManager, PluginInfoChain
ERROR_MESSAGE_FORMATTER = "one of these plugins for %s data in '%s': %s"
UPGRADE_MESSAGE = "Please upgrade the plugin '%s' according to \
plugin compatibility table."
READER_PLUGIN = "pyexcel-io reader"
READER_PLUGIN_V2 = "pyexcel-io v2 reader"
WRITER_PLUGIN = "pyexcel-io writer"
WRITER_PLUGIN_V2 = "pyexcel-io v2 writer"
class IOPluginInfo(PluginInfo):
"""Pyexcel-io plugin info description"""
def tags(self):
for file_type in self.file_types:
yield file_type
class IOPluginInfoChain(PluginInfoChain):
"""provide custom functions to add a reader and a writer"""
def add_a_reader(
self,
relative_plugin_class_path=None,
file_types=None,
stream_type=None,
):
"""add pyexcle-io reader plugin info"""
a_plugin_info = IOPluginInfo(
READER_PLUGIN,
self._get_abs_path(relative_plugin_class_path),
file_types=file_types,
stream_type=stream_type,
)
return self.add_a_plugin_instance(a_plugin_info)
def add_a_writer(
self,
relative_plugin_class_path=None,
file_types=None,
stream_type=None,
):
"""add pyexcle-io writer plugin info"""
a_plugin_info = IOPluginInfo(
WRITER_PLUGIN,
self._get_abs_path(relative_plugin_class_path),
file_types=file_types,
stream_type=stream_type,
)
return self.add_a_plugin_instance(a_plugin_info)
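# Example (sketch) of how a plugin package would typically register itself
# through the chain above; the module path and file type are illustrative:
#
#     IOPluginInfoChain(__name__).add_a_reader(
#         relative_plugin_class_path='my_reader.MyReader',
#         file_types=['myfmt'],
#         stream_type='binary',
#     ).add_a_writer(
#         relative_plugin_class_path='my_writer.MyWriter',
#         file_types=['myfmt'],
#         stream_type='binary',
#     )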
class IOPluginInfoChainV2(PluginInfoChain):
"""provide custom functions to add a reader and a writer"""
def add_a_reader(
self,
relative_plugin_class_path=None,
locations=(),
file_types=None,
stream_type=None,
):
"""add pyexcle-io reader plugin info"""
a_plugin_info = IOPluginInfo(
READER_PLUGIN_V2,
self._get_abs_path(relative_plugin_class_path),
file_types=[
f"{location}-{file_type}"
for file_type in file_types
for location in locations
],
stream_type=stream_type,
)
return self.add_a_plugin_instance(a_plugin_info)
def add_a_writer(
self,
relative_plugin_class_path=None,
locations=(),
file_types=(),
stream_type=None,
):
"""add pyexcle-io writer plugin info"""
a_plugin_info = IOPluginInfo(
WRITER_PLUGIN_V2,
self._get_abs_path(relative_plugin_class_path),
file_types=[
f"{location}-{file_type}"
for file_type in file_types
for location in locations
],
stream_type=stream_type,
)
return self.add_a_plugin_instance(a_plugin_info)
class IOManager(PluginManager):
"""Manage pyexcel-io plugins"""
def __init__(self, plugin_type, known_list):
PluginManager.__init__(self, plugin_type)
self.known_plugins = known_list
self.action = "read"
if self.plugin_name == WRITER_PLUGIN:
self.action = "write"
def load_me_later(self, plugin_info):
PluginManager.load_me_later(self, plugin_info)
_do_additional_registration(plugin_info)
def register_a_plugin(self, cls, plugin_info):
"""for dynamically loaded plugin"""
PluginManager.register_a_plugin(self, cls, plugin_info)
_do_additional_registration(plugin_info)
def get_a_plugin(self, file_type=None, library=None, **keywords):
__file_type = file_type.lower()
try:
plugin = self.load_me_now(__file_type, library=library)
except Exception:
self.raise_exception(__file_type)
handler = plugin()
handler.set_type(__file_type)
return handler
def raise_exception(self, file_type):
plugins = self.known_plugins.get(file_type, None)
if plugins:
message = "Please install "
if len(plugins) > 1:
message += ERROR_MESSAGE_FORMATTER % (
self.action,
file_type,
",".join(plugins),
)
else:
message += plugins[0]
raise exceptions.SupportingPluginAvailableButNotInstalled(message)
else:
raise exceptions.NoSupportingPluginFound(
"No suitable library found for %s" % file_type
)
def get_all_formats(self):
"""return all supported formats"""
all_formats = set(
list(self.registry.keys()) + list(self.known_plugins.keys())
)
all_formats = all_formats.difference(
set([constants.DB_SQL, constants.DB_DJANGO])
)
return all_formats
class NewIOManager(IOManager):
def load_me_later(self, plugin_info):
PluginManager.load_me_later(self, plugin_info)
_do_additional_registration_for_new_plugins(plugin_info)
def register_a_plugin(self, cls, plugin_info):
"""for dynamically loaded plugin"""
PluginManager.register_a_plugin(self, cls, plugin_info)
_do_additional_registration_for_new_plugins(plugin_info)
def get_a_plugin(
self, file_type=None, location=None, library=None, **keywords
):
__file_type = file_type.lower()
plugin = self.load_me_now(f"{location}-{__file_type}", library=library)
return plugin
def raise_exception(self, file_type):
file_type = file_type.split("-")[1]
plugins = self.known_plugins.get(file_type, None)
if plugins:
message = "Please install "
if len(plugins) > 1:
message += ERROR_MESSAGE_FORMATTER % (
self.action,
file_type,
",".join(plugins),
)
else:
message += plugins[0]
raise exceptions.SupportingPluginAvailableButNotInstalled(message)
else:
raise exceptions.NoSupportingPluginFound(
"No suitable library found for %s" % file_type
)
def get_all_formats(self):
"""return all supported formats"""
all_formats = set(
[x.split("-")[1] for x in self.registry.keys()]
+ list(self.known_plugins.keys())
)
return all_formats
def _do_additional_registration(plugin_info):
for file_type in plugin_info.tags():
manager.register_stream_type(file_type, plugin_info.stream_type)
manager.register_a_file_type(file_type, plugin_info.stream_type, None)
def _do_additional_registration_for_new_plugins(plugin_info):
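    # V2 plugin tags are "<location>-<file_type>" strings, so only the part
    # after the dash is registered with the manager as the actual file type.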
for file_type in plugin_info.tags():
manager.register_stream_type(
file_type.split("-")[1], plugin_info.stream_type
)
manager.register_a_file_type(
file_type.split("-")[1], plugin_info.stream_type, None
)
class AllReaders:
def get_all_formats(self):
return OLD_READERS.get_all_formats().union(
NEW_READERS.get_all_formats()
) - set([constants.DB_SQL, constants.DB_DJANGO])
class AllWriters:
def get_all_formats(self):
return OLD_WRITERS.get_all_formats().union(
NEW_WRITERS.get_all_formats()
) - set([constants.DB_SQL, constants.DB_DJANGO])
OLD_READERS = IOManager(READER_PLUGIN, ioutils.AVAILABLE_READERS)
OLD_WRITERS = IOManager(WRITER_PLUGIN, ioutils.AVAILABLE_WRITERS)
NEW_WRITERS = NewIOManager(WRITER_PLUGIN_V2, ioutils.AVAILABLE_WRITERS)
NEW_READERS = NewIOManager(READER_PLUGIN_V2, ioutils.AVAILABLE_READERS)
READERS = AllReaders()
WRITERS = AllWriters()
def load_plugins(plugin_name_patterns, path, black_list, white_list):
"""Try to discover all pyexcel-io plugins"""
scan_plugins_regex(
plugin_name_patterns=plugin_name_patterns,
pyinstaller_path=path,
black_list=black_list,
white_list=white_list,
)
| {
"content_hash": "445c226c8313e8c2889f6e4cf5df7775",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 32.41984732824427,
"alnum_prop": 0.6003060984224158,
"repo_name": "chfw/pyexcel-io",
"id": "495283aff408996ec3e43f65f1bdb4da3b5ae305",
"size": "8494",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyexcel_io/plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "129"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "109782"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
} |
import datetime
from django.utils import timezone
from django.test import TestCase
from .models import User
def create_user(time=None):
    # evaluate the default timestamp at call time, not at import time
    if time is None:
        time = timezone.now()
user = User.objects.create_user(
email='[email protected]',
password="example_password",
name='Jan',
surname='Kowalski',
)
user.register_date = time
user.register_updated = time
user.akey_expires = time + datetime.timedelta(2)
user.last_login = time
return user
def create_superuser(time=None):
    # evaluate the default timestamp at call time, not at import time
    if time is None:
        time = timezone.now()
super_user = User.objects.create_superuser(
email='[email protected]',
password="example_password",
name='Jan',
surname='Kowalski',
)
super_user.register_date = time
super_user.register_updated = time
super_user.akey_expires = time + datetime.timedelta(2)
super_user.last_login = time
return super_user
class UserMethodTests(TestCase):
"""
Tests for methods of User model.
"""
def test_get_email(self):
"""Test for method get_mail - should return email of user object. """
user = create_user()
self.assertEqual(user.get_email, '[email protected]')
def test_get_short_name(self):
"""Test for method get_short_name
- should return name of user object. """
user = create_user()
self.assertEqual(user.get_short_name(), 'Jan')
def test_is_superuser(self):
"""Test for method is_superuser
- should return False for user object. """
user = create_user()
self.assertEqual(user.is_superuser, False)
def test_is_staff(self):
"""Test for method is_staff - should return False for user object. """
user = create_user()
self.assertEqual(user.is_staff, False)
def test_has_perm(self):
"""Test for method has_perm - should return False for user object. """
user = create_user()
self.assertEqual(user.has_perm(perm=None), False)
def test_module_perms(self):
"""Test for method module_perms
- should return False for user object. """
user = create_user()
self.assertEqual(user.has_module_perms(app_label=None), False)
def test_get_last_login(self):
"""Test for method get_last_login
- should return time of last login for user object.
(time = timezone.now())
"""
time = timezone.now()
user = create_user(time=time)
self.assertEqual(user.get_last_login(), time)
def test_get_full_name(self):
"""Test for method get_full_name
- should return full name of user object."""
user = create_user()
self.assertEqual(user.get_full_name(), 'Jan Kowalski')
class SuperUserTests(TestCase):
"""
Tests for methods of User model.
"""
def test_get_email(self):
"""Test for method get_mail
- should return email of super_user object. """
super_user = create_superuser()
self.assertEqual(
super_user.get_email, '[email protected]'
)
def test_get_short_name(self):
"""Test for method get_short_name
- should return name of super_user object. """
super_user = create_superuser()
self.assertEqual(super_user.get_short_name(), 'Jan')
def test_is_superuser(self):
"""Test for method is_superuser
- should return True for super_user object. """
super_user = create_superuser()
self.assertEqual(super_user.is_superuser, True)
def test_is_staff(self):
"""Test for method is_staff
- should return True for super_user object. """
super_user = create_superuser()
self.assertEqual(super_user.is_staff, True)
def test_has_perm(self):
"""Test for method has_perm
- should return True for super_user object. """
super_user = create_superuser()
self.assertEqual(super_user.has_perm(perm=None), True)
def test_module_perms(self):
"""Test for method module_perms
- should return True for super_user object. """
super_user = create_superuser()
self.assertEqual(super_user.has_module_perms(app_label=None), True)
def test_get_last_login(self):
"""Test for method get_last_login -
should return time of last login for super_user object.
(time = timezone.now())
"""
time = timezone.now()
super_user = create_superuser(time=time)
self.assertEqual(super_user.get_last_login(), time)
def test_get_full_name(self):
"""Test for method get_full_name
- should return full name of super_user object."""
super_user = create_superuser()
self.assertEqual(super_user.get_full_name(), 'Jan Kowalski')
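# These tests can be run with Django's test runner, e.g. (app label assumed
# from the repository layout): python manage.py test user_account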
| {
"content_hash": "43cc678b792a11a8a088ed9e2ce655f5",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 33.02739726027397,
"alnum_prop": 0.6204894234757362,
"repo_name": "piemar1/Schedule_django",
"id": "0109d3827557aa8b9901826ae23f835b54f3032c",
"size": "4822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_account/tests_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1952"
},
{
"name": "HTML",
"bytes": "24957"
},
{
"name": "JavaScript",
"bytes": "201"
},
{
"name": "Python",
"bytes": "76705"
}
],
"symlink_target": ""
} |
'''
Utility script used to determine which CPython tests IronPython can run correctly.
USAGE:
    ipy cpy_tests.py C:\Python25
OUTPUT:
%CD%\IPY_PASSES.log (tests which IP can run)
%CD%\IPY_FAILS.log (tests which IP cannot run)
'''
import sys
import nt
from clr_helpers import Process
#------------------------------------------------------------------------------
CPY_DIR = sys.argv[1] #E.g., C:\Python25
DISABLED = {
"test_aepack.py" : "Platform specific test - Mac",
}
_temp_keys = list(DISABLED.keys())
TEST_LIST = [x for x in nt.listdir(CPY_DIR + r"\Lib\test") if x.startswith("test_") and x.endswith(".py") and _temp_keys.count(x)==0]
#Log containing all tests IP passes
IPY_PASSES = open("IPY_PASSES.log", "w")
#Log containing all tests IP fails
IPY_FAILS = open("IPY_FAILS.log", "w")
#--HELPER FUNCTIONS------------------------------------------------------------
def ip_passes(mod_name):
print(mod_name)
IPY_PASSES.write(mod_name + "\n")
IPY_PASSES.flush()
def ip_fails(mod_name):
IPY_FAILS.write(mod_name + "\n")
IPY_FAILS.flush()
#--MAIN-----------------------------------------------------------------------
nt.chdir(CPY_DIR + r"\Lib")
for mod_name in TEST_LIST:
proc = Process()
proc.StartInfo.FileName = sys.executable
proc.StartInfo.Arguments = "test\\" + mod_name
proc.StartInfo.UseShellExecute = False
proc.StartInfo.RedirectStandardOutput = True
if (not proc.Start()):
raise Exception("Python process failed to start: " + mod_name)
else:
cpymod_dir = proc.StandardOutput.ReadToEnd()
if not proc.HasExited:
raise Exception("Python process should have exited by now: " + mod_name)
if proc.ExitCode==0:
ip_passes(mod_name)
else:
ip_fails(mod_name)
IPY_PASSES.close()
IPY_FAILS.close() | {
"content_hash": "15926a786bbae4bb1af621499a86c189",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 133,
"avg_line_length": 28.723076923076924,
"alnum_prop": 0.5731119442956615,
"repo_name": "IronLanguages/ironpython3",
"id": "08c1aa38a15a0bc174a5c19ee8a3ceafeb0f722f",
"size": "2075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/Tools/cpy_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6855"
},
{
"name": "C",
"bytes": "239473"
},
{
"name": "C#",
"bytes": "12619304"
},
{
"name": "C++",
"bytes": "28403"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "13157428"
},
{
"name": "Makefile",
"bytes": "332"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "84504"
},
{
"name": "Python",
"bytes": "29490541"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "4872"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
import threading
import collections
import zephyr
class EventStream:
def __init__(self):
self.events = []
self.events_cleaned_up = 0
self.lock = threading.RLock()
def __iter__(self):
with self.lock:
return iter(self.events[:])
def __len__(self):
with self.lock:
corrected_length = len(self.events) + self.events_cleaned_up
return corrected_length
def __getitem__(self, index):
with self.lock:
assert 0 <= index < len(self)
assert index >= self.events_cleaned_up
corrected_index = index - self.events_cleaned_up
return self.events[corrected_index]
def append(self, value):
with self.lock:
self.events.append(value)
def clean_up_events_before(self, timestamp_lower_bound):
with self.lock:
cutoff_index = 0
for event_timestamp, event_value in self.events: #@UnusedVariable
if event_timestamp < timestamp_lower_bound:
cutoff_index += 1
else:
break
if cutoff_index:
self.events = self.events[cutoff_index:]
self.events_cleaned_up += cutoff_index
def iterate_samples(self, from_sample_index, to_end_timestamp):
sample_index = from_sample_index
while True:
with self.lock:
if self.events_cleaned_up > sample_index:
break
last_item = self[sample_index] if len(self) > sample_index else None
if last_item is not None:
event_timestamp, event_value = last_item
if event_timestamp <= to_end_timestamp:
yield event_value
sample_index += 1
continue
break
class SignalStream:
def __init__(self, signal_packet):
self.samplerate = signal_packet.samplerate
self.samples = []
self.lock = threading.RLock()
self.end_timestamp = None
self.append_signal_packet(signal_packet)
def append_signal_packet(self, signal_packet):
with self.lock:
assert signal_packet.samplerate == self.samplerate
self.samples.extend(signal_packet.samples)
self.end_timestamp = signal_packet.timestamp + len(signal_packet.samples) / float(signal_packet.samplerate)
def remove_samples_before(self, timestamp_lower_bound):
with self.lock:
samples_to_remove = max(0, int((timestamp_lower_bound - self.start_timestamp) * self.samplerate))
if samples_to_remove:
self.samples = self.samples[samples_to_remove:]
return samples_to_remove
@property
def start_timestamp(self):
return self.end_timestamp - len(self.samples) / float(self.samplerate)
def iterate_timed_samples(self, skip_samples=0):
with self.lock:
start_timestamp = self.start_timestamp
sample_period = 1.0 / self.samplerate
for sample_i, sample in enumerate(self.samples[skip_samples:], start=skip_samples):
sample_timestamp = start_timestamp + sample_i * sample_period
yield sample_timestamp, sample
class SignalStreamHistory:
def __init__(self):
self._signal_streams = []
self.samples_cleaned_up = 0
def append_signal_packet(self, signal_packet, starts_new_stream):
if starts_new_stream or not len(self._signal_streams):
signal_stream = SignalStream(signal_packet)
self._signal_streams.append(signal_stream)
else:
signal_stream = self._signal_streams[-1]
signal_stream.append_signal_packet(signal_packet)
def get_signal_streams(self):
return self._signal_streams
def _cleanup_signal_stream(self, signal_stream, timestamp_bound):
if timestamp_bound >= signal_stream.end_timestamp:
self._signal_streams.remove(signal_stream)
samples_removed = len(signal_stream.samples)
else:
samples_removed = signal_stream.remove_samples_before(timestamp_bound)
self.samples_cleaned_up += samples_removed
def clean_up_samples_before(self, history_limit):
for signal_stream in self._signal_streams[:]:
first_timestamp = signal_stream.start_timestamp
if first_timestamp >= history_limit:
break
self._cleanup_signal_stream(signal_stream, history_limit)
def iterate_samples(self, from_sample_index, to_end_timestamp):
from_sample_index = from_sample_index - self.samples_cleaned_up
signal_stream_start_index = 0
for signal_stream in self._signal_streams:
sample_count = len(signal_stream.samples)
next_signal_stream_start_index = signal_stream_start_index + sample_count
if from_sample_index < next_signal_stream_start_index:
samples_to_skip = max(0, from_sample_index - signal_stream_start_index)
for sample_timestamp, sample in signal_stream.iterate_timed_samples(samples_to_skip):
if sample_timestamp > to_end_timestamp:
break
yield sample
signal_stream_start_index = next_signal_stream_start_index
class MeasurementCollector:
def __init__(self, history_length_seconds=20.0):
self._signal_stream_histories = collections.defaultdict(SignalStreamHistory)
self._event_streams = collections.defaultdict(EventStream)
self.history_length_seconds = history_length_seconds
self.last_cleanup_time = 0.0
def get_signal_stream_history(self, stream_type):
return self._signal_stream_histories[stream_type]
def get_event_stream(self, stream_type):
return self._event_streams[stream_type]
def iterate_signal_stream_histories(self):
return self._signal_stream_histories.items()
def iterate_event_streams(self):
return self._event_streams.items()
def handle_signal(self, signal_packet, starts_new_stream):
signal_stream_history = self._signal_stream_histories[signal_packet.type]
signal_stream_history.append_signal_packet(signal_packet, starts_new_stream)
self.cleanup_if_needed()
def handle_event(self, stream_name, value):
self._event_streams[stream_name].append(value)
self.cleanup_if_needed()
def cleanup_if_needed(self):
now = zephyr.time()
if self.last_cleanup_time < now - 5.0:
history_limit = now - self.history_length_seconds
for signal_stream_history in self._signal_stream_histories.values():
signal_stream_history.clean_up_samples_before(history_limit)
for event_stream in self._event_streams.values():
event_stream.clean_up_events_before(history_limit)
self.last_cleanup_time = now
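# Rough usage sketch (the packet object and stream name are illustrative only):
#
#     collector = MeasurementCollector(history_length_seconds=60.0)
#     collector.handle_signal(signal_packet, starts_new_stream=False)
#     collector.handle_event("heart_rate", (zephyr.time(), 72))
#     for stream_type, history in collector.iterate_signal_stream_histories():
#         print(stream_type, len(history.get_signal_streams()))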
| {
"content_hash": "007a06673b4774607316ad301143b8f3",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 119,
"avg_line_length": 37.3921568627451,
"alnum_prop": 0.569349764027268,
"repo_name": "jpaalasm/zephyr-bt",
"id": "610d660ba1ce2ad6f2f026d7236f468a5e3cdbcf",
"size": "7628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zephyr/collector.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "46916"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('group', parent_package, top_path)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
print('This is the wrong setup.py file to run')
| {
"content_hash": "42a8dca6d354c2b3f55e261a9fe063f9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6843137254901961,
"repo_name": "alexis-roche/nipy",
"id": "b0cce0828d0c8340e7e9f005c423bbcf99679c79",
"size": "510",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nipy/algorithms/group/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1592552"
},
{
"name": "C++",
"bytes": "6037"
},
{
"name": "Makefile",
"bytes": "3630"
},
{
"name": "Matlab",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "2891734"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from copy import deepcopy
import json
import logging
import itertools
import collections
import numpy as np
from matplotlib.colors import LinearSegmentedColormap as colormap
from matplotlib.colors import rgb2hex, hex2color
from indra.statements import Activation, Inhibition, RegulateActivity, \
Complex, Modification, \
                             SelfModification, Agent, RasGef, RasGap
from indra.databases import hgnc_client
from indra.databases import context_client
from indra.preassembler import Preassembler
from indra.tools.expand_families import Expander
from indra.preassembler.hierarchy_manager import hierarchies
expander = Expander(hierarchies)
# Python 2
try:
basestring
# Python 3
except NameError:
basestring = str
logger = logging.getLogger('cyjs_assembler')
class CyJSAssembler(object):
def __init__(self, stmts=None):
if not stmts:
self.statements = []
else:
self.statements = stmts
self._edges = []
self._nodes = []
self._existing_nodes = {}
self._id_counter = 0
self._exp_colorscale = []
self._mut_colorscale = []
def add_statements(self, stmts):
"""Add INDRA Statements to the assembler's list of statements.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of :py:class:`indra.statements.Statement`
to be added to the statement list of the assembler.
"""
stmts = Preassembler.combine_duplicate_stmts(stmts)
for stmt in stmts:
self.statements.append(stmt)
def make_model(self, *args, **kwargs):
"""Assemble a Cytoscape JS network from INDRA Statements.
This method assembles a Cytoscape JS network from the set of INDRA
Statements added to the assembler.
Parameters
----------
grouping : bool
If True, the nodes with identical incoming and outgoing edges
are grouped and the corresponding edges are merged.
drop_virtual_edges : bool
If True, the original edges which have been collected and made
virtual are discarded. If these edges are discarded, they are
not seen by the cytoscape.js layout algorithms.
add_edge_weights : bool
If True, give edges that connect group nodes a weight of their
group size. All other edges get a weight of 1.
Returns
-------
cyjs_str : str
The json serialized Cytoscape JS model.
"""
for stmt in self.statements:
if isinstance(stmt, RegulateActivity):
self._add_regulate_activity(stmt)
elif isinstance(stmt, Inhibition):
                self._add_regulate_activity(stmt)
elif isinstance(stmt, Complex):
self._add_complex(stmt)
elif isinstance(stmt, Modification):
self._add_modification(stmt)
else:
logger.warning('Unhandled statement type: %s' %
stmt.__class__.__name__)
if kwargs.get('grouping'):
self._group_nodes()
self._group_edges()
if kwargs.get('drop_virtual_edges'):
self._drop_virtual_edges()
if kwargs.get('add_edge_weights'):
self._add_edge_weights()
return self.print_cyjs()
def set_context(self, *args, **kwargs):
"""Set protein expression data as node attribute
This method uses :py:mod:`indra.databases.context_client` to get
protein expression levels for a given cell type and set a node
attribute for proteins accordingly.
Parameters
----------
cell_type : str
Cell type name for which expression levels are queried.
The cell type name follows the CCLE database conventions.
Example: LOXIMVI_SKIN, BT20_BREAST
bin_expression : bool
If True, the gene expression will be put into 5 bins based on
all gene expression values. An additional bin is used to indicate
that the context_client returned None.
user_bins : int
If specified, split the expression levels into the given number
of bins. If not specified, default will be 5.
"""
cell_type = kwargs.get('cell_type')
if not cell_type:
logger.warning('No cell type given.')
return
# Collect all gene names in network
gene_names = []
for node in self._nodes:
members = node['data'].get('members')
if members:
gene_names += list(members.keys())
else:
if node['data']['name'].startswith('Group'):
continue
gene_names.append(node['data']['name'])
# Get expression and mutation from context client
exp = context_client.get_protein_expression(gene_names, cell_type)
mut = context_client.get_mutations(gene_names, cell_type)
if not exp:
logger.warning('Could not get context for %s cell type.' %
cell_type)
return
else:
exp = {k: v[cell_type] for k, v in exp.items()}
if not mut:
logger.warning('Could not get mutations for %s cell type.' %
cell_type)
return
else:
mut = {k: v[cell_type] for k, v in mut.items()}
# Get expression and mutation for specific gene
def get_expr_mut(name, expr_data, mut_data):
amount = expr_data.get(name)
if amount is None:
expression = None
else:
expression = np.log10(amount)
mutation = mut_data.get(name)
if mutation is not None:
mutation = int(mutation)
else:
mutation = 0
return expression, mutation
# Set node properties for expression and mutation
for node in self._nodes:
members = node['data'].get('members')
if members:
for member in members.keys():
expression, mutation = get_expr_mut(member, exp, mut)
node['data']['members'][member]['expression'] = expression
node['data']['members'][member]['mutation'] = mutation
node['data']['expression'] = None
node['data']['mutation'] = 0
else:
if node['data']['name'].startswith('Group'):
node['data']['expression'] = None
node['data']['mutation'] = 0
else:
expression, mutation = get_expr_mut(node['data']['name'],
exp, mut)
node['data']['expression'] = expression
node['data']['mutation'] = mutation
# Binning for the purpose of assigning colors
if kwargs.get('bin_expression'):
# how many bins? If not specified, set to 5
n_bins = 5
user_bins = kwargs.get('n_bins')
if type(user_bins) == int:
n_bins = user_bins
if n_bins > 9:
n_bins = 9
logger.info('Only 9 bins allowed. Setting n_bins = 9.')
if n_bins < 3:
n_bins = 3
                logger.info('Need at least 3 bins. Setting n_bins = 3.')
# Create color scale for unmutated gene expression
# feed in hex values from colorbrewer2 9-class PuBuGn
wt_hexes = ['#f7fcf5', '#e5f5e0', '#c7e9c0', '#a1d99b', '#74c476',
'#41ab5d', '#238b45', '#006d2c', '#00441b']
exp_wt_colorscale = _build_color_scale(wt_hexes, n_bins)
# tack on a gray for no expression data
exp_wt_colorscale.append('#bdbdbd')
self._exp_colorscale = exp_wt_colorscale
# create color scale for mutated gene expression
# feed in hex values from colorbrewer2 9-class YlOrRd
mut_hexes = ['#fff5eb', '#fee6ce', '#fdd0a2', '#fdae6b', '#fd8d3c',
'#f16913', '#d94801', '#a63603', '#7f2704']
exp_mut_colorscale = _build_color_scale(mut_hexes, n_bins)
# tack on a gray for no expression data
exp_mut_colorscale.append('#bdbdbd')
self._mut_colorscale = exp_mut_colorscale
# capture the expression levels of every gene in nodes
exp_lvls = [n['data'].get('expression') for n in self._nodes]
# capture the expression levels of every gene in family members
m_exp_lvls = []
for n in self._nodes:
if n['data'].get('members'):
members = n['data']['members']
for m in members:
m_exp_lvls.append(members[m]['expression'])
# combine node expressions and family expressions
exp_lvls = exp_lvls + m_exp_lvls
# get rid of None gene expressions
exp_lvls = [x for x in exp_lvls if x is not None]
# bin expression levels into n equally sized bins
# bin n+1 reserved for None
# this returns the bounds of each bin. so n_bins+1 bounds.
# get rid of first value which is the leftmost bound
bin_thr = np.histogram(exp_lvls, n_bins)[1][1:]
# iterate over nodes
for n in self._nodes:
# if node has members set member bin_expression values
if n['data'].get('members'):
members = n['data']['members']
for m in members:
# if expression is None, set to bin index n_bins
if members[m]['expression'] is None:
members[m]['bin_expression'] = n_bins
else:
for thr_idx, thr in enumerate(bin_thr):
if members[m]['expression'] <= thr:
members[m]['bin_expression'] = thr_idx
break
# set bin_expression for the node itself
if n['data']['expression'] is None:
n['data']['bin_expression'] = n_bins
else:
for thr_idx, thr in enumerate(bin_thr):
if n['data']['expression'] <= thr:
n['data']['bin_expression'] = thr_idx
break
def print_cyjs(self):
"""Return the assembled Cytoscape JS network as a json string.
Returns
-------
cyjs_str : str
A json string representation of the Cytoscape JS network.
"""
exp_colorscale_str = json.dumps(self._exp_colorscale)
mut_colorscale_str = json.dumps(self._mut_colorscale)
cyjs_dict = {'edges': self._edges, 'nodes': self._nodes}
model_str = json.dumps(cyjs_dict, indent=1, sort_keys=True)
model_dict = {'exp_colorscale_str': exp_colorscale_str,
'mut_colorscale_str': mut_colorscale_str,
'model_elements_str': model_str}
cyjs_str = json.dumps(model_dict, indent=1)
return cyjs_str
def save_model(self, fname='model.js'):
"""Save the assembled Cytoscape JS network in a file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the Cytoscape JS network to.
Default: model.js
"""
model_dict = json.loads(self.print_cyjs())
s = ''
s += 'var exp_colorscale = %s;\n' % model_dict['exp_colorscale_str']
s += 'var mut_colorscale = %s;\n' % model_dict['mut_colorscale_str']
s += 'var model_elements = %s;\n' % model_dict['model_elements_str']
with open(fname, 'wt') as fh:
fh.write(s)
def _add_regulate_activity(self, stmt):
edge_type, edge_polarity = _get_stmt_type(stmt)
edge_id = self._get_new_id()
source_id = self._add_node(stmt.subj)
target_id = self._add_node(stmt.obj)
edge = {'data': {'i': edge_type, 'id': edge_id,
'source': source_id, 'target': target_id,
'polarity': edge_polarity}}
self._edges.append(edge)
def _add_modification(self, stmt):
edge_type, edge_polarity = _get_stmt_type(stmt)
edge_id = self._get_new_id()
source_id = self._add_node(stmt.enz)
target_id = self._add_node(stmt.sub)
edge = {'data': {'i': edge_type, 'id': edge_id,
'source': source_id, 'target': target_id,
'polarity': edge_polarity}}
self._edges.append(edge)
def _add_complex(self, stmt):
edge_type, edge_polarity = _get_stmt_type(stmt)
for m1, m2 in itertools.combinations(stmt.members, 2):
m1_id = self._add_node(m1)
m2_id = self._add_node(m2)
edge_id = self._get_new_id()
edge = {'data': {'i': edge_type, 'id': edge_id,
'source': m1_id, 'target': m2_id,
'polarity': edge_polarity}}
self._edges.append(edge)
def _add_node(self, agent):
node_key = agent.name
node_id = self._existing_nodes.get(node_key)
if node_id is not None:
return node_id
db_refs = _get_db_refs(agent)
node_id = self._get_new_id()
self._existing_nodes[node_key] = node_id
node_name = agent.name
node_name = node_name.replace('_', ' ')
expanded_families = expander.get_children(agent, ns_filter='HGNC')
members = {}
for member in expanded_families:
hgnc_symbol = member[1]
hgnc_id = hgnc_client.get_hgnc_id(hgnc_symbol)
if hgnc_id:
up_id = hgnc_client.get_uniprot_id(hgnc_id)
member_agent = Agent(hgnc_symbol,
db_refs={'HGNC': hgnc_id,
'UP': up_id})
member_db_refs = _get_db_refs(member_agent)
else:
member_db_refs = {}
members[member[1]] = {
'mutation': None,
'expression': None,
'db_refs': member_db_refs
}
node = {'data': {'id': node_id, 'name': node_name,
'db_refs': db_refs, 'parent': '',
'members': members}}
self._nodes.append(node)
return node_id
def _get_new_id(self):
ret = self._id_counter
self._id_counter += 1
return ret
def _get_node_key(self, node_dict):
s = tuple(sorted(node_dict['sources']))
t = tuple(sorted(node_dict['targets']))
return (s, t)
def _get_node_groups(self):
# First we construct a dictionary for each node's
# source and target edges
node_dict = {node['data']['id']: {'sources': [], 'targets': []}
for node in self._nodes}
for edge in self._edges:
# Add edge as a source for its target node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['source'])
node_dict[edge['data']['target']]['sources'].append(edge_data)
# Add edge as target for its source node
edge_data = (edge['data']['i'], edge['data']['polarity'],
edge['data']['target'])
node_dict[edge['data']['source']]['targets'].append(edge_data)
# Make a dictionary of nodes based on source/target as a key
node_key_dict = collections.defaultdict(lambda: [])
for node_id, node_d in node_dict.items():
key = self._get_node_key(node_d)
node_key_dict[key].append(node_id)
# Constrain the groups to ones that have more than 1 member
node_groups = [g for g in node_key_dict.values() if (len(g) > 1)]
return node_groups
def _group_edges(self):
# Iterate over edges in a copied edge list
edges_to_add = []
for e in self._edges:
# Check if edge source or target are contained in a parent
# If source or target in parent edit edge
# Nodes may only point within their container
source = e['data']['source']
target = e['data']['target']
source_node = [x for x in self._nodes if
x['data']['id'] == source][0]
target_node = [x for x in self._nodes if
x['data']['id'] == target][0]
# If the source node is in a group, we change the source of this
# edge to the group
new_edge = None
if source_node['data']['parent'] != '':
new_edge = deepcopy(e)
new_edge['data'].pop('id', None)
new_edge['data']['source'] = source_node['data']['parent']
e['data']['i'] = 'Virtual'
            # If the target node is in a group, we change the target of this
# edge to the group
if target_node['data']['parent'] != '':
if new_edge is None:
new_edge = deepcopy(e)
new_edge['data'].pop('id', None)
new_edge['data']['target'] = target_node['data']['parent']
e['data']['i'] = 'Virtual'
if new_edge is not None:
if new_edge not in edges_to_add:
edges_to_add.append(new_edge)
# need to check if there are identical edges in edges to add
# identical on everything but id
for edge in edges_to_add:
new_id = self._get_new_id()
edge['data']['id'] = new_id
self._edges.append(edge)
def _group_nodes(self):
node_groups = self._get_node_groups()
for group in node_groups:
# Make new group node
new_group_node = {'data': {'id': (self._get_new_id()),
'name': ('Group' + str(group)),
'parent': ''}}
# Point the node to its parent
for node in self._nodes:
if node['data']['id'] in group:
node['data']['parent'] = new_group_node['data']['id']
self._nodes.append(new_group_node)
def _drop_virtual_edges(self):
self._edges = [x for x in self._edges if x['data']['i'] != 'Virtual']
def _add_edge_weights(self):
# make a list of group nodes
group_node_ids = []
for n in self._nodes:
if n['data']['parent'] != '':
group_node_ids.append(n['data']['parent'])
group_node_ids = list(set(group_node_ids))
# get sizes for each group
group_node_sizes = {}
for g in group_node_ids:
group_members = [x for x in self._nodes
if x['data']['parent'] == g]
group_size = len(group_members)
group_node_sizes[g] = group_size
# iterate over edges
# if they point to/from group, weigh them acc to group size
# nodes between two groups get assigned heaviest of two weights
for e in self._edges:
source = e['data']['source']
target = e['data']['target']
if (source in group_node_ids) and (target in group_node_ids):
e['data']['weight'] = max(group_node_sizes[source],
group_node_sizes[target])
elif source in group_node_ids:
e['data']['weight'] = group_node_sizes[source]
elif target in group_node_ids:
e['data']['weight'] = group_node_sizes[target]
# once all group node edges have weights
# give non-group node edges weights of 1
for e in self._edges:
if e['data'].get('weight') is None:
e['data']['weight'] = 1
def _add_attractors(self):
parent_node_ids = [x['data']['parent'] for x in self._nodes
if x['data']['parent'] != '']
parent_node_ids = list(set(parent_node_ids))
attr_dict = {}
for parent_node_id in parent_node_ids:
child_node_ids = [x['data']['id'] for x in self._nodes
if x['data']['parent'] == parent_node_id]
# pick the middle node of the children
# this actually produces some spread group nodes, not ideal
for i in list(range(len(child_node_ids))):
if i >= len(child_node_ids)/2:
break
else:
attr_node_id = child_node_ids[i]
# sets attractor to last child node
# attr_node_id = child_node_ids[-1]
attr_dict[parent_node_id] = attr_node_id
# for any existing edges to/from parent
# give attractors same edges
attr_edges = []
for edge in self._edges:
source = edge['data']['source']
target = edge['data']['target']
attr_edge = None
# check source and target against attr_dict to point to attractors
# edges sourcing or targeting parents will be ignored
if source in attr_dict or target in attr_dict:
attr_edge = deepcopy(edge)
if source in attr_dict:
attr_edge['data']['source'] = attr_dict[source]
if target in attr_dict:
attr_edge['data']['target'] = attr_dict[target]
attr_edge['data']['id'] = self._get_new_id()
attr_edge['data']['i'] = 'Attractor'
if attr_edge is not None:
if attr_edge not in attr_edges:
attr_edges.append(attr_edge)
for attr_edge in attr_edges:
self._edges.append(attr_edge)
def _add_ext_attractors(self):
parent_node_ids = [x['data']['parent'] for x in self._nodes
if x['data']['parent'] != '']
parent_node_ids = list(set(parent_node_ids))
# this is in the format {parent_node_id : [child_node_ids]}
# parent child dict
pc_dict = {}
for parent_node_id in parent_node_ids:
child_node_ids = [x['data']['id'] for x in self._nodes
if x['data']['parent'] == parent_node_id]
pc_dict[parent_node_id] = {'children': child_node_ids,
'sources': [],
'src_attr_id': None,
'targets': [],
'targ_attr_id': None}
# discover all sources and targets for group nodes
for e in self._edges:
source = e['data']['source']
target = e['data']['target']
if source in pc_dict or target in pc_dict:
# any edge that has a parent node as its source is a target
# for that parent node
if source in pc_dict:
pc_dict[source]['targets'].append(target)
# any edge that has a parent node as a target is a source
# for that parent node
if target in pc_dict:
pc_dict[target]['sources'].append(source)
# create external attractor nodes for each parent node
for p in pc_dict:
# if there are sources that point at the parent node
# init and append a source attractor
children = pc_dict[p]['children']
sources = pc_dict[p]['sources']
if len(sources) > 0:
src_attr_id = self._get_new_id()
                pc_dict[p]['src_attr_id'] = src_attr_id
src_attr = {'data': {'id': src_attr_id,
'name': ('Attractor'),
'parent': ''}}
self._nodes.append(src_attr)
# create edges from the sources to the source attractor
for s in sources:
edge = {'data': {'i': 'Attractor',
'id': self._get_new_id(),
'source': s,
'target': src_attr_id}}
self._edges.append(edge)
# create edges from the src attractor pointing to children
for c in children:
edge = {'data': {'i': 'Attractor',
'id': self._get_new_id(),
'source': src_attr_id,
'target': c}}
self._edges.append(edge)
# if there are nodes targeted by the parent node
# init and append a target attractor
targets = pc_dict[p]['targets']
if len(targets) > 0:
targ_attr_id = self._get_new_id()
                pc_dict[p]['targ_attr_id'] = targ_attr_id
targ_attr = {'data': {'id': targ_attr_id,
'name': ('Attractor'),
'parent': ''}}
self._nodes.append(targ_attr)
# create edges from the target attractor to targets
for t in targets:
edge = {'data': {'i': 'Attractor',
'id': self._get_new_id(),
'source': targ_attr_id,
'target': t}}
self._edges.append(edge)
                # create edges from the children pointing to the target attractor
for c in children:
edge = {'data': {'i': 'Attractor',
'id': self._get_new_id(),
'source': c,
'target': targ_attr_id}}
self._edges.append(edge)
def _get_db_refs(agent):
cyjs_db_refs = {}
for db_name, db_ids in agent.db_refs.items():
if isinstance(db_ids, int):
db_id = str(db_ids)
elif isinstance(db_ids, basestring):
db_id = db_ids
else:
db_id = db_ids[0]
if db_name == 'UP':
name = 'UniProt'
val = 'http://identifiers.org/uniprot/%s' % db_id
elif db_name == 'HGNC':
name = 'HGNC'
val = 'http://identifiers.org/hgnc/HGNC:%s' % db_id
elif db_name == 'CHEBI':
name = 'ChEBI'
val = 'http://identifiers.org/chebi/%s' % db_id
elif db_name == 'PUBCHEM':
name = 'PubChem'
val = 'http://identifiers.org/pubchem.compound/%s' % db_id
elif db_name == 'HMDB':
name = 'HMDB'
val = 'http://identifiers.org/hmdb/%s' % db_id
elif db_name == 'GO':
name = 'GO'
val = 'http://identifiers.org/go/%s' % db_id
elif db_name == 'MESH':
name = 'MESH'
val = 'http://identifiers.org/mesh/%s' % db_id
elif db_name == 'IP':
name = 'InterPro'
val = 'http://identifiers.org/interpro/%s' % db_id
elif db_name == 'TEXT':
continue
else:
val = db_id
name = db_name
cyjs_db_refs[name] = val
return cyjs_db_refs
def _get_stmt_type(stmt):
if isinstance(stmt, Modification):
edge_type = 'Modification'
edge_polarity = 'positive'
elif isinstance(stmt, SelfModification):
edge_type = 'SelfModification'
edge_polarity = 'positive'
elif isinstance(stmt, Complex):
edge_type = 'Complex'
edge_polarity = 'none'
elif isinstance(stmt, Activation):
edge_type = 'Activation'
edge_polarity = 'positive'
elif isinstance(stmt, Inhibition):
edge_type = 'Inhibition'
edge_polarity = 'negative'
elif isinstance(stmt, RasGef):
edge_type = 'RasGef'
edge_polarity = 'positive'
elif isinstance(stmt, RasGap):
edge_type = 'RasGap'
edge_polarity = 'negative'
else:
        edge_type = stmt.__class__.__name__
edge_polarity = 'none'
return edge_type, edge_polarity
def _build_color_scale(hex_colors_list, n_bins):
rgb_colors = [hex2color(x) for x in hex_colors_list]
rgb_colors_array = np.array(rgb_colors)
rgb_names = {'red': 0, 'green': 1, 'blue': 2}
linear_mapping = np.linspace(0, 1, len(rgb_colors_array))
cdict = {}
for rgb_name in rgb_names:
color_list = []
rgb_idx = rgb_names[rgb_name]
for lin, val in zip(linear_mapping, rgb_colors_array[:, rgb_idx]):
color_list.append((lin, val, val))
cdict[rgb_name] = color_list
cmap = colormap('expression_colormap', cdict, 256, 1)
color_scale = []
for i in np.linspace(0, 1, n_bins):
color_scale.append(rgb2hex(cmap(i)))
return color_scale
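# Rough end-to-end usage sketch (statement list and cell type are
# illustrative only):
#
#     ca = CyJSAssembler(stmts)
#     ca.make_model(grouping=True, drop_virtual_edges=True,
#                   add_edge_weights=True)
#     ca.set_context(cell_type='BT20_BREAST', bin_expression=True)
#     ca.save_model('model.js')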
| {
"content_hash": "119817ee299297688077a2a83cd579b9",
"timestamp": "",
"source": "github",
"line_count": 697,
"max_line_length": 79,
"avg_line_length": 42.50215208034433,
"alnum_prop": 0.507460167431812,
"repo_name": "jmuhlich/indra",
"id": "63a8431c3011c20eb6f862fb8651b69d289bd639",
"size": "29624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indra/assemblers/cyjs_assembler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "619830"
},
{
"name": "Ruby",
"bytes": "433"
},
{
"name": "Shell",
"bytes": "1319"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from article.models import Article
# enable the auto admin for the Article model
admin.site.register(Article) | {
"content_hash": "311f19be5cbc85271df754127481923e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 24.2,
"alnum_prop": 0.8429752066115702,
"repo_name": "puuparta/django_bootcamp",
"id": "1be095e9f22f7ca03fb8513d1f853d4aebcf1efd",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "article/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4950"
}
],
"symlink_target": ""
} |
import requests_mock
import pytest
from tidypy import get_extenders, DoesNotExistError
def test_can_handle():
extender = get_extenders()['pastebin']
assert extender.can_handle('pastebin:abc123') == True
assert extender.can_handle('pastebin:') == False
RESP_BASIC = '''
[tidypy]
test = 'extended'
extension = 'pastebin'
'''
def test_retrieve_basic():
with requests_mock.Mocker() as m:
m.get('https://pastebin.com/raw/MYyLRaaB', text=RESP_BASIC)
extender = get_extenders()['pastebin']
cfg = extender.retrieve('pastebin:MYyLRaaB', 'test')
assert cfg == {
'extension': 'pastebin',
'test': 'extended',
}
def test_retrieve_missing():
with requests_mock.Mocker() as m:
m.get('https://pastebin.com/raw/doesntexist', status_code=302, headers={'Location': 'http://fake.com/missing'})
extender = get_extenders()['pastebin']
with pytest.raises(DoesNotExistError):
cfg = extender.retrieve('pastebin:doesntexist', 'test')
| {
"content_hash": "edd233d010d4b93196f3c9649d721e91",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 119,
"avg_line_length": 27.342105263157894,
"alnum_prop": 0.6390760346487007,
"repo_name": "jayclassless/tidypy",
"id": "a0d0a226baa7887e04e3744104b5dc37623a9f34",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_extender_pastebin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "337"
},
{
"name": "Makefile",
"bytes": "1028"
},
{
"name": "Python",
"bytes": "217369"
}
],
"symlink_target": ""
} |
"""
Dumps data from the main database, but only dumps a subset of the items,
to ensure we can load them on the development server.
We use this because the production database became too heavy to load,
even with optimized tools like pg_dump.
This script follows relations to ensure referential integrity, so if you load
blog_post, it will ensure the author is also serialized.
"""
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from dev_db.utils import timer
import logging
from dev_db.utils import get_creator_instance
import os
logger = logging.getLogger(__name__)
DEBUG = False
class Command(BaseCommand):
help = 'Output a sample of the database as a fixture of the given format.'
option_list = BaseCommand.option_list + (
make_option('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.'),
make_option('--indent', default=4, dest='indent', type='int',
help='Specifies the indent level to use when pretty-printing output'),
make_option('--limit', default=None, dest='limit', type='int',
help='Allows you to limit the number of tables, used for testing purposes only'),
make_option(
'-o', '--output', default=None, dest='output', type='string',
help='Path of the output file'),
make_option(
'--skipcache', default=False, dest='skipcache', action='store_true',
help='Skips the settings cache'),
)
def handle(self, **options):
# setup the options
self.format = options.get('format', 'json')
self._validate_serializer(self.format)
self.indent = options.get('indent', 4)
self.limit = options.get('limit')
output = options.get('output')
self.output = None
if output:
self.output_path = os.path.abspath(output)
self.output = open(self.output_path, 'w')
self.skipcache = options.get('skipcache')
logger.info(
'serializing using %s and indent %s', self.format, self.indent)
t = timer()
creator = get_creator_instance()
logger.info('using creator instance %s', creator)
if self.skipcache:
logger.info('skipping the cache')
model_settings = creator.get_model_settings()
else:
model_settings = creator.get_cached_model_settings()
logger.info('model_settings lookup took %s', t.next())
data = creator.collect_data(
model_settings, limit=self.limit, select_related=False)
logger.info('data collection took %s', t.next())
extended_data = creator.extend_data(data)
logger.info('extending data took %s', t.next())
filtered_data = creator.filter_data(extended_data)
logger.info('filtering data took %s', t.next())
logger.info('serializing data with format %s', self.format)
serialized = serializers.serialize(
self.format, filtered_data, indent=self.indent, use_natural_keys=False)
# write the output
if self.output:
self.output.write(serialized)
logger.info('serializing data took %s', t.next())
logger.info('total duration %s', t.total)
return serialized
def _validate_serializer(self, format):
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
| {
"content_hash": "c013a325afcdb3a3960f26e9d1aceab0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 101,
"avg_line_length": 41.93258426966292,
"alnum_prop": 0.6414790996784566,
"repo_name": "tschellenbach/dev_db",
"id": "965e1873b707f712a254312324256fd798d5336e",
"size": "3732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev_db/management/commands/create_dev_db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41257"
}
],
"symlink_target": ""
} |
"""Tests for haiku.examples.impala_lite."""
from absl.testing import absltest
from examples import impala_lite
class ImpalaLiteTest(absltest.TestCase):
def test_impala_integration(self):
impala_lite.run(trajectories_per_actor=2, num_actors=2, unroll_len=20)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "0291254943628e68e686718ad7d36b5f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 22.5,
"alnum_prop": 0.7238095238095238,
"repo_name": "deepmind/dm-haiku",
"id": "8e12bc0308af278b605c2071a7bb119e2b8f6633",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/impala_lite_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1024855"
},
{
"name": "Shell",
"bytes": "1907"
},
{
"name": "Starlark",
"bytes": "31643"
}
],
"symlink_target": ""
} |
print( '\n'
"""|----------------------------------------------------------|
| ____ __ ___ __ |
| / __ )__ ____ __/ |/ /___ ______/ /____ _____ |
| / __ / / / / / / / /|_/ / __ `/ ___/ __/ _ \/ ___/ |
| / /_/ / /_/ / /_/ / / / / /_/ (__ ) /_/ __/ / |
| /_____/\__,_/\__, /_/ /_/\__,_/____/\__/\___/_/ |
| /____/ |
|----------------------------------------------------------|"""
)
from app import *
capital = 1000
myport = portfolio('TD Dank')
print(bcolors.OKGREEN + 'Portfolio name: ' + myport.name + bcolors.ENDC)
# Provision portfolio
myport.add_security('VTI')
myport.add_security('VXUS')
myport.add_security('BND')
myport.add_security('BNDX')
# Configure shares, target allocations and prices for each security
for sec in myport.group:
if sec.name == 'VTI':
sec.set_shares(18)
sec.set_allocation(0.54)
sec.get_price()
sec.get_marketvalue()
if sec.name == 'VXUS':
sec.set_shares(27)
sec.set_allocation(0.36)
sec.get_price()
sec.get_marketvalue()
if sec.name == 'BND':
sec.set_shares(0)
sec.set_allocation(0.07)
sec.get_price()
sec.get_marketvalue()
if sec.name == 'BNDX':
sec.set_shares(0)
sec.set_allocation(0.03)
sec.get_price()
sec.get_marketvalue()
# Update portfolio value
myport.update_portfolio_value()
# Ensure provisioning process with the below
for sec in myport.group:
print 'Security: %s Shares: %s Allocation: %s Price: %s Market Value: %s ' % (sec.name,str(sec.shares),str(sec.allocation),str(sec.price),format_money(sec.marketvalue))
print('\n')
print(bcolors.OKGREEN + "Portoflio Total Value: " + bcolors.ENDC)
print format_money(myport.portfoliovalue)
print('\n')
print(bcolors.OKGREEN + "Query functions: uses portfolio" + bcolors.ENDC)
print("Get portfolio security price")
print get_port_security_price(myport)
print("Get portfolio allocation")
print get_port_allocation(myport)
print('\n')
print(bcolors.OKGREEN + "Buy functions: uses capital" + bcolors.ENDC)
print("Proposed whole shares to buy")
print buy_shares(myport,capital)
print("Left over cash")
print leftover_cash(myport,capital)
print('\n')
print(bcolors.OKGREEN + "Rebalance functions: uses portfolio" + bcolors.ENDC)
print("Target share allocation")
print rebalance_target_allocation(myport)
print("Rebalance reccomended action")
print rebalance_action(myport)
| {
"content_hash": "712c8d1166a4aa1de8f3c5e000a005ad",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 172,
"avg_line_length": 30.948051948051948,
"alnum_prop": 0.5765841376416282,
"repo_name": "brianannis/buymaster",
"id": "52e31cdb513155e5e70e70642684dfe968117218",
"size": "2383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6161"
}
],
"symlink_target": ""
} |
from django import template
from django.utils import timezone
from ..models import Category, Post
register = template.Library()
@register.assignment_tag
def get_news_categories():
return Category.objects.all()
@register.assignment_tag
def get_news_months():
return Post.objects.filter(published=True, date__lte=timezone.now()).dates('date_url', 'month')
@register.assignment_tag
def get_latest_news(count, category=None):
post_list = Post.objects.select_related().filter(published=True, date__lte=timezone.now())
# Optional filter by category
if category is not None:
post_list = post_list.filter(category__slug=category)
return post_list[:count]
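# Template usage sketch (tag library name assumed from this module's filename):
#
#     {% load news_tags %}
#     {% get_latest_news 5 as latest_posts %}
#     {% get_news_categories as categories %}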
| {
"content_hash": "96d5acccb0ab5b4106b75d02bc69f573",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 99,
"avg_line_length": 25.51851851851852,
"alnum_prop": 0.7314949201741655,
"repo_name": "blancltd/blanc-basic-news",
"id": "2ba2d6d77470e6dd839133c0abe39fac6336408a",
"size": "689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blanc_basic_news/templatetags/news_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10972"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
if hasattr(password, 'encode'):
password = password.encode('utf-8')
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
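# For example, aes-256-cfb uses a 32-byte key and a 16-byte IV, so
# EVP_BytesToKey(b'password', 32, 16) derives that (key, iv) pair
# deterministically from the password (the actual bytes depend on the
# password and are not shown here).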
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.iv_buf = b''
self.cipher_key = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
self.cipher_key = key
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is not None: #optimize
return self.decipher.update(buf)
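        # The decipher is created lazily: incoming bytes are buffered until a
        # full IV has arrived, then the decipher is initialized with that IV
        # and the remaining bytes are decrypted.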
decipher_iv_len = self._method_info[1]
if len(self.iv_buf) <= decipher_iv_len:
self.iv_buf += buf
if len(self.iv_buf) > decipher_iv_len:
decipher_iv = self.iv_buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = self.iv_buf[decipher_iv_len:]
del self.iv_buf
return self.decipher.update(buf)
else:
return b''
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
| {
"content_hash": "741b38d0410ca3680964213f387812cb",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 78,
"avg_line_length": 27.359116022099446,
"alnum_prop": 0.5632067851373183,
"repo_name": "ukoinobita/shadowsocks",
"id": "249439576abf176607ecf99edf488639385a5f4d",
"size": "5555",
"binary": false,
"copies": "1",
"ref": "refs/heads/manyuser",
"path": "shadowsocks/encrypt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "268920"
},
{
"name": "Shell",
"bytes": "15212"
}
],
"symlink_target": ""
} |
from actions_lib import *
user = "[email protected]"
passwords = ["test123"]
set_wait_timeout(60)
for (cur,new) in [ (passwords[i],passwords[i+1]) for i in range(len(passwords)-1) ]:
go_to('https://rally1.rallydev.com')
wait_for(assert_title_contains, 'Rally Login')
assert_title_contains('Rally Login')
username = get_element_by_css("#j_username")
write_textfield(username, user)
password = get_element_by_css("#j_password")
write_textfield(password, cur)
button = get_element_by_css("#login-button")
click_element(button)
# 1st checkpoint
wait_for(assert_page_contains, "My Dashboard")
wait_for(get_element_by_css, "span.icon-chevron-down")
dropdown = get_element_by_css("span.icon-chevron-down")
click_element(dropdown)
# "My Settings" is dynamic
my_settings = get_element(text="My Settings")
click_element(my_settings)
# 2nd checkpoint
wait_for(assert_page_contains, "Account Information")
edit_profile = get_element_by_css("#editUser")
# This will trigger a pop-up
click_element(edit_profile)
sst.actions.switch_to_window(1)
# 3rd checkpoint
wait_for(assert_page_contains, "Edit User")
wait_for(assert_page_contains, "Account Information")
existing_passwd = get_element_by_css("input#currentPassword")
write_textfield(existing_passwd, cur)
new_passwd = get_element_by_css("input#password")
write_textfield(new_passwd, new)
confirm = get_element_by_css("input#password2")
write_textfield(confirm, new)
take_screenshot("Edit_User")
#Save
save = get_element_by_css("button#save_btn.ed-btn")
assert_button(save)
click_button(save)
sleep(3)
take_screenshot("Save")
assert_page_does_not_contain("User could not be saved")
'''
#Cancel
cancel = get_element_by_css("button#cancel_btn.ed-btn")
assert_button(cancel)
click_button(cancel)
'''
# logout
sst.actions.switch_to_window()
click_element(dropdown)
sign_out = get_element(text="Sign Out")
click_element(sign_out)
wait_for(assert_page_contains, "You have successfully logged out")
stop() | {
"content_hash": "89c71bd474764f1d24030c73be87cfbb",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 84,
"avg_line_length": 27.114942528735632,
"alnum_prop": 0.6189063162356931,
"repo_name": "ktan2020/legacy-automation",
"id": "11064309c71d2af322f343f43892a13af4a55f6f",
"size": "2359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/misc/rally.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
} |
from pylab import *
# ###########################################
# Analysis functions
# ###########################################
def rate_from_spiketrain(spiket,spikei,fulltime,sigma,dt,nrnidx=None):
"""
Returns a rate series of spiketimes convolved with a Gaussian kernel;
all times must be in SI units,
remember to divide fulltime and dt by second
"""
# normalized Gaussian kernel, integral with dt is normed to 1
# to count as 1 spike smeared over a finite interval
norm_factor = 1./(sqrt(2.*pi)*sigma)
gauss_kernel = array([norm_factor*exp(-x**2/(2.*sigma**2))\
for x in arange(-5.*sigma,5.*sigma+dt,dt)])
if nrnidx is None:
spiketimes = spiket
else:
# take spiketimes of only neuron index nrnidx
spiketimes = spiket[where(spikei==nrnidx)]
kernel_len = len(gauss_kernel)
# need to accommodate half kernel_len on either side of fulltime
rate_full = zeros(int(fulltime/dt)+kernel_len)
for spiketime in spiketimes:
idx = int(spiketime/dt)
rate_full[idx:idx+kernel_len] += gauss_kernel
# only the middle fulltime part of the rate series
# This is already in Hz,
# since should have multiplied by dt for above convolution
# and divided by dt to get a rate, so effectively not doing either.
    return rate_full[kernel_len//2:kernel_len//2+int(fulltime/dt)]
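# Illustrative call (hypothetical values, SI units as the docstring requires):
#   rate = rate_from_spiketrain(spiket, spikei, fulltime=2.0, sigma=0.05, dt=0.001, nrnidx=0)
# yields a 2000-sample instantaneous firing-rate trace in Hz for neuron 0.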
def CV_spiketrains(spiket,spikei,tinit,nrnidxs):
""" calculate CV of excitatory neurons
a la Lim and Goldman 2013 """
CV = []
for j in nrnidxs:
indices = where(spikei == j)
spiketimes = spiket[indices]
spiketimes = spiketimes[where(spiketimes>tinit)]
ISI = diff(spiketimes)
# at least 5 spikes in this neuron to get CV
if len(spiketimes)>5:
CV.append( std(ISI)/mean(ISI) )
return array(CV)
| {
"content_hash": "99c56f16a38fa0239dd5cd16c48a1061",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 73,
"avg_line_length": 37.18,
"alnum_prop": 0.6272189349112426,
"repo_name": "h-mayorquin/camp_india_2016",
"id": "053e816bc61ab22acafe40dddbfae01c80beebb1",
"size": "1859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/LTPinnetworks2/data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "33891"
},
{
"name": "C",
"bytes": "205445"
},
{
"name": "GAP",
"bytes": "71247"
},
{
"name": "Jupyter Notebook",
"bytes": "2211795"
},
{
"name": "OpenEdge ABL",
"bytes": "1723"
},
{
"name": "Python",
"bytes": "251481"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
} |
import os.path
from ginga.gw import Widgets
from ginga.misc import Bunch
from ginga import GingaPlugin
class Toolbar(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Toolbar, self).__init__(fv)
# active view
self.active = None
# holds our gui widgets
self.w = Bunch.Bunch()
self.gui_up = False
# get local plugin preferences
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_Toolbar')
self.settings.load(onError='silent')
self.modetype = self.settings.get('mode_type', 'oneshot')
fv.set_callback('add-channel', self.add_channel_cb)
fv.set_callback('delete-channel', self.delete_channel_cb)
fv.set_callback('active-image', self.focus_cb)
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(0)
vbox, sw, orientation = Widgets.get_oriented_box(container)
self.orientation = orientation
#vbox.set_border_width(2)
vbox.set_spacing(2)
tb = Widgets.Toolbar(orientation=orientation)
for tup in (
#("Load", 'button', 'fits_open_48', "Open an image file",
#None),
("FlipX", 'toggle', 'flipx_48', "Flip image in X axis",
self.flipx_cb),
("FlipY", 'toggle', 'flipy_48', "Flip image in Y axis",
self.flipy_cb),
("SwapXY", 'toggle', 'swapxy_48', "Swap X and Y axes",
self.swapxy_cb),
("---",),
("Rot90", 'button', 'rot90ccw_48', "Rotate image 90 deg",
self.rot90_cb),
("RotN90", 'button', 'rot90cw_48', "Rotate image -90 deg",
self.rotn90_cb),
("OrientRH", 'button', 'orient_nw_48', "Orient image N=Up E=Right",
self.orient_rh_cb),
("OrientLH", 'button', 'orient_ne_48', "Orient image N=Up E=Left",
self.orient_lh_cb),
("---",),
("Prev", 'button', 'prev_48', "Go to previous image in channel",
lambda w: self.fv.prev_img()),
("Next", 'button', 'next_48', "Go to next image in channel",
lambda w: self.fv.next_img()),
("---",),
("Zoom In", 'button', 'zoom_in_48', "Zoom in",
lambda w: self.fv.zoom_in()),
("Zoom Out", 'button', 'zoom_out_48', "Zoom out",
lambda w: self.fv.zoom_out()),
("Zoom Fit", 'button', 'zoom_fit_48', "Zoom to fit window size",
lambda w: self.fv.zoom_fit()),
("Zoom 1:1", 'button', 'zoom_100_48', "Zoom to 100% (1:1)",
lambda w: self.fv.zoom_1_to_1()),
("---",),
("Pan", 'toggle', 'pan_48', "Pan with left, zoom with right",
lambda w, tf: self.mode_cb(tf, 'pan')),
("FreePan", 'toggle', 'hand_48', "Free Panning",
lambda w, tf: self.mode_cb(tf, 'freepan')),
("Rotate", 'toggle', 'rotate_48', "Interactive rotation",
lambda w, tf: self.mode_cb(tf, 'rotate')),
("Cuts", 'toggle', 'cuts_48',
"Left/right sets hi cut, up/down sets lo cut",
lambda w, tf: self.mode_cb(tf, 'cuts')),
("Contrast", 'toggle', 'contrast_48',
"Contrast/bias with left/right/up/down",
lambda w, tf: self.mode_cb(tf, 'contrast')),
("ModeLock", 'toggle', 'lock_48',
"Modes are oneshot or locked", self.set_locked_cb),
("---",),
("Center", 'button', 'center_image_48', "Center image",
self.center_image_cb),
("Restore", 'button', 'reset_rotation_48',
"Reset all transformations and rotations",
self.reset_all_transforms_cb),
("AutoLevels", 'button', 'auto_cuts_48', "Auto cut levels",
self.auto_levels_cb),
("ResetContrast", 'button', 'reset_contrast_48', "Reset contrast",
self.reset_contrast_cb),
("---",),
("Preferences", 'button', 'settings_48', "Set channel preferences",
lambda w: self.start_plugin_cb('Preferences')),
("FBrowser", 'button', 'open_48', "Open file",
lambda w: self.start_plugin_cb('FBrowser')),
## ("Histogram", 'button', 'open_48', "Histogram and cut levels",
## lambda w: self.start_plugin_cb('Histogram')),
#("Quit", 'button', 'exit_48', "Quit the program"),
):
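            # Each tup is (name, widget type, icon file stem, tooltip, callback);
            # a lone '---' entry inserts a toolbar separator instead of a button.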
name = tup[0]
if name == '---':
tb.add_separator()
continue
#btn = self.fv.make_button(*tup[:4])
iconpath = os.path.join(self.fv.iconpath, "%s.png" % (tup[2]))
btn = tb.add_action(None, toggle=(tup[1]=='toggle'),
iconpath=iconpath)
if tup[3]:
btn.set_tooltip(tup[3])
if tup[4]:
btn.add_callback('activated', tup[4])
# add to our widget dict
self.w[Widgets.name_mangle(name, pfx='btn_')] = btn
# add widget to toolbar
#tb.add_widget(btn)
# stretcher
#tb.add_widget(Widgets.Label(''), stretch=1)
#sw.set_widget(tb)
#top.add_widget(sw, stretch=1)
container.add_widget(tb, stretch=1)
self.gui_up = True
# CALLBACKS
def add_channel_cb(self, viewer, chinfo):
fitsimage = chinfo.fitsimage
#fitsimage.add_callback('image-set', self.new_image_cb)
fitsimage.add_callback('transform', self.viewer_transform_cb)
bm = fitsimage.get_bindmap()
bm.add_callback('mode-set', self.mode_set_cb, fitsimage)
def delete_channel_cb(self, viewer, chinfo):
self.logger.debug("delete channel %s" % (chinfo.name))
# we don't keep around any baggage on channels so nothing
# to delete
def focus_cb(self, viewer, fitsimage):
self.active = fitsimage
self._update_toolbar_state(fitsimage)
return True
def center_image_cb(self, w):
view, bd = self._get_view()
bd.kp_center(view, 'x', 0.0, 0.0)
return True
def reset_contrast_cb(self, w):
view, bd = self._get_view()
bd.kp_contrast_restore(view, 'x', 0.0, 0.0)
return True
def auto_levels_cb(self, w):
view, bd = self._get_view()
bd.kp_cut_auto(view, 'x', 0.0, 0.0)
return True
def rot90_cb(self, w):
view, bd = self._get_view()
bd.kp_rotate_inc90(view, 'x', 0.0, 0.0)
return True
def rotn90_cb(self, w):
view, bd = self._get_view()
bd.kp_rotate_dec90(view, 'x', 0.0, 0.0)
return True
def orient_lh_cb(self, w):
view, bd = self._get_view()
bd.kp_orient_lh(view, 'x', 0.0, 0.0)
return True
def orient_rh_cb(self, w):
view, bd = self._get_view()
bd.kp_orient_rh(view, 'x', 0.0, 0.0)
return True
def reset_all_transforms_cb(self, w):
view, bd = self._get_view()
bd.kp_rotate_reset(view, 'x', 0.0, 0.0)
return True
def start_plugin_cb(self, name):
chinfo = self.fv.get_channelInfo()
self.fv.start_operation(name)
return True
def flipx_cb(self, w, tf):
view, bd = self._get_view()
flip_x, flip_y, swap_xy = view.get_transforms()
flip_x = tf
view.transform(flip_x, flip_y, swap_xy)
return True
def flipy_cb(self, w, tf):
view, bd = self._get_view()
flip_x, flip_y, swap_xy = view.get_transforms()
flip_y = tf
view.transform(flip_x, flip_y, swap_xy)
return True
def swapxy_cb(self, w, tf):
view, bd = self._get_view()
flip_x, flip_y, swap_xy = view.get_transforms()
swap_xy = tf
view.transform(flip_x, flip_y, swap_xy)
return True
def mode_cb(self, tf, modename):
if self.active is None:
self.active, bd = self._get_view()
fitsimage = self.active
if fitsimage is None:
return
bm = fitsimage.get_bindmap()
if not tf:
bm.reset_mode(fitsimage)
return True
bm.set_mode(modename)
# just in case mode change failed
self._update_toolbar_state(fitsimage)
return True
def mode_set_cb(self, bm, mode, mtype, fitsimage):
# called whenever the user interaction mode is changed
# in the viewer
if self.active is None:
self.active, bd = self._get_view()
if fitsimage != self.active:
return True
self._update_toolbar_state(fitsimage)
return True
def viewer_transform_cb(self, fitsimage):
# called whenever the transform (flip x/y, swap axes) is done
# in the viewer
if self.active is None:
self.active, bd = self._get_view()
if fitsimage != self.active:
return True
self._update_toolbar_state(fitsimage)
return True
def new_image_cb(self, fitsimage, image):
self._update_toolbar_state(fitsimage)
return True
def set_locked_cb(self, w, tf):
if tf:
modetype = 'locked'
else:
modetype = 'oneshot'
if self.active is None:
self.active, bd = self._get_view()
fitsimage = self.active
if fitsimage is None:
return
# get current bindmap, make sure that the mode is consistent
# with current lock button
bm = fitsimage.get_bindmap()
modename, cur_modetype = bm.current_mode()
bm.set_default_mode_type(modetype)
bm.set_mode(modename, mode_type=modetype)
if not tf:
# turning off lock also resets the mode
bm.reset_mode(fitsimage)
self._update_toolbar_state(fitsimage)
return True
# LOGIC
def _get_view(self):
chinfo = self.fv.get_channelInfo()
view = chinfo.fitsimage
return (view, view.get_bindings())
def _update_toolbar_state(self, fitsimage):
if (fitsimage is None) or (not self.gui_up):
return
self.logger.debug("updating toolbar state")
try:
# update transform toggles
flipx, flipy, swapxy = fitsimage.get_transforms()
# toolbar follows view
self.w.btn_flipx.set_state(flipx)
self.w.btn_flipy.set_state(flipy)
self.w.btn_swapxy.set_state(swapxy)
# update mode toggles
bm = fitsimage.get_bindmap()
modename, mode_type = bm.current_mode()
self.logger.debug("modename=%s" % (modename))
# toolbar follows view
self.w.btn_pan.set_state(modename == 'pan')
self.w.btn_freepan.set_state(modename == 'freepan')
self.w.btn_rotate.set_state(modename == 'rotate')
self.w.btn_cuts.set_state(modename == 'cuts')
self.w.btn_contrast.set_state(modename == 'contrast')
default_mode_type = bm.get_default_mode_type()
            if 'btn_modelock' in self.w:
self.w.btn_modelock.set_state(default_mode_type == 'locked')
except Exception as e:
self.logger.error("error updating toolbar: %s" % str(e))
            raise
def __str__(self):
return 'toolbar'
#END
| {
"content_hash": "f353faa5ad563c0a6f374928dfa8e70f",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 79,
"avg_line_length": 35.07598784194529,
"alnum_prop": 0.5376949740034662,
"repo_name": "eteq/ginga",
"id": "47b5197509ee9a790eaf89a0a0cdfeb5982424c5",
"size": "11800",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "ginga/misc/plugins/Toolbar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "82354"
},
{
"name": "Python",
"bytes": "2725923"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from os import path
import io
from trainsimulator.version import __version__, __url__, __license__, __author__, __email__
here = path.abspath(path.dirname(__file__))
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
print("Can't import pypandoc - using README.md without converting to RST")
long_description = open('README.md').read()
NAME = 'trainsimulator'
with io.open(path.join(here, NAME, 'version.py'), 'rt', encoding='UTF-8') as f:
exec(f.read())
setup(
name=NAME,
version=__version__,
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
url=__url__,
license=__license__,
author=__author__,
author_email=__email__,
long_description=long_description,
description=''
)
| {
"content_hash": "745e40538085344c671ad3c4f4f1e6d8",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 91,
"avg_line_length": 28.741935483870968,
"alnum_prop": 0.6666666666666666,
"repo_name": "arturosolutions/trainsimulator",
"id": "66bb0937f2a5d775cd36b5585c5983eeddb2d79e",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "32600"
}
],
"symlink_target": ""
} |
import os.path as op
from bakery_lint.base import BakeryTestCase as TestCase
from bakery_lint.metadata import Metadata
from bakery_cli.ttfont import Font
weights = {
'Thin': 100,
'ThinItalic': 100,
'ExtraLight': 200,
'ExtraLightItalic': 200,
'Light': 300,
'LightItalic': 300,
'Regular': 400,
'Italic': 400,
'Medium': 500,
'MediumItalic': 500,
'SemiBold': 600,
'SemiBoldItalic': 600,
'Bold': 700,
'BoldItalic': 700,
'ExtraBold': 800,
'ExtraBoldItalic': 800,
'Black': 900,
'BlackItalic': 900,
}
class CheckCanonicalWeights(TestCase):
path = '.'
targets = 'metadata'
name = __name__
tool = 'lint'
def read_metadata_contents(self):
return open(self.path).read()
def test_check_canonical_weights(self):
""" Check that weights have canonical value """
contents = self.read_metadata_contents()
fm = Metadata.get_family_metadata(contents)
for font_metadata in fm.fonts:
weight = font_metadata.weight
            first_digit = weight // 100
is_invalid = (weight % 100) != 0 or (first_digit < 1
or first_digit > 9)
_ = ("%s: The weight is %d which is not a "
"multiple of 100 between 1 and 9")
self.assertFalse(is_invalid, _ % (op.basename(self.path),
font_metadata.weight))
tf = Font.get_ttfont_from_metadata(self.path, font_metadata)
_ = ("%s: METADATA.json overwrites the weight. "
" The METADATA.json weight is %d and the font"
" file %s weight is %d")
_ = _ % (font_metadata.filename, font_metadata.weight,
font_metadata.filename, tf.OS2_usWeightClass)
self.assertEqual(tf.OS2_usWeightClass, font_metadata.weight)
class CheckPostScriptNameMatchesWeight(TestCase):
path = '.'
targets = 'metadata'
name = __name__
tool = 'lint'
def read_metadata_contents(self):
return open(self.path).read()
def test_postscriptname_contains_correct_weight(self):
""" Metadata weight matches postScriptName """
contents = self.read_metadata_contents()
fm = Metadata.get_family_metadata(contents)
for font_metadata in fm.fonts:
pair = []
for k, weight in weights.items():
if weight == font_metadata.weight:
pair.append((k, weight))
if not pair:
self.fail('Font weight does not match for "postScriptName"')
if not (font_metadata.post_script_name.endswith('-%s' % pair[0][0])
or font_metadata.post_script_name.endswith('-%s' % pair[1][0])):
_ = ('postScriptName with weight %s must be '
'ended with "%s" or "%s"')
self.fail(_ % (pair[0][1], pair[0][0], pair[1][0]))
class CheckFontWeightSameAsInMetadata(TestCase):
path = '.'
targets = 'metadata'
name = __name__
tool = 'lint'
def read_metadata_contents(self):
return open(self.path).read()
def test_postscriptname_contains_correct_weight(self):
""" Metadata weight matches postScriptName """
contents = self.read_metadata_contents()
fm = Metadata.get_family_metadata(contents)
for font_metadata in fm.fonts:
font = Font.get_ttfont_from_metadata(self.path, font_metadata)
if font.OS2_usWeightClass != font_metadata.weight:
msg = 'METADATA.JSON has weight %s but in TTF it is %s'
self.fail(msg % (font_metadata.weight, font.OS2_usWeightClass))
class CheckFullNameEqualCanonicalName(TestCase):
path = '.'
targets = 'metadata'
name = __name__
tool = 'lint'
def read_metadata_contents(self):
return open(self.path).read()
def test_metadata_contains_current_font(self):
""" METADATA.json should contains testing font, under canonic name"""
contents = self.read_metadata_contents()
fm = Metadata.get_family_metadata(contents)
for font_metadata in fm.fonts:
font = Font.get_ttfont_from_metadata(self.path, font_metadata)
_weights = []
for value, intvalue in weights.items():
if intvalue == font.OS2_usWeightClass:
_weights.append(value)
            is_canonical = False
            for w in _weights:
                current_font = "%s %s" % (font.familyname, w)
                if font_metadata.full_name == current_font:
                    is_canonical = True
            if not is_canonical:
                v = map(lambda x: font.familyname + ' ' + x, _weights)
                msg = 'Canonical name in font expected: [%s] but %s'
                self.fail(msg % (v, font_metadata.full_name))
| {
"content_hash": "136efe46470317ea0b9a216332630e24",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 84,
"avg_line_length": 32.296052631578945,
"alnum_prop": 0.568547565695661,
"repo_name": "lowks/fontbakery-cli",
"id": "b5bf807a7cf11f347add1f1e4f161bbef5ba4118",
"size": "5611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bakery_lint/tests/downstream/test_check_canonical_weights.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from copy import copy
from django.conf import settings
from django.forms import fields, models
from django.forms.util import ValidationError
from django.template.defaultfilters import slugify
from django.utils.formats import get_format
from django.utils.translation import ugettext_lazy as _
from limbo import widgets
from limbo.timeblock.logic import TimeBlock
from limbo.validation import valid_sms, clean_sms
import datetime
DEFAULT_DATE_INPUT_FORMATS = get_format('DATE_INPUT_FORMATS')
DEFAULT_TIME_INPUT_FORMATS = get_format('TIME_INPUT_FORMATS')
class MoneyField(fields.DecimalField):
def clean(self, value):
if isinstance(value, basestring):
value = value.replace('$', '')
return super(MoneyField, self).clean(value)
class MobileField(fields.CharField):
default_error_messages = {
'invalid': _(u'Invalid Mobile Number.'),
}
def __init__(self, max_length=20, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(MobileField, self).__init__(*args, **kwargs)
defaults = copy(fields.CharField.default_error_messages)
defaults.update(self.default_error_messages)
self.default_error_messages = defaults
def clean(self, value):
value = super(MobileField, self).clean(value)
if not value:
return value
if not valid_sms(value):
raise ValidationError(self.error_messages['invalid'])
return clean_sms(value)
class ParsedChoiceField(fields.ChoiceField):
DEFAULT_PARSERS = list()
def __init__(self, *args, **kwargs):
self.parsers = list(kwargs.pop('parsers', [])) + list(self.DEFAULT_PARSERS)
super(ParsedChoiceField, self).__init__(*args, **kwargs)
def parse_value(self, value):
for parser in self.parsers:
parsed = parser(value, self.choices)
if parsed:
return parsed
return value
def clean(self, value):
value = self.parse_value(value)
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
class UploadChoiceField(ParsedChoiceField):
DEFAULT_PARSERS = (widgets.stripped_reverse_choice,)
widget = widgets.CheckedSelect
@classmethod
def from_choice_field(cls, field, widget=widgets.CheckedSelect):
return cls(field.choices, field.required, widget, field.label,
field.initial, field.help_text,
error_messages=field.error_messages, show_hidden_initial=field.show_hidden_initial,
validators=field.validators, localize=field.localize)
class ModelUploadChoiceField(models.ModelChoiceField, UploadChoiceField):
DEFAULT_PARSERS = UploadChoiceField.DEFAULT_PARSERS
def __init__(self, *args, **kwargs):
self.parsers = list(kwargs.pop('parsers', [])) + list(self.DEFAULT_PARSERS)
models.ModelChoiceField.__init__(self, *args, **kwargs)
def clean(self, value):
return UploadChoiceField.clean(self, value)
@classmethod
def from_model_choice_field(cls, field, widget=widgets.CheckedSelect):
return cls(field.queryset, field.empty_label, field.cache_choices,
field.required, widget, field.label, field.initial,
field.help_text, field.to_field_name,
error_messages=field.error_messages, show_hidden_initial=field.show_hidden_initial,
validators=field.validators, localize=field.localize)
class YearField(fields.DateField):
DEFAULT_YEAR_INPUT_FORMATS = list(DEFAULT_DATE_INPUT_FORMATS) + ['%Y', '%y']
def __init__(self, input_formats=DEFAULT_YEAR_INPUT_FORMATS, *args, **kwargs):
super(YearField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def clean(self, value):
value = super(YearField, self).clean(value)
return value and value.year or value
time_formats = []
try:
time_formats += list(get_format('DEFAULT_TIME_INPUT_FORMATS'))
except AttributeError:
pass
DEFAULT_TIME_INPUT_FORMATS = getattr(settings, 'DEFAULT_TIME_INPUT_FORMATS',
[
'%H:%M %p',
'%H:%M:%S %p'
] + time_formats )
class AutoTimeField(fields.TimeField):
widget = widgets.TimePicker
def __init__(self, input_formats=DEFAULT_TIME_INPUT_FORMATS, *args, **kwargs):
super(AutoTimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
class AutoDateField(fields.DateField):
widget = widgets.DatePicker
def __init__(self, input_formats=DEFAULT_DATE_INPUT_FORMATS, *args, **kwargs):
super(AutoDateField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
class TimeRange:
def __init__(self, start, end):
self.start = start
self.end = end
@property
def delta(self):
if not (self.start and self.end):
return datetime.timedelta()
return self.end - self.start
class TimeRangePicker(fields.ChoiceField):
widget = widgets.TimeRange
class DEFAULT_CHOICES:
TODAY = 'today'
YESTERDAY = 'yesterday'
THIS_WEEK = 'this_week'
LAST_WEEK = 'last_week'
THIS_MONTH = 'this_month'
LAST_MONTH = 'last_month'
ALL_TIME = 'all_time'
CHOICES = (
(TODAY, 'Today'),
(YESTERDAY, 'Yesterday'),
(THIS_WEEK, 'This Week'),
(LAST_WEEK, 'Last Week'),
(THIS_MONTH, 'This Month'),
(LAST_MONTH, 'Last Month'),
)
ADMIN_CHOICES = tuple(list(CHOICES) +
[
(ALL_TIME, 'All Time'),
]
)
def __init__(self, choices = DEFAULT_CHOICES.CHOICES, *args, **kwargs):
if not choices:
choices = self.DEFAULT_CHOICES.CHOICES
super(TimeRangePicker, self).__init__(choices, *args, **kwargs)
def clean(self, value):
value = super(TimeRangePicker, self).clean(value)
return self.convert_date(value)
@classmethod
def default_range(cls, value = DEFAULT_CHOICES.TODAY):
now = datetime.datetime.now()
one_day = datetime.timedelta(days=1)
one_week = datetime.timedelta(weeks=1)
first_dom = datetime.datetime(year=now.year, month=now.month, day=1)
midnight = datetime.time(0,0)
minute = datetime.timedelta(minutes=1)
today = datetime.datetime.combine(now.date(), midnight)
days_from_sunday = one_day * (today.weekday()+1)
days_to_sunday = one_day * (6-today.weekday())
if value == cls.DEFAULT_CHOICES.TODAY:
return TimeRange(today, now)
elif value == cls.DEFAULT_CHOICES.YESTERDAY:
return TimeRange(today - one_day, today-minute)
elif value == cls.DEFAULT_CHOICES.THIS_WEEK:
return TimeRange(today - days_from_sunday, now)
elif value == cls.DEFAULT_CHOICES.LAST_WEEK:
return TimeRange(today - one_week - days_from_sunday, today - one_week -minute + days_to_sunday)
elif value == cls.DEFAULT_CHOICES.THIS_MONTH:
return TimeRange(first_dom, now)
elif value == cls.DEFAULT_CHOICES.LAST_MONTH:
ldolm = first_dom - minute
fdolm = datetime.datetime(year=ldolm.year, month=ldolm.month, day=1)
return TimeRange(fdolm, ldolm)
elif value == cls.DEFAULT_CHOICES.ALL_TIME:
return TimeRange(None, None)
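        # Any other picker value falls through and implicitly returns None.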
def convert_date(self, value):
return self.default_range(value)
def check_required(kwargs):
kwargs['required'] = kwargs.get('required', False)
class DateRangeFieldGenerator:
""" This is not an actual field, but a field generator """
DEFAULT_MAX_RANGE = datetime.timedelta(weeks=52)
@classmethod
def fields(cls, **kwargs):
""" Returns 3 fields, a Button set quick picker, a start date time field, and an end date time field"""
# This removes stuff from kwargs and should happen first
start, end = cls.start_end(**kwargs)
picker = cls.picker(**kwargs)
return picker, start, end
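    # Illustrative use in a form's __init__ (hypothetical field names):
    #   self.fields['picker'], self.fields['start_date'], self.fields['end_date'] = \
    #       DateRangeFieldGenerator.fields()
    # with DateRangeFieldGenerator.clean(cleaned_data) called from the form's clean().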
@classmethod
def picker(cls, **kwargs):
check_required(kwargs)
return TimeRangePicker(**kwargs)
@classmethod
def __dtfield(cls, label, class_name, **kwargs):
new_kwargs = {}
new_kwargs.update(kwargs)
new_kwargs['label'] = label
field = AutoDateField(**new_kwargs)
field.widget.addClass(class_name)
return field
@classmethod
def start(cls, **kwargs):
check_required(kwargs)
label = kwargs.pop('start_label', "Start Date")
return cls.__dtfield(label, 'start_date', **kwargs)
@classmethod
def end(cls, **kwargs):
check_required(kwargs)
label = kwargs.pop('end_label', "End Date")
return cls.__dtfield(label, 'end_date', **kwargs)
@classmethod
def start_end(cls, **kwargs):
return cls.start(**kwargs), cls.end(**kwargs)
@classmethod
def clean(cls,
data,
picker_key = 'picker',
start_date = 'start_date',
end_date = 'end_date',
max_range = None,
default_range = None,
required=True):
""" This required specifies if the range is required, so a start and end
must be specified in the end
@type default_range: TimeRange or timedelta
@type max_range: timedelta
"""
if callable(default_range):
default_range = default_range()
if isinstance(default_range, datetime.timedelta):
now = datetime.datetime.now()
default_range = TimeRange(now - default_range, now)
requireds = [start_date, end_date, picker_key]
for key in requireds:
            if key not in data:
data[key] = ''
start = data[start_date]
end = data[end_date]
picker = data[picker_key]
if not picker and not (start or end):
if required and not default_range:
raise ValidationError("You must use the picker or specify a start and end date.")
elif default_range:
end = default_range.end
start = default_range.start
else:
return data
if not start:
start = picker.start
if not end:
end = picker.end
if required and not (start and end):
if not (isinstance(picker, TimeRange) and picker.start is None and picker.end is None):
# If picker was chosen and all time is a valid option, it will return none as start and end
raise ValidationError('Date range required: %s - %s' %(start_date, end_date))
if start and end:
tb = TimeBlock(start, end)
start, end = tb.start, tb.end
if start > end:
raise ValidationError('Start date must be before end date.')
elif max_range and end - start > max_range:
raise ValidationError('Time range too large. Max range:%s' %cls.get_max_range_display(max_range))
data[start_date] = start
data[end_date] = end
return data
@classmethod
def get_max_range_display(cls, max_range):
# TODO: Actually format this
return str(max_range)
| {
"content_hash": "e9042406df1809e7a623c1b316982a57",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 114,
"avg_line_length": 37.045307443365694,
"alnum_prop": 0.6171922774526076,
"repo_name": "gdoermann/django-limbo",
"id": "ec9398e3106730a3fa830043776b5e8d388c4b83",
"size": "11447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "limbo/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "409084"
},
{
"name": "Python",
"bytes": "327119"
}
],
"symlink_target": ""
} |
import sys
import os
import tempfile
import getpass
import simplejson as json
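# Boot-test helper: read the launch-plan message from bootconf.json, publish it as
# /var/www/test.txt (via sudo when not running as root), then report back through
# bootout.json and exit with the copy command's return code.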
f = open("bootconf.json", "r")
vals_dict = json.load(f)
f.close()
(osf, fname) = tempfile.mkstemp()
print vals_dict['message']
os.write(osf, vals_dict['message'])
os.close(osf)
sudo = ""
if getpass.getuser() != "root":
sudo = "sudo"
cmd = "%s cp %s /var/www/test.txt && %s chmod 644 /var/www/test.txt" % (sudo, fname, sudo)
print cmd
rc = os.system(cmd)
out_vals = {'testpgm': 'hello'}
output = open('bootout.json', "w")
json.dump(out_vals, output)
output.close()
sys.exit(rc)
| {
"content_hash": "f54e2705bca0c8a476a8e25287de0f28",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 90,
"avg_line_length": 20,
"alnum_prop": 0.6586206896551724,
"repo_name": "nimbusproject/cloudinit.d",
"id": "f5a8a3166a61e09c4b09200a113b8b0425a582e7",
"size": "603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/plans/back2backterminatehang/webserver.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "262162"
},
{
"name": "Shell",
"bytes": "1414"
}
],
"symlink_target": ""
} |
from ..errors import StopValidation, ValidationError
__all__ = ["AnyValueType", "TypedValueType"]
class AnyValueType:
"""An instance of ValueType describes for some type of value (int, "int
between 0 and 10", list, "list with elements of value type
int-between-0-and-10", dict, namedtuple, whathaveyou):
* how to make sure some value is of that type (*validation*)
* how values of that type are represented in the DB and Python worlds, and
how to convert safely between those representations (*conversion*).
`AnyValueType` itself is the most general value type that validates any
Python object and does not convert anything, unless it is specialized in
some way.
A ValueType object does not hold any data. Therefore, the validator and
converter methods all accept an extra parameter for the actual value to be
validated/converted.
Validation
----------
We say that some value "is of the value type X" if it passes X's
``validate()`` method without raising a ``ValidationError``.
Validation can be run simply by calling ``some_value_type.validate(val)``,
which runs all ``_validate()`` methods that are defined in
`some_value_type`'s class and ancestor classes, plus the optional
``_extra_validators`` that instances of ValueType can tack on.
Through subclassing of ValueTypes and/or tacking on _extra_validators on
value type objects, value types can get arbitrarily special.
Conversion
----------
Any value has two representations (which might be the same): a "DB world"
representation and a "Python world" representation. The DB world
representation is something JSON serializable that RethinkDB can store and
load. The Python world representation is some Python object that can be
constructed from / written to a DB world representation.
A ValueType implements ``pyval_to_dbval`` and ``dbval_to_pyval`` methods
that transform values between these two worlds.
In cases where "Python world" and "DB world" representations are the same,
for instance integers or strings, these methods just return the value
passed to them unchanged.
Subclassing: validation
-----------------------
Subclasses that want to specify their own validation should override
``_validate``.
extra_validators, if present, must be an iterable of callable which each
accept two parameters: the ValueType object the validation runs on (think
"self"), and the value to be validated.
See ``_validate`` for details on validator functions.
Subclassing: conversion
-----------------------
If your ValueType needs conversion (i.e. if Python world and DB world
representation differ), then override ``pyval_to_dbval`` and
``dbval_to_pyval``.
"""
def __init__(self, extra_validators = None, forbid_none = False):
"""Optional kwargs:
* extra_validators: iterable of extra validators tacked onto the
object. See ``ValueType`` class doc for more on this.
* forbid_none: set to True if the value None should always fail
validation. The default is False.
"""
self._extra_validators = extra_validators
self._forbid_none = forbid_none
@classmethod
def _find_methods_in_reverse_mro(cls, name):
"""Collects methods with matching name along the method resolution
order in a list, reverses that list, and returns it.
"""
# we have a cache for this. See if we get a hit for name
cache = cls.__dict__.get("_find_methods_cache", None)
if cache == None:
cls._find_methods_cache = cache = {}
else:
methods = cache.get(name, None)
if methods != None:
return methods
# still here? then we need to do the work
methods = []
for c in cls.__mro__:
method = c.__dict__.get(name, None)
if method != None:
methods.append(method)
methods.reverse()
cache[name] = methods
return methods
###########################################################################
# validation
###########################################################################
def _validate(self, val):
"""Override this in subclasses where you want validation. Don't call
super()._validate.
If validation goes wrong, raise a ValidationError.
If you need the validation cascade to stop after this validator, raise
StopValidation.
"""
if val == None and self._forbid_none:
raise ValidationError("None is not an allowed value.")
def validate(self, val = None):
"""Runs all validators, beginning with the most basic _validate (the
one defined furthest back in the method resolution order), and ending
with the extra_validators that might be attached to the object. The
method returns self.
Aiorethink users don't have to call this function directly, as
aiorethink calls it implicitly when necessary.
"""
validators = self.__class__._find_methods_in_reverse_mro("_validate")
if self._extra_validators != None:
validators = validators[:]
validators.extend(self._extra_validators)
for validator in validators:
try:
validator(self, val)
except StopValidation as s:
break
return self
###########################################################################
# conversions RethinkDB doc <-> Python object
###########################################################################
def pyval_to_dbval(self, pyval):
"""Converts a "python world" pyval to a "DB world" value (i.e.,
something JSON serializable that RethinkDB can store). Override in
subclasses. The default implementation just returns pyval.
"""
return pyval
def dbval_to_pyval(self, dbval):
"""Converts a "DB world" dbval to a "Python world" value (i.e., some
Python object constructed from it). Override in subclasses. The default
implementation just returns dbval.
"""
return dbval
class TypedValueType(AnyValueType):
"""Base for ValueTypes whose validation simply checks if a value
isinstance() of a given type. Just override _val_instance_of with a type.
"""
_val_instance_of = type(None)
def _validate(self, val):
oktype = self._val_instance_of
if val != None and not isinstance(val, oktype):
raise ValidationError("value {} is not an instance of {}, but "
"{}".format(repr(val), str(oktype), str(val.__class__)))
| {
"content_hash": "b951529a19b0026fac122f7c72ecb5fa",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 38.14525139664804,
"alnum_prop": 0.616871704745167,
"repo_name": "lars-tiede/aiorethink",
"id": "eb39e8d07685cd810f6f8b7caeab20641ee546ec",
"size": "6828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiorethink/values_and_valuetypes/base_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125701"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
} |
"""
Represents a property, i.e. property definition and value.
"""
from datafinder.core.error import PropertyError
__version__ = "$Revision-Id:$"
class Property(object):
"""
Represents a property, i.e. property definition and value.
"""
def __init__(self, propertyDefinition, value):
"""
Constructor.
@param propertyDefinition: Describes the property by name, identifier, restrictions, etc.
@type propertyDefinition: L{PropertyDefinition<datafinder.core.configuration.properties.property_definition.PropertyDefinition>}
@param value: Value of the property.
@type value: C{object}
@raise PropertyError: Value does not fit.
"""
self._propertyDefinition = propertyDefinition
self._additionalValueRepresentations = list()
self.value = value
def __getPropertyIdentifier(self):
""" Getter of the property identifier. """
return self._propertyDefinition.identifier
identifier = property(__getPropertyIdentifier)
def __getPropertyDefinition(self):
""" Getter for the property definition. """
return self._propertyDefinition
propertyDefinition = property(__getPropertyDefinition)
def __repr__(self):
""" Returns the representation. """
return str(self.propertyDefinition) + ": " + str(self.value)
def __cmp__(self, other):
""" Implements comparison of two instances. """
try:
return cmp(self.propertyDefinition.identifier, other.propertyDefinition.identifier)
except AttributeError:
return 1
def toPersistenceFormat(self):
""" @note: Raises a C{PropertyError} if the conversion fails. """
preparedValue = self._propertyDefinition.toPersistenceFormat(self.value)
return {self.identifier: preparedValue}
@staticmethod
def create(propertyDefinition, persistedValue):
"""
Creates a property from persistence format.
@param propertyDefinition: Describes the property by name, identifier, restrictions, etc.
@type propertyDefinition: L{PropertyDefinition<datafinder.core.configuration.properties.property_definition.PropertyDefinition>}
@param persistedValue: Value of the property in persistence format.
@type persistedValue: L{MetadataValue<datafinder.persistence.metadata.value_mapping.MetadataValue>}
"""
foundValue = False
valueRepresentations = persistedValue.guessRepresentation()
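        # Try each guessed representation until one converts without a PropertyError;
        # fall back to the property definition's default value if none fits.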
for valueRepresentation in valueRepresentations:
try:
value = propertyDefinition.fromPersistenceFormat(valueRepresentation)
foundValue = True
break
except PropertyError:
continue
if not foundValue:
value = propertyDefinition.defaultValue
return Property(propertyDefinition, value)
| {
"content_hash": "31641a6e025ce4080c1c2bd6dcf2138f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 136,
"avg_line_length": 35,
"alnum_prop": 0.6253968253968254,
"repo_name": "DLR-SC/DataFinder",
"id": "465c899b3c6e0a22c162fe145106338152a0f231",
"size": "4844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/core/item/property.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
class defaultlist(list):
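    """A list that grows on demand: indexing past the end pads the list with
    factory() results until the requested index exists, similar in spirit to
    collections.defaultdict."""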
def __init__(self, *args, factory=None, **kwargs):
super().__init__(*args, **kwargs)
self.factory = factory
def __getitem__(self, index):
if index >= len(self):
diff = index - len(self) + 1
for i in range(diff):
self.append(self.factory())
return super().__getitem__(index)
| {
"content_hash": "d9a9941936be49fa8d3d4be2b86f7071",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 34.54545454545455,
"alnum_prop": 0.5210526315789473,
"repo_name": "cdgriffith/Reusables",
"id": "a3b3366b7a256b1e95a32ca757284ca46d1b26d9",
"size": "428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reusables/default_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2616"
},
{
"name": "Python",
"bytes": "171865"
}
],
"symlink_target": ""
} |
import os
import pytest
import pandas as pd
from toolz import pipe
from ..data import limit_rows, MaxRowsError, sample, to_values, to_json, to_csv
def _create_dataframe(N):
data = pd.DataFrame({"x": range(N), "y": range(N)})
return data
def _create_data_with_values(N):
data = {"values": [{"x": i, "y": i + 1} for i in range(N)]}
return data
def test_limit_rows():
"""Test the limit_rows data transformer."""
data = _create_dataframe(10)
result = limit_rows(data, max_rows=20)
assert data is result
with pytest.raises(MaxRowsError):
pipe(data, limit_rows(max_rows=5))
data = _create_data_with_values(10)
result = pipe(data, limit_rows(max_rows=20))
assert data is result
with pytest.raises(MaxRowsError):
limit_rows(data, max_rows=5)
def test_sample():
"""Test the sample data transformer."""
data = _create_dataframe(20)
result = pipe(data, sample(n=10))
assert len(result) == 10
assert isinstance(result, pd.DataFrame)
data = _create_data_with_values(20)
result = sample(data, n=10)
assert isinstance(result, dict)
assert "values" in result
assert len(result["values"]) == 10
data = _create_dataframe(20)
result = pipe(data, sample(frac=0.5))
assert len(result) == 10
assert isinstance(result, pd.DataFrame)
data = _create_data_with_values(20)
result = sample(data, frac=0.5)
assert isinstance(result, dict)
assert "values" in result
assert len(result["values"]) == 10
def test_to_values():
"""Test the to_values data transformer."""
data = _create_dataframe(10)
result = pipe(data, to_values)
assert result == {"values": data.to_dict(orient="records")}
def test_type_error():
"""Ensure that TypeError is raised for types other than dict/DataFrame."""
for f in (sample, limit_rows, to_values):
with pytest.raises(TypeError):
pipe(0, f)
def test_dataframe_to_json():
"""Test to_json
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_dataframe(10)
try:
result1 = pipe(data, to_json)
result2 = pipe(data, to_json)
filename = result1["url"]
output = pd.read_json(filename)
finally:
os.remove(filename)
assert result1 == result2
assert output.equals(data)
def test_dict_to_json():
"""Test to_json
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_data_with_values(10)
try:
result1 = pipe(data, to_json)
result2 = pipe(data, to_json)
filename = result1["url"]
output = pd.read_json(filename).to_dict(orient="records")
finally:
os.remove(filename)
assert result1 == result2
assert data == {"values": output}
def test_dataframe_to_csv():
"""Test to_csv with dataframe input
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_dataframe(10)
try:
result1 = pipe(data, to_csv)
result2 = pipe(data, to_csv)
filename = result1["url"]
output = pd.read_csv(filename)
finally:
os.remove(filename)
assert result1 == result2
assert output.equals(data)
def test_dict_to_csv():
"""Test to_csv with dict input
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_data_with_values(10)
try:
result1 = pipe(data, to_csv)
result2 = pipe(data, to_csv)
filename = result1["url"]
output = pd.read_csv(filename).to_dict(orient="records")
finally:
os.remove(filename)
assert result1 == result2
assert data == {"values": output}
| {
"content_hash": "860097084ea7bd54ef4cea77af878f07",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 27.776978417266186,
"alnum_prop": 0.6293706293706294,
"repo_name": "altair-viz/altair",
"id": "b4b4196ffc2d3f6215be3d3706a668cb96f9c544",
"size": "3861",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "altair/utils/tests/test_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "5377805"
},
{
"name": "TeX",
"bytes": "2684"
}
],
"symlink_target": ""
} |
"""Contains generic table-writing functions. Data is expected to be a list of namedtuples.
kwargs (kws):
'title': First row will contain user-provided title string
'prt_if': Only print a line if user-specfied test returns True.
prt_if is a lambda function with the data item's namedtuple as input.
Example: prt_if = lambda nt: nt.p_uncorrected < 0.05
'sort_by' : User-customizable sort when printing.
sortby is a lambda function with the data item's namedtuple as input.
It is the 'key' used in the sorted function.
Example: sort_by = lambda nt: [nt.NS, -1*nt.depth]
'hdrs' : A list of column headers to use when printing the table.
default: The fields in the data's namedtuple is used as the column headers.
'sep': Separator used when printing the tab-separated table format.
default: sep = '\t'
'prt_flds' : Used to print a subset of the fields in the namedtuple or
to control the order of the print fields
'fld2col_widths: A dictionary of column widths used when writing xlsx files.
'fld2fmt': Used in tsv files and xlsx files for formatting specific fields
For adding color or other formatting to a row based on value in a row:
'ntfld_wbfmt': namedtuple field containing a value used as a key for a xlsx format
'ntval2wbfmtdict': namedtuple value and corresponding xlsx format dict. Examples:
"""
__copyright__ = "Copyright (C) 2016-2017, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
import re
import sys
from goatools.wr_tbl_class import get_hdrs
def prt_txt(prt, data_nts, prtfmt=None, nt_fields=None, **kws):
"""Print list of namedtuples into a table using prtfmt."""
# optional keyword args: prt_if sort_by
if data_nts:
if prtfmt is None:
prtfmt = mk_fmtfld(data_nts[0])
# if nt_fields arg is None, use fields from prtfmt string.
if nt_fields is not None:
_chk_flds_fmt(nt_fields, prtfmt)
if 'sort_by' in kws:
data_nts = sorted(data_nts, key=kws['sort_by'])
prt_if = kws.get('prt_if', None)
for data_nt in data_nts:
if prt_if is None or prt_if(data_nt):
prt.write(prtfmt.format(**data_nt._asdict()))
else:
sys.stdout.write(" 0 items. NOT WRITING w/format_string({F})\n".format(F=prtfmt))
def prt_nts(data_nts, prtfmt=None, prt=sys.stdout, nt_fields=None, **kws):
"""Print list of namedtuples into a table using prtfmt."""
prt_txt(prt, data_nts, prtfmt, nt_fields, **kws)
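# Illustrative call (hypothetical namedtuples whose fields include GO and p_fdr_bh):
#   prt_nts(nts, prtfmt="{GO} {p_fdr_bh:8.2e}\n",
#           sort_by=lambda nt: nt.p_fdr_bh, prt_if=lambda nt: nt.p_fdr_bh < 0.05)
# prints only the significant rows, sorted, using the keyword hooks described above.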
def wr_xlsx(fout_xlsx, data_xlsx, **kws):
"""Write a spreadsheet into a xlsx file."""
from goatools.wr_tbl_class import WrXlsx
# optional keyword args: fld2col_widths hdrs prt_if sort_by fld2fmt prt_flds
    items_str = kws.get("items", "items")
if data_xlsx:
# Open xlsx file
xlsxobj = WrXlsx(fout_xlsx, data_xlsx[0]._fields, **kws)
worksheet = xlsxobj.add_worksheet()
# Write title (optional) and headers.
row_idx = xlsxobj.wr_title(worksheet)
row_idx = xlsxobj.wr_hdrs(worksheet, row_idx)
row_idx_data0 = row_idx
# Write data
row_idx = xlsxobj.wr_data(data_xlsx, row_idx, worksheet)
# Close xlsx file
xlsxobj.workbook.close()
sys.stdout.write(" {N:>5} {ITEMS} WROTE: {FOUT}\n".format(
N=row_idx-row_idx_data0, ITEMS=items_str, FOUT=fout_xlsx))
else:
sys.stdout.write(" 0 {ITEMS}. NOT WRITING {FOUT}\n".format(
ITEMS=items_str, FOUT=fout_xlsx))
def wr_xlsx_sections(fout_xlsx, xlsx_data, **kws):
"""Write xlsx file containing section names followed by lines of namedtuple data."""
from goatools.wr_tbl_class import WrXlsx
items_str = "items" if "items" not in kws else kws["items"]
prt_hdr_min = 10
num_items = 0
if xlsx_data:
# Basic data checks
assert len(xlsx_data[0]) == 2, "wr_xlsx_sections EXPECTED: [(section, nts), ..."
assert xlsx_data[0][1], \
"wr_xlsx_sections EXPECTED SECTION({S}) LIST TO HAVE DATA".format(S=xlsx_data[0][0])
# Open xlsx file and write title (optional) and headers.
xlsxobj = WrXlsx(fout_xlsx, xlsx_data[0][1][0]._fields, **kws)
worksheet = xlsxobj.add_worksheet()
row_idx = xlsxobj.wr_title(worksheet)
hdrs_wrote = False
# Write data
for section_text, data_nts in xlsx_data:
num_items += len(data_nts)
fmt = xlsxobj.wbfmtobj.get_fmt_section()
row_idx = xlsxobj.wr_row_mergeall(worksheet, section_text, fmt, row_idx)
if hdrs_wrote is False or len(data_nts) > prt_hdr_min:
row_idx = xlsxobj.wr_hdrs(worksheet, row_idx)
hdrs_wrote = True
row_idx = xlsxobj.wr_data(data_nts, row_idx, worksheet)
# Close xlsx file
xlsxobj.workbook.close()
sys.stdout.write(" {N:>5} {ITEMS} WROTE: {FOUT} ({S} sections)\n".format(
N=num_items, ITEMS=items_str, FOUT=fout_xlsx, S=len(xlsx_data)))
else:
sys.stdout.write(" 0 {ITEMS}. NOT WRITING {FOUT}\n".format(
ITEMS=items_str, FOUT=fout_xlsx))
def wr_tsv(fout_tsv, tsv_data, **kws):
"""Write a file of tab-separated table data"""
items_str = "items" if "items" not in kws else kws["items"]
if tsv_data:
ifstrm = sys.stdout if fout_tsv is None else open(fout_tsv, 'w')
num_items = prt_tsv(ifstrm, tsv_data, **kws)
if fout_tsv is not None:
sys.stdout.write(" {N:>5} {ITEMS} WROTE: {FOUT}\n".format(
N=num_items, ITEMS=items_str, FOUT=fout_tsv))
ifstrm.close()
else:
sys.stdout.write(" 0 {ITEMS}. NOT WRITING {FOUT}\n".format(
ITEMS=items_str, FOUT=fout_tsv))
def prt_tsv(prt, data_nts, **kws):
"""Print tab-separated table data"""
# User-controlled printing options
sep = "\t" if 'sep' not in kws else kws['sep']
flds_all = data_nts[0]._fields
hdrs = get_hdrs(flds_all, **kws)
fld2fmt = None if 'fld2fmt' not in kws else kws['fld2fmt']
if 'sort_by' in kws:
data_nts = sorted(data_nts, key=kws['sort_by'])
prt_if = kws['prt_if'] if 'prt_if' in kws else None
prt_flds = kws['prt_flds'] if 'prt_flds' in kws else data_nts[0]._fields
# Write header
prt.write("# {}\n".format(sep.join(hdrs)))
# Write data
items = 0
for nt_data_row in data_nts:
if prt_if is None or prt_if(nt_data_row):
if fld2fmt is not None:
row_fld_vals = [(fld, getattr(nt_data_row, fld)) for fld in prt_flds]
row_vals = _fmt_fields(row_fld_vals, fld2fmt)
else:
row_vals = [getattr(nt_data_row, fld) for fld in prt_flds]
prt.write("{}\n".format(sep.join(str(d) for d in row_vals)))
items += 1
return items
def _fmt_fields(fld_vals, fld2fmt):
"""Optional user-formatting of specific fields, eg, pval: '{:8.2e}'."""
vals = []
for fld, val in fld_vals:
if fld in fld2fmt:
val = fld2fmt[fld].format(val)
vals.append(val)
return vals
def _chk_flds_fmt(nt_fields, prtfmt):
"""Check that all fields in the prtfmt have corresponding data in the namedtuple."""
fmtflds = get_fmtflds(prtfmt)
missing_data = set(fmtflds).difference(set(nt_fields))
# All data needed for print is present, return.
if not missing_data:
return
#raise Exception('MISSING DATA({M}).'.format(M=" ".join(missing_data)))
msg = ['CANNOT PRINT USING: "{PF}"'.format(PF=prtfmt.rstrip())]
for fld in fmtflds:
errmrk = "" if fld in nt_fields else "ERROR-->"
msg.append(" {ERR:8} {FLD}".format(ERR=errmrk, FLD=fld))
raise Exception('\n'.join(msg))
def get_fmtflds(prtfmt):
"""Return the fieldnames in the formatter text."""
# Example prtfmt: "{NS} {study_cnt:2} {fdr_bh:5.3e} L{level:02} D{depth:02} {GO} {name}\n"
return [f.split(':')[0] for f in re.findall(r'{(\S+)}', prtfmt)]
def get_fmtfldsdict(prtfmt):
"""Return the fieldnames in the formatter text."""
# Example prtfmt: "{NS} {study_cnt:2} {fdr_bh:5.3e} L{level:02} D{depth:02} {GO} {name}\n"
return {v:v for v in get_fmtflds(prtfmt)}
def _prt_txt_hdr(prt, prtfmt):
"""Print header for text report."""
tblhdrs = get_fmtfldsdict(prtfmt)
# If needed, reformat for format_string for header, which has strings, not floats.
hdrfmt = re.sub(r':(\d+)\.\S+}', r':\1}', prtfmt)
hdrfmt = re.sub(r':(0+)(\d+)}', r':\2}', hdrfmt)
prt.write("#{}".format(hdrfmt.format(**tblhdrs)))
def mk_fmtfld(nt_item):
"""Given a namedtuple, return a format_field string."""
fldstrs = []
# Default formats based on fieldname
fld2fmt = {
'hdrgo' : lambda f: "{{{FLD}:1,}}".format(FLD=f),
'dcnt' : lambda f: "{{{FLD}:6,}}".format(FLD=f),
'level' : lambda f: "L{{{FLD}:02,}}".format(FLD=f),
'depth' : lambda f: "D{{{FLD}:02,}}".format(FLD=f),
}
for fld in nt_item._fields:
if fld in fld2fmt:
val = fld2fmt[fld](fld)
else:
val = "{{{FLD}}}".format(FLD=fld)
fldstrs.append(val)
fldstrs.append("\n")
return " ".join(fldstrs)
# Copyright (C) 2016-2017, DV Klopfenstein, H Tang. All rights reserved.
| {
"content_hash": "543575ed48d582bf03b140499c148147",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 97,
"avg_line_length": 45.27014218009479,
"alnum_prop": 0.6002931323283082,
"repo_name": "lileiting/goatools",
"id": "383701896a17b13eec24a46d4c87a28a172e3a8b",
"size": "9552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goatools/wr_tbl.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "224437"
},
{
"name": "Makefile",
"bytes": "14930"
},
{
"name": "Python",
"bytes": "77536843"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from filer.fields.image import FilerImageField
from djangocms_social.models import likes
from djangocms_social import defaults
class Like(CMSPlugin):
facebook = models.BooleanField(_('facebook'), default=False)
google = models.BooleanField(_('google'), default=False)
# options
title = models.CharField(_('title'), max_length=255, default=defaults.LIKE['title'], blank=True, null=True,
help_text=_('Uses the title of the browser window if empty.'))
description = models.CharField(_('description'), max_length=255, default=defaults.LIKE['description'],
blank=True, null=True)
image = FilerImageField(verbose_name=_('image'), blank=True, null=True,
help_text=_('This setting can only be set once per page. If set twice, it will be overridden.'))
def __init__(self, *args, **kwargs):
super(Like, self).__init__(*args, **kwargs)
self.options = likes.AVAILABLE
@property
def get_objects(self):
objects = []
for type, object in self.options.iteritems():
if getattr(self, type, False):
objects.append(getattr(likes, object)(**self.get_kwargs))
return objects
@property
def get_kwargs(self):
kwargs = {
'title': self.title,
'description': self.description,
}
return kwargs
class Mail(CMSPlugin):
subject = models.CharField(_('subject'), max_length=100)
body = models.TextField(_('body'), default='', blank=True)
append_url = models.BooleanField(_('append url'), default=True,
help_text=_('Append the current web address at the end of the mail.'))
class Links(CMSPlugin):
facebook = models.URLField(_('facebook'), null=True, blank=True)
# googleplus = models.URLField(_('google plus'), null=True, blank=True)
twitter = models.URLField(_('twitter'), null=True, blank=True)
xing = models.URLField(_('xing'), null=True, blank=True)
linkedin = models.URLField(_('linkedin'), null=True, blank=True)
# youtube = models.URLField(_('youtube'), null=True, blank=True)
rss = models.URLField(_('rss'), null=True, blank=True)
links = [
'facebook', 'twitter', 'xing', 'linkedin', 'rss',
] | {
"content_hash": "ceb0099003e1450f3a3c48a3a4567701",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 111,
"avg_line_length": 38.04838709677419,
"alnum_prop": 0.6511233573548114,
"repo_name": "pascalmouret/djangocms-social",
"id": "c368fb92b517d373dbe5de04010c1f9fdefa4e60",
"size": "2383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_social/models/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "46162"
}
],
"symlink_target": ""
} |
from gevent import pool
import requests
import npyscreen
import pickle
import os.path
import time
class RebMaControl:
def __init__(self, repoListUrl):
self.dumpFileName_repo = "./.repoList.bin"
self.repoListUrl = repoListUrl
try:
if (os.path.isfile(self.dumpFileName_repo) and time.time() - os.path.getmtime(self.dumpFileName_repo) < 3600):
self.repoList = self.readRepoListDump(self.dumpFileName_repo)
else:
self.repoList = self.readRepoListSource(self.repoListUrl)
self.writeRepoListDump(self.repoList, self.dumpFileName_repo)
except:
self.repoList = []
def readRepoListDump(self, dumpFileName_repo):
dumpFile_repo = open(dumpFileName_repo, 'rb')
return pickle.load(dumpFile_repo)
def readRepoListSource(self, repoListUrl):
repoPathList = [item
for item in requests.get(repoListUrl, timeout=20).content.split("\n")
if item.startswith("github.com")]
        # get list of repo URLs
repoUrlList = ['https://api.github.com/repos/{0}'.format(item[11:]) for item in repoPathList]
#build valid github api url
p = pool.Pool(20)
repoResponseList = p.map(requests.get, repoUrlList)
repoList = [ (
str(request.json()["name"]),
str(version),
str(request.json()["description"]),
str(request.json()["html_url"])
)
for request in repoResponseList
for version in
[ item["name"]
for item in requests.get('{0}/branches'.format(request.url)).json()
] +
[ item["name"]
for item in requests.get('{0}/tags'.format(request.url)).json()
]
]
return repoList
def writeRepoListDump(self, repoList, dumpFileName_repo):
dumpFile_repo = open(dumpFileName_repo, 'wb')
pickle.dump(repoList, dumpFile_repo)
def get_repoList(self):
return self.repoList
| {
"content_hash": "28ec1044011d632f6405b621c54ec127",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 113,
"avg_line_length": 28.75409836065574,
"alnum_prop": 0.6984036488027366,
"repo_name": "Mr-Pi/rebma",
"id": "31d55baad647ca09c39b957e49a19666c7237bdf",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rebmaControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10973"
}
],
"symlink_target": ""
} |
import asyncio
import atexit
import json
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
import threading
from typing import Any
import tornado.iostream
import tornado.queues
import tornado.websocket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.process import Subprocess
logger = logging.getLogger("weditor")
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
IS_WINDOWS = os.name == "nt"
class WinAsyncSubprocess(object):
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.io_loop = IOLoop.current()
kwargs['bufsize'] = 0 # unbuffed
kwargs['stdin'] = subprocess.PIPE
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
self.proc = subprocess.Popen(*args, **kwargs)
self.pid = self.proc.pid
# https://www.tornadoweb.org/en/stable/queues.html
self._qout = tornado.queues.Queue()
self._qexit = tornado.queues.Queue()
threading.Thread(name="async-subprocess",
target=self._drain,
daemon=True).start()
def _drain(self):
logger.info("Started drain subprocess stdout in thread")
for line in iter(self.proc.stdout.readline, b''):
self.io_loop.add_callback(self._qout.put, line)
self.io_loop.add_callback(self._qout.put, None)
logger.info("windows process stdout closed")
self.io_loop.add_callback(self._qexit.put, self.proc.wait())
async def wait_for_exit(self, raise_error=True):
""" Ignore raise_error """
exit_code = await self._qexit.get()
return exit_code
async def readline(self) -> bytes:
ret = await self._qout.get()
if ret is None:
raise IOError("subprocess stdout closed")
return ret
async def stdin_write(self, data: bytes):
return self.proc.stdin.write(data)
class PosixAsyncSubprocess(Subprocess):
async def readline(self) -> bytes:
return await self.stdout.read_until(b"\n")
async def stdin_write(self, data: bytes):
return await self.stdin.write(data)
class PythonShellHandler(tornado.websocket.WebSocketHandler):
def initialize(self):
self._tmpd = tempfile.TemporaryDirectory(suffix='-weditor')
atexit.register(self._tmpd.cleanup)
def on_close(self):
logger.warning("websocket closed, cleanup")
self._tmpd.cleanup()
atexit.unregister(self._tmpd.cleanup)
IOLoop.current().add_callback(self.kill_process)
@property
def _tmpdir(self) -> str:
return self._tmpd.name
async def prepare(self):
"""
Refs:
https://www.tornadoweb.org/en/stable/process.html#tornado.process.Subprocess
https://www.tornadoweb.org/en/stable/iostream.html#tornado.iostream.IOStream
"""
AsyncSubprocess = WinAsyncSubprocess if IS_WINDOWS else PosixAsyncSubprocess
env = os.environ.copy()
env['PYTHONIOENCODING'] = "utf-8"
self.__process = AsyncSubprocess(
[sys.executable, "-u",
os.path.join(ROOT_DIR, "../ipyshell-console.py")],
env=env,
cwd=self._tmpdir,
stdin=Subprocess.STREAM,
stdout=Subprocess.STREAM,
stderr=subprocess.STDOUT) # yapf: disable
# self.__process = Subprocess([sys.executable, "-u", os.path.join(ROOT_DIR, "ipyshell.py")],
# # bufsize=1, #universal_newlines=True,
# stdin=Subprocess.STREAM,
# stdout=Subprocess.STREAM)
IOLoop.current().add_callback(self.sync_process_output)
async def kill_process(self):
self.__process.proc.kill()
ret = await self.__process.wait_for_exit(raise_error=False)
logger.info("process quited with code %d", ret)
async def _readline_decoded(self) -> str:
line = await self.__process.readline()
return line.decode("utf-8").rstrip()
async def sync_process_output(self):
try:
while True:
line = await self._readline_decoded()
if not line:
logger.warning("proc-stdout read empty line")
break
fields = line.split(":", 1)
if len(fields) != 2:
continue
cmdx, value = fields
if cmdx == "LNO":
self.write2({"method": "gotoLine", "value": int(value)})
elif cmdx == "DBG":
logger.debug("DBG: %s", value)
# self.write2({"method": "output", "value": "- "+value})
elif cmdx == "WRT":
# here value is json_encoded string
self.write2({
"method": "output",
"value": json.loads(value)
})
elif cmdx == "EOF":
logger.debug(
"finished running code block, time used %.1fs",
int(value) / 1000)
self.write2({"method": "finish", "value": int(value)})
else:
# self.write2({"method": "output", "value": line +"\n"})
logger.warning("Unsupported output line: %s", line)
except (tornado.iostream.StreamClosedError, IOError):
pass
finally:
logger.debug("sync process output stopped")
# code may never goes here
#ret = await self.__process.wait_for_exit(raise_error=False)
#logger.info("process exit with code %d", ret)
#self.__process = None
def send_keyboard_interrupt(self):
if IS_WINDOWS: # Windows
# On windows, it's not working with the following code
# - p.send_signal(signal.SIGINT)
# - os.kill(p.pid, signal.CTRL_C_EVENT)
# - subprocess.call(["taskkill", "/PID", str(p.pid)])
            # But the following command works fine
pid = self.__process.pid
import ctypes
k = ctypes.windll.kernel32
            k.FreeConsole()  # Detach from our own console so we can attach to the child's
k.AttachConsole(pid)
k.SetConsoleCtrlHandler(
None, True) # Disable Ctrl-C handling for our program
k.GenerateConsoleCtrlEvent(signal.CTRL_C_EVENT, 0) # SIGINT
# Re-enable Ctrl-C handling or any subsequently started
# programs will inherit the disabled state.
k.SetConsoleCtrlHandler(None, False)
else:
self.__process.proc.send_signal(
signal.SIGINT) # Linux is so simple
async def open(self):
logger.debug("websocket opened")
logger.info("create process pid: %d", self.__process.pid)
# self.write2({"method": "resetContent", "value": INIT_CODE})
# self.write2({"method": "gotoLine", "value": 1})
# await gen.sleep(.1)
def write2(self, data):
self.write_message(json.dumps(data))
def _adjust_code(self, code: str):
""" fix indent error, remove all line spaces """
prefixs = re.findall(r"^\s*", code, re.M)
space_len = min([len(pre) for pre in prefixs])
lines = code.splitlines(keepends=True)
return ''.join([line[space_len:] for line in lines])
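    # Illustrative note (added for clarity, not part of the original handler): the
    # dedent above drops the smallest common leading whitespace while keeping the
    # relative indentation, e.g.
    #
    #   self._adjust_code("    x = 1\n    y = 2")        # -> "x = 1\ny = 2"
    #   self._adjust_code("    if x:\n        y = 2")    # -> "if x:\n    y = 2"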
async def on_message(self, message):
# print("Receive:", message)
data = json.loads(message)
method, value = data['method'], data.get('value')
if method == 'input':
code = self._adjust_code(value)
code = json.dumps(code) + "\n"
logger.debug("send to proc: %s", code.rstrip())
await self.__process.stdin_write(code.encode('utf-8'))
elif method == "keyboardInterrupt":
self.send_keyboard_interrupt()
elif method == "restartKernel":
await self.kill_process()
await self.prepare()
self.write2({"method": "restarted"})
else:
logger.warning("Unknown received message: %s", data)
| {
"content_hash": "39830319c24e4aab458e0baffc9a883c",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 100,
"avg_line_length": 37.41284403669725,
"alnum_prop": 0.5720941638057871,
"repo_name": "openatx/weditor",
"id": "a234005015557166e135970574714467f1414bf0",
"size": "8174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weditor/web/handlers/shell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83444"
},
{
"name": "HTML",
"bytes": "17844"
},
{
"name": "JavaScript",
"bytes": "1257418"
},
{
"name": "Python",
"bytes": "42017"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Labelfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "parcats"
_path_str = "parcats.labelfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Labelfont object
Sets the font for the `dimension` labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcats.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Labelfont
"""
super(Labelfont, self).__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcats.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcats.Labelfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
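
# Illustrative usage sketch (added for clarity, not part of the generated module):
# this class is rarely instantiated directly; the same font settings can be passed
# as a plain dict on a parallel-categories trace, e.g.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Parcats(
#       dimensions=[dict(values=["a", "b", "a"]), dict(values=["x", "x", "y"])],
#       labelfont=dict(family="Courier New", size=16, color="crimson"),
#   ))
#   fig.show()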
| {
"content_hash": "3a9f01a416a967bd13b6d78e1b9f24f6",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 82,
"avg_line_length": 37.12334801762115,
"alnum_prop": 0.5587990981369407,
"repo_name": "plotly/plotly.py",
"id": "31427079fb73b85a7f33d80bb77d98d6cf1fa9cb",
"size": "8427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/parcats/_labelfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._availability_sets_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_available_sizes_request,
build_list_by_subscription_request,
build_list_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailabilitySetsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2022_03_01.aio.ComputeManagementClient`'s
:attr:`availability_sets` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@overload
async def create_or_update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: _models.AvailabilitySet,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AvailabilitySet:
"""Create or update an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AvailabilitySet:
"""Create or update an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: Union[_models.AvailabilitySet, IO],
**kwargs: Any
) -> _models.AvailabilitySet:
"""Create or update an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:param parameters: Parameters supplied to the Create Availability Set operation. Is either a
        model type or an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AvailabilitySet]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AvailabilitySet")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AvailabilitySet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: _models.AvailabilitySetUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AvailabilitySet:
"""Update an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySetUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AvailabilitySet:
"""Update an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
availability_set_name: str,
parameters: Union[_models.AvailabilitySetUpdate, IO],
**kwargs: Any
) -> _models.AvailabilitySet:
"""Update an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:param parameters: Parameters supplied to the Update Availability Set operation. Is either a
        model type or an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySetUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AvailabilitySet]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "AvailabilitySetUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AvailabilitySet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, availability_set_name: str, **kwargs: Any
) -> None:
"""Delete an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"} # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, availability_set_name: str, **kwargs: Any) -> _models.AvailabilitySet:
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AvailabilitySet or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AvailabilitySet]
request = build_get_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AvailabilitySet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"} # type: ignore
@distributed_trace
def list_by_subscription(
self, expand: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.AvailabilitySet"]:
"""Lists all availability sets in a subscription.
:param expand: The expand expression to apply to the operation. Allowed values are
'instanceView'. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySet or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AvailabilitySetListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets"} # type: ignore
@distributed_trace
def list(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.AvailabilitySet"]:
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailabilitySet or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_03_01.models.AvailabilitySet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AvailabilitySetListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AvailabilitySetListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets"} # type: ignore
@distributed_trace
def list_available_sizes(
self, resource_group_name: str, availability_set_name: str, **kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSize"]:
"""Lists all available virtual machine sizes that can be used to create a new virtual machine in
an existing availability set.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param availability_set_name: The name of the availability set. Required.
:type availability_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSize or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2022_03_01.models.VirtualMachineSize]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineSizeListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_available_sizes_request(
resource_group_name=resource_group_name,
availability_set_name=availability_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_available_sizes.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_available_sizes.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes"} # type: ignore
| {
"content_hash": "dfb0d2cef44c4cdea6362425dc97dc11",
"timestamp": "",
"source": "github",
"line_count": 698,
"max_line_length": 204,
"avg_line_length": 44.83810888252149,
"alnum_prop": 0.6309869955586798,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ba172ec6e8a482a5b068639234823ad206d1da33",
"size": "31797",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/aio/operations/_availability_sets_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from datetime import datetime
import dateutil.parser
class BaseModel(object):
""" Base class for other models. """
def __init__(self, **kwargs):
self._default_params = {}
@classmethod
def _NewFromJsonDict(cls, data, **kwargs):
if kwargs:
for key, val in kwargs.items():
data[key] = val
return cls(**data)
class Book(BaseModel):
"""A class that represents the Bitso orderbook and it's limits"""
def __init__(self, **kwargs):
self._default_params = {
'symbol': kwargs.get('book'),
'minimum_amount': Decimal(kwargs.get('minimum_amount')),
'maximum_amount': Decimal(kwargs.get('maximum_amount')),
'minimum_price': Decimal(kwargs.get('minimum_price')),
'maximum_price': Decimal(kwargs.get('maximum_price')),
'minimum_value': Decimal(kwargs.get('minimum_value')),
'maximum_value': Decimal(kwargs.get('maximum_value'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Book(symbol={symbol})".format(symbol=self.symbol)
class AvailableBooks(BaseModel):
"""A class that represents Bitso's orderbooks"""
def __init__(self, **kwargs):
self.books = []
for ob in kwargs.get('payload'):
self.books.append(ob['book'])
setattr(self, ob['book'], Book._NewFromJsonDict(ob))
def __repr__(self):
return "AvilableBooks(books={books})".format(books=','.join(self.books))
class AccountStatus(BaseModel):
def __init__(self, **kwargs):
self._default_params = {
'client_id': kwargs.get('client_id'),
'status': kwargs.get('status'),
'cellphone_number': kwargs.get('cellphone_number'),
'official_id': kwargs.get('official_id'),
'proof_of_residency': kwargs.get('proof_of_residency'),
'signed_contract': kwargs.get('signed_contract'),
'origin_of_funds': kwargs.get('origin_of_funds'),
'daily_limit': Decimal(kwargs.get('daily_limit')),
'monthly_limit': Decimal(kwargs.get('monthly_limit')),
'daily_remaining': Decimal(kwargs.get('daily_remaining')),
'monthly_remaining': Decimal(kwargs.get('monthly_remaining'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "AccountStatus(client_id={client_id})".format(client_id=self.client_id)
class AccountRequiredField(BaseModel):
def __init__(self, **kwargs):
self._default_params = {
'name': kwargs.get('field_name'),
'description': kwargs.get('field_description'),
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "AccountRequiredField(name={name})".format(name=self.name)
class Ticker(BaseModel):
""" A class that represents a Bitso ticker. """
def __init__(self, **kwargs):
self._default_params = {
'book': kwargs.get('book'),
'ask': Decimal(kwargs.get('ask')),
'bid': Decimal(kwargs.get('bid')),
'high': Decimal(kwargs.get('high')),
'last': Decimal(kwargs.get('last')),
'low': Decimal(kwargs.get('low')),
'vwap': Decimal(kwargs.get('vwap')),
'volume': Decimal(kwargs.get('volume')),
'created_at': dateutil.parser.parse(kwargs.get('created_at'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Ticker(book={book},ask={ask}, bid={bid}, high={high}, last={last}, low={low}, created_at={created_at}, vwaplow={vwap})".format(
book=self.book,
ask=self.ask,
bid=self.bid,
high=self.high,
low=self.low,
last=self.last,
vwap=self.vwap,
created_at=self.created_at)
class PublicOrder(BaseModel):
def __init__(self, **kwargs):
self._default_params = {
'book': kwargs.get('book'),
'price': Decimal(kwargs.get('price')),
'amount': Decimal(kwargs.get('amount'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
if kwargs.get('oid'):
setattr(self, 'oid', kwargs.get('oid'))
else:
setattr(self, 'oid', None)
def __repr__(self):
return "PublicOrder(book={book},price={price}, amount={amount})".format(
book=self.book,
price=self.price,
amount=self.amount)
class OrderBook(BaseModel):
""" A class that represents a Bitso order book. """
def __init__(self, **kwargs):
self._default_params = {
'asks': kwargs.get('asks'),
'bids': kwargs.get('bids'),
'updated_at': dateutil.parser.parse(kwargs.get('updated_at')),
'sequence': int(kwargs.get('sequence'))
}
for (param, val) in self._default_params.items():
if param in ['asks', 'bids']:
public_orders = []
for order in val:
public_orders.append(PublicOrder._NewFromJsonDict(order))
setattr(self, param, public_orders)
continue
setattr(self, param, val)
def __repr__(self):
return "OrderBook({num_asks} asks, {num_bids} bids, updated_at={updated_at})".format(
num_asks=len(self.asks),
num_bids=len(self.bids),
updated_at=self.updated_at)
class Balance(BaseModel):
""" A class that represents a Bitso user's balance for a specifc currency. """
def __init__(self, **kwargs):
self._default_params = {
'name': kwargs.get('currency'),
'total': Decimal(kwargs.get('total')),
'locked': Decimal(kwargs.get('locked')),
'available': Decimal(kwargs.get('available'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Balance(name={name}, total={total})".format(
name=self.name,
total=self.total)
class Balances(BaseModel):
""" A class that represents a Bitso user's balances """
def __init__(self, **kwargs):
self.currencies = []
for balance in kwargs.get('balances'):
self.currencies.append(balance['currency'])
setattr(self, balance['currency'], Balance._NewFromJsonDict(balance))
def __repr__(self):
return "Balances(currencies={currencies})".format(
currencies=','.join(self.currencies))
class Fee(BaseModel):
""" A class that represents a Bitso user's fees for a specifc order book. """
def __init__(self, **kwargs):
self._default_params = {
'book': kwargs.get('book'),
'fee_decimal': Decimal(kwargs.get('fee_decimal')),
'fee_percent': Decimal(kwargs.get('fee_percent'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Fee(book={book}, fee_percent={fee_percent})".format(
book=self.book,
fee_percent=self.fee_percent)
class Fees(BaseModel):
""" A class that represents a Bitso user's fees """
def __init__(self, **kwargs):
self.books = []
for fee in kwargs.get('fees'):
self.books.append(fee['book'])
setattr(self, fee['book'], Fee._NewFromJsonDict(fee))
def __repr__(self):
return "Fees(books={books})".format(
books=','.join(self.books))
class Trade(BaseModel):
""" A class that represents a Bitso public trade. """
def __init__(self, **kwargs):
self._default_params = {
'book': kwargs.get('book'),
'tid': kwargs.get('tid'),
'amount': Decimal(kwargs.get('amount')),
'price': Decimal(kwargs.get('price')),
'maker_side': kwargs.get('maker_side'),
'created_at': dateutil.parser.parse(kwargs.get('created_at'))
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Trade(tid={tid}, price={price}, amount={amount}, maker_side={maker_side}, created_at={created_at})".format(
tid=self.tid,
price=self.price,
amount=self.amount,
maker_side=self.maker_side,
created_at=self.created_at)
class Withdrawal(BaseModel):
""" A class that represents a User Withdrawal """
def __init__(self, **kwargs):
self._default_params = {
'wid': kwargs.get('wid'),
'status': kwargs.get('status'),
'created_at': dateutil.parser.parse(kwargs.get('created_at')),
'currency': kwargs.get('currency'),
'method': kwargs.get('method'),
'amount': Decimal(kwargs.get('amount')),
'details': kwargs.get('details')
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Withdrawal(wid={wid}, amount={amount}, currency={currency})".format(
wid=self.wid,
amount=self.amount,
currency=self.currency)
class Funding(BaseModel):
""" A class that represents a User Funding """
def __init__(self, **kwargs):
self._default_params = {
'fid': kwargs.get('fid'),
'status': kwargs.get('status'),
'created_at': dateutil.parser.parse(kwargs.get('created_at')),
'currency': kwargs.get('currency'),
'method': kwargs.get('method'),
'amount': Decimal(kwargs.get('amount')),
'details': kwargs.get('details')
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "Funding(fid={fid}, amount={amount}, currency={currency})".format(
fid=self.fid,
amount=self.amount,
currency=self.currency)
class UserTrade(BaseModel):
""" A class that represents a trade for a Bitso user. """
def __init__(self, **kwargs):
self._default_params = {
'book': kwargs.get('book'),
'tid': kwargs.get('tid'),
'oid': kwargs.get('oid'),
'created_at': dateutil.parser.parse(kwargs.get('created_at')),
'major': Decimal(kwargs.get('major')),
'minor': Decimal(kwargs.get('minor')),
'price': Decimal(kwargs.get('price')),
'fees_amount': Decimal(kwargs.get('fees_amount')),
'fees_currency': kwargs.get('fees_currency'),
'side': kwargs.get('side')
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
def __repr__(self):
return "UserTrade(tid={tid}, book={book}, price={price}, major={major}, minor={minor})".format(
tid=self.tid,
book=self.book,
price=self.price,
major=self.major,
minor=self.minor)
class LedgerEntry(BaseModel):
"""A class that represents a Bitso Ledger entry."""
def __init__(self, **kwargs):
for (param, value) in kwargs.items():
if param == 'created_at':
value = dateutil.parser.parse(value)
if param == 'balance_updates':
value = [BalanceUpdate._NewFromJsonDict(item) for item in value]
setattr(self, param, value)
class BalanceUpdate(BaseModel):
"""A class that represents a Bitso Balance Update"""
def __init__(self, **kwargs):
for (param, value) in kwargs.items():
if param == 'amount':
value = Decimal(value)
setattr(self, param, value)
def __repr__(self):
return "BalanceUpdate(currency={currency}, amount={amount}".format(
currency=self.currency,
amount=self.amount)
class Order(BaseModel):
""" A class that represents a Bitso order. """
def __init__(self, **kwargs):
self._default_params = {
'book': kwargs.get('book'),
'oid': kwargs.get('oid'),
'created_at': dateutil.parser.parse(kwargs.get('created_at')),
'updated_at': kwargs.get('updated_at'),
'original_amount': kwargs.get('original_amount'),
'unfilled_amount': Decimal(kwargs.get('unfilled_amount')),
'price': Decimal(kwargs.get('price')),
'side': kwargs.get('side'),
'status': kwargs.get('status'),
'type': kwargs.get('type')
}
for (param, val) in self._default_params.items():
setattr(self, param, val)
if kwargs.get('original_amount') != None:
setattr(self, 'original_amount', Decimal(kwargs.get('original_amount')))
if kwargs.get('original_value') != None:
setattr(self, 'original_value', Decimal(kwargs.get('original_value')))
if kwargs.get('updated_at') != None:
setattr(self, 'updated_at', dateutil.parser.parse(kwargs.get('updated_at')))
def __repr__(self):
return "Order(oid={oid}, side={side}, type={order_type}, price={price}, original_amount={original_amount})".format(
oid=self.oid,
side=self.side,
order_type=self.type,
price=self.price,
original_amount=self.original_amount)
class FundingDestination(BaseModel):
"""A class that represents a Bitso Funding Destination"""
def __init__(self, **kwargs):
for (param, value) in kwargs.items():
setattr(self, param, value)
def __repr__(self):
return "FundingDestination(account_identifier_name={account_identifier_name})".format(
account_identifier_name=self.account_identifier_name)
class OutletDictionary(dict):
""" A Dictionary subclass to represet Bitso Transfer Outlets with parsed decimals. """
def __init__(self, data):
_decimal_keys = ('minimum_transaction',
'maximum_transaction',
'daily_limit',
'fee',
'net')
for k, v in data.items():
if isinstance(v, dict):
self[k] = OutletDictionary(v)
else:
if isinstance(v, basestring) and k in _decimal_keys:
v = Decimal(v)
elif k == 'available':
if v=='1':
v = True
else:
v = False
self[k] = v
class TransactionQuote(BaseModel):
""" A class that represents a Bitso Transaction Quote. """
def __init__(self, **kwargs):
for (param, value) in kwargs.items():
if param=='outlets':
setattr(self, param, OutletDictionary(value))
else:
setattr(self, param, value)
setattr(self, 'created_at', dateutil.parser.parse(kwargs.get('created_at')))
setattr(self, 'expires_at', dateutil.parser.parse(kwargs.get('expires_at')))
setattr(self, 'btc_amount', Decimal(self.btc_amount))
setattr(self, 'gross', Decimal(self.gross))
setattr(self, 'rate', Decimal(self.rate))
def __repr__(self):
return "TransactionQuote(btc_amount={btc_amount}, currency={currency}, rate={rate}, created_at={created_at}, expires_at={expires_at}, gross={gross})".format(
btc_amount=self.btc_amount,
currency=self.currency,
rate=self.rate,
gross= self.gross,
created_at=self.created_at,
expires_at=self.expires_at)
class TransactionOrder(BaseModel):
""" A class that represents a Bitso Transaction Quote. """
def __init__(self, **kwargs):
setattr(self, 'btc_amount', None)
for (param, value) in kwargs.items():
setattr(self, param, value)
#setattr(self, 'created_at', dateutil.parser.parse(kwargs.get('created_at')))
setattr(self, 'expires_at', dateutil.parser.parse(self.expires_at))
if self.btc_amount:
setattr(self, 'btc_amount', Decimal(self.btc_amount))
if self.btc_pending:
setattr(self, 'btc_pending', Decimal(self.btc_pending))
if self.btc_received:
setattr(self, 'btc_received', Decimal(self.btc_received))
if self.currency_amount:
setattr(self, 'currency_amount', Decimal(self.currency_amount))
if self.currency_fees:
setattr(self, 'currency_fees', Decimal(self.currency_fees))
if self.currency_settled:
setattr(self, 'currency_settled', Decimal(self.currency_settled))
class OrderUpdate(BaseModel):
def __init__(self, **kwargs):
for (param, value) in kwargs.items():
if param == 'd':
setattr(self, 'timestamp', value)
setattr(self, 'datetime', datetime.fromtimestamp(int(value)/1000))
elif param == 'r':
setattr(self, 'rate', Decimal(str(value)))
elif param == 't':
if value == 0:
setattr(self, 'side', 'bid')
elif value == 1:
setattr(self, 'side', 'ask')
elif param == 'a':
setattr(self, 'amount', Decimal(str(value)))
elif param == 'v':
setattr(self, 'value', Decimal(str(value)))
elif param == 'o':
setattr(self, 'oid', str(value))
if not hasattr(self, 'amount'):
setattr(self, 'amount', Decimal('0.0'))
setattr(self, 'value', Decimal('0.0'))
def __repr__(self):
return "OrderUpdate(side={side}, timestamp={timestamp}, rate={rate}, amount={amount}, value={value})".format(
side=self.side,
timestamp=self.timestamp,
rate=self.rate,
amount= self.amount,
value=self.value,
oid=self.oid)
class TradeUpdate(BaseModel):
def __init__(self, **kwargs):
for (param, value) in kwargs.items():
if param == 'r':
setattr(self, 'rate', Decimal(str(value)))
elif param == 'a':
setattr(self, 'amount', Decimal(str(value)))
elif param == 'v':
setattr(self, 'value', Decimal(str(value)))
elif param == 'i':
setattr(self, 'tid', value)
def __repr__(self):
return "TradeUpdate(tid={tid}, amount={amount}, rate={rate},value={value})".format(
tid=self.tid,
rate=self.rate,
amount= self.amount,
value=self.value)
class StreamUpdate(object):
def __init__(self, json_dict):
self.channel = json_dict['type']
self.sequence_number = None
if 'sequence' in json_dict:
self.sequence_number = int(json_dict['sequence'])
self.updates = []
if 'payload' in json_dict:
if self.channel == 'diff-orders':
self.updates = self._build_diff_order_updates(json_dict['payload'])
elif self.channel == 'trades':
self.updates = self._build_trade_updates(json_dict['payload'])
elif self.channel == 'orders':
self.updates = self._build_order_updates(json_dict['payload'])
def _build_object_updates(self, payload, objcls):
obj_list = []
for elem in payload:
elobj = objcls(**elem)
obj_list.append(elobj)
return obj_list
def _build_trade_updates(self, payload):
return self._build_object_updates(payload, TradeUpdate)
def _build_diff_order_updates(self, payload):
return self._build_object_updates(payload, OrderUpdate)
def _build_order_updates(self, payload):
asks = self._build_object_updates(payload['asks'], OrderUpdate)
bids = self._build_object_updates(payload['bids'], OrderUpdate)
return asks+bids
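# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of feeding StreamUpdate a decoded websocket message. The
# payload below is hypothetical; it only mirrors the 'diff-orders' keys the
# class handles above ('d', 'r', 't', 'a', 'v', 'o').
if __name__ == '__main__':
    sample = {
        'type': 'diff-orders',
        'sequence': '27214',
        'payload': [
            {'d': 1468964852286, 'r': '10277.01', 't': 0,
             'a': '0.26', 'v': '2672.02', 'o': 'hypothetical-oid'}
        ]
    }
    update = StreamUpdate(sample)
    print("%s seq=%s" % (update.channel, update.sequence_number))
    for o in update.updates:
        print(o)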
| {
"content_hash": "af3019edecf575791d40b83311a704b9",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 165,
"avg_line_length": 34.076422764227644,
"alnum_prop": 0.5401059311924417,
"repo_name": "mariorz/python-bitso",
"id": "70ffc46786b357cadcda740ddee6ec0a1db79784",
"size": "22085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitso/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98652"
}
],
"symlink_target": ""
} |
'''
Simple FreeSurfer ASCII surface file I/O functions
Reads and writes surface anatomy files as understood by AFNI SUMA (and maybe other programs)
The format for a surface with NV vertices and NF faces is:
NV NF
x_0 y_0 z_0 0
x_1 y_1 z_1 0
...
x_[NV-1] y_[NV-1] z_[NV-1] 0
f_00 f01 f02 0
f_10 f11 f12 0
...
f_[NF-1]0 f_[NF-1]1 f_[NF-1]2 0
where the (x,y,z) triples are coordinates and fi(p,q,r) are faces so that vertices with
indices p, q and r form a single triangle
Created on Feb 12, 2012
@author: nick
'''
import numpy as np, os, datetime
import surf
def read(fn):
'''
Reads a AFNI SUMA ASCII surface
Parameters
----------
fn : str
Filename of ASCII surface file
Returns
-------
s : Surface
a surf.Surface as defined in 'fn'
'''
if not os.path.exists(fn):
raise Exception("File not found: %s" % fn)
with open(fn) as f:
r = f.read().split("\n")
row = 0
nv = nf = None # number of vertices and faces
while True:
line = r[row]
row += 1
if line.startswith("#"):
continue
try:
nvnf = line.split(" ")
nv = int(nvnf[0])
nf = int(nvnf[1])
break
        except (ValueError, IndexError):
continue
if not nf:
raise Exception("Not found in %s: number of nodes and faces" % fn)
# helper function to get a numpy Cx3 ndarray
def getrows(c, s): # c: number of rows, s is string with data
vs = np.fromstring(s, count=4 * c, sep=" ")
vx = np.reshape(vs, (c, 4))
return vx[:, :3]
# coordinates should start at pos...
v = getrows(nv, "\n".join(r[row:(row + nv)]))
# and the faces just after those
ffloat = getrows(nf, "\n".join(r[(row + nv):(row + nv + nf)]))
f = ffloat.astype(int)
return surf.Surface(v=v, f=f)
def write(fn, surface, overwrite=False, comment=None):
'''
Writes a AFNI SUMA ASCII surface
Parameters
----------
surface: surface.Surface
surface to be written
fn : str
Output filename of ASCII surface file
overwrite : bool
Whether to overwrite 'fn' if it exists
comment : str
Comments to add to 'fn'
'''
if isinstance(surface, str) and isinstance(fn, surf.Surface):
surface, fn = fn, surface
if not overwrite and os.path.exists(fn):
raise Exception("File already exists: %s" % fn)
s = []
if comment == None:
comment = '# Created %s' % str(datetime.datetime.now())
s.append(comment)
    nv, nf = surface.nvertices, surface.nfaces
v, f = surface.vertices, surface.faces
# number of vertices and faces
s.append('%d %d' % (nv, nf))
# add vertices and faces
s.extend('%f %f %f 0' % (v[i, 0], v[i, 1], v[i, 2]) for i in xrange(nv))
s.extend('%d %d %d 0' % (f[i, 0], f[i, 1], f[i, 2]) for i in xrange(nf))
# write to file
    # use a context manager and a name that does not shadow the faces array 'f'
    with open(fn, 'w') as outfile:
        outfile.write("\n".join(s))
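# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of a read/write round trip; 'lh.pial.asc' is a hypothetical
# input file, so the example only runs if such a file happens to exist.
if __name__ == '__main__':
    example_fn = 'lh.pial.asc'
    if os.path.exists(example_fn):
        s = read(example_fn)
        write(example_fn + '.copy.asc', s, overwrite=True,
              comment='# round-trip copy')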
| {
"content_hash": "43b7465ea2116becb447a33f5fac43a5",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 92,
"avg_line_length": 23.503937007874015,
"alnum_prop": 0.5658291457286432,
"repo_name": "nno/surfing",
"id": "6083adaf54f1c326419097dbc3a08e78188e56fd",
"size": "3365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/surf_fs_asc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "150625"
},
{
"name": "C++",
"bytes": "1131669"
},
{
"name": "Makefile",
"bytes": "4036"
},
{
"name": "Matlab",
"bytes": "221990"
},
{
"name": "Python",
"bytes": "243404"
},
{
"name": "Shell",
"bytes": "17198"
}
],
"symlink_target": ""
} |
import threading
import eventlet
from eventlet import greenpool
from openstack.common import log as logging
from openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify if.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
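# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of driving a ThreadGroup: spawn a one-shot worker, add a
# fixed-interval timer, then shut everything down. The callable and its
# arguments are hypothetical.
if __name__ == '__main__':
    def _work(label):
        LOG.info("working on %s", label)
    tg = ThreadGroup(thread_pool_size=2)
    tg.add_thread(_work, "one-shot task")
    # positional order: interval, callback, initial_delay, then *args for callback
    tg.add_timer(5, _work, None, "periodic task")
    tg.stop(graceful=True)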
| {
"content_hash": "2c0ad2b7534965adfa32ee0a0a3da2f1",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 31.21641791044776,
"alnum_prop": 0.5825962228065982,
"repo_name": "biirdy/monasca-anomaly",
"id": "83df8b386f019e159090a81d7b637de822fce323",
"size": "4788",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack/common/threadgroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "162615"
},
{
"name": "Shell",
"bytes": "3236"
}
],
"symlink_target": ""
} |
from base import PacfmObjectBase, PacfmContainerBase
from pacfm.model import file_provider
class Highlight(PacfmObjectBase):
def __init__(self, name, pathways, level, color, checked=True):
PacfmObjectBase.__init__(self, name= name, level=level, color=color)
self.pathways= pathways
self.checked= checked
def add_pathway(self, pathway):
if pathway not in self.pathways:
self.pathways.append(pathway)
def remove_pathway(self, pathway):
if pathway in self.pathways:
self.pathways.remove(pathway)
class HighlightContainer(PacfmContainerBase):
def __init__(self, highlights=[]):
PacfmContainerBase.__init__(self, items= highlights)
self.dmp_file= file_provider["launching"]["highlights"]
def add(self, name= "New highlight", names=[], level=2, color=(127,127,127), checked= True):
i=1
base= name
while name in self.names:
i+=1
name= base+' %s' %i
h= Highlight(name, names, level, color, checked)
self.items.append(h)
self.set_current(h)
return h
    def add_pathway(self, pathway):
        self.get_current().add_pathway(pathway)
    def remove_pathway(self, pathway):
        # Highlight exposes remove_pathway, not remove
        self.get_current().remove_pathway(pathway)
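# --- Illustrative usage (not part of the original module) ---
# A hedged, comment-only sketch; the highlight and pathway names are
# hypothetical, and constructing HighlightContainer assumes the packaged
# highlight dump file is available.
# container = HighlightContainer()
# hl = container.add(name="Carbon metabolism", names=["Glycolysis"],
#                    level=2, color=(200, 60, 60))
# container.add_pathway("Citrate cycle")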
| {
"content_hash": "a2eba990e8e50fd775ec75fd0c0dd631",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 96,
"avg_line_length": 30.88372093023256,
"alnum_prop": 0.6212349397590361,
"repo_name": "ecotox/pacfm",
"id": "64ac9f2f66fdb7a580a8644b257a0dffc151a79b",
"size": "1328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pacfm/model/tools/circos/highlight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "96986"
},
{
"name": "C",
"bytes": "2253401"
},
{
"name": "Java",
"bytes": "28647"
},
{
"name": "Makefile",
"bytes": "88653"
},
{
"name": "Matlab",
"bytes": "14547"
},
{
"name": "Objective-C",
"bytes": "1061"
},
{
"name": "OpenEdge ABL",
"bytes": "99470"
},
{
"name": "Pascal",
"bytes": "34142"
},
{
"name": "Perl",
"bytes": "705775"
},
{
"name": "Python",
"bytes": "224920"
},
{
"name": "Shell",
"bytes": "17458"
}
],
"symlink_target": ""
} |
"""
aspen.configuration
+++++++++++++++++++
Define configuration objects.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import mimetypes
import os
import sys
from collections import defaultdict
import aspen
from . import parse
from .. import logging
from ..exceptions import ConfigurationError
from ..utils import ascii_dammit
from ..typecasting import defaults as default_typecasters
import aspen.body_parsers
from ..simplates.renderers import factories
default_indices = lambda: ['index.html', 'index.json', 'index',
'index.html.spt', 'index.json.spt', 'index.spt']
# 'name': (default, from_unicode)
KNOBS = \
{ 'base_url': ('', parse.identity)
, 'changes_reload': (False, parse.yes_no)
, 'charset_dynamic': ('UTF-8', parse.charset)
, 'charset_static': (None, parse.charset)
, 'indices': (default_indices, parse.list_)
, 'list_directories': (False, parse.yes_no)
, 'logging_threshold': (0, int)
, 'media_type_default': ('text/plain', parse.media_type)
, 'media_type_json': ('application/json', parse.media_type)
, 'project_root': (None, parse.identity)
, 'renderer_default': ('stdlib_percent', parse.renderer)
, 'show_tracebacks': (False, parse.yes_no)
, 'colorize_tracebacks':(True, parse.yes_no)
, 'www_root': (None, parse.identity)
}
class Configurable(object):
"""Mixin object for aggregating configuration from several sources.
This is implemented in such a way that we get helpful log output: we
iterate over settings first, not over contexts first (defaults,
environment, kwargs).
"""
def _set(self, name, hydrated, flat, context, name_in_context):
"""Set value at self.name, calling hydrated if it's callable.
"""
if aspen.is_callable(hydrated):
hydrated = hydrated() # Call it if we can.
setattr(self, name, hydrated)
if name_in_context:
assert isinstance(flat, unicode) # sanity check
name_in_context = " %s=%s" % (name_in_context, flat)
out = " %-22s %-30s %-24s"
return out % (name, hydrated, context + name_in_context)
def set(self, name, raw, from_unicode, context, name_in_context):
error = None
try:
value = raw
if isinstance(value, str):
value = raw.decode('US-ASCII')
hydrated = from_unicode(value)
except UnicodeDecodeError, error:
value = ascii_dammit(value)
error_detail = "Configuration values must be US-ASCII."
except ValueError, error:
error_detail = error.args[0]
if error is not None:
msg = "Got a bad value '%s' for %s %s:"
msg %= (value, context, name_in_context)
if error_detail:
msg += " " + error_detail + "."
raise ConfigurationError(msg)
# special-case lists, so we can layer them
if from_unicode is parse.list_:
extend, new_value = hydrated
if extend:
old_value = getattr(self, name)
hydrated = old_value + new_value
else:
hydrated = new_value
args = (name, hydrated, value, context, name_in_context)
return self._set(*args)
def configure(self, **kwargs):
"""Takes a dictionary of strings/unicodes to strings/unicodes.
"""
# Do some base-line configuration.
# ================================
# We want to do the following configuration of our Python environment
# regardless of the user's configuration preferences
# mimetypes
aspens_mimetypes = os.path.join(os.path.dirname(__file__), 'mime.types')
mimetypes.knownfiles += [aspens_mimetypes]
# mimetypes.init is called below after the user has a turn.
# XXX register codecs here
self.typecasters = default_typecasters
# Configure from defaults, environment, and kwargs.
# =================================================
msgs = ["Reading configuration from defaults, environment, and "
"kwargs."] # can't actually log until configured
for name, (default, func) in sorted(KNOBS.items()):
# set the default value for this variable
msgs.append(self._set(name, default, None, "default", ''))
# set from the environment
envvar = 'ASPEN_' + name.upper()
value = os.environ.get(envvar, '').strip()
if value:
msgs.append(self.set( name
, value
, func
, "environment variable"
, envvar
))
# set from kwargs
value = kwargs.get(name)
if value is not None:
msgs.append(self.set( name
, value
, func
, "kwargs"
, name
))
# log appropriately
aspen.log_dammit(os.linesep.join(msgs))
# Set some attributes.
# ====================
def safe_getcwd(errorstr):
try:
# If the working directory no longer exists, then the following
# will raise OSError: [Errno 2] No such file or directory. I
# swear I've seen this under supervisor, though I don't have
# steps to reproduce. :-( To get around this you specify a
# www_root explicitly, or you can use supervisor's cwd
# facility.
return os.getcwd()
except OSError, err:
if err.errno != errno.ENOENT:
raise
raise ConfigurationError(errorstr)
# project root
if self.project_root is None:
aspen.log_dammit("project_root not configured (no template bases, "
"etc.).")
else:
# canonicalize it
if not os.path.isabs(self.project_root):
aspen.log_dammit("project_root is relative to CWD: '%s'."
% self.project_root)
cwd = safe_getcwd("Could not get a current working "
"directory. You can specify "
"ASPEN_PROJECT_ROOT in the environment, "
"or project_root in kwargs.")
self.project_root = os.path.join(cwd, self.project_root)
self.project_root = os.path.realpath(self.project_root)
aspen.log_dammit("project_root set to %s." % self.project_root)
# mime.types
users_mimetypes = os.path.join(self.project_root, 'mime.types')
mimetypes.knownfiles += [users_mimetypes]
# PYTHONPATH
sys.path.insert(0, self.project_root)
# www_root
if self.www_root is None:
self.www_root = safe_getcwd("Could not get a current working "
"directory. You can specify "
"ASPEN_WWW_ROOT in the environment, "
"or www_root in kwargs.")
self.www_root = os.path.realpath(self.www_root)
# load bodyparsers
self.body_parsers = {
"application/x-www-form-urlencoded": aspen.body_parsers.formdata,
"multipart/form-data": aspen.body_parsers.formdata,
self.media_type_json: aspen.body_parsers.jsondata
}
# load renderers
self.renderer_factories = factories(self)
self.default_renderers_by_media_type = defaultdict(lambda: self.renderer_default)
self.default_renderers_by_media_type[self.media_type_json] = 'json_dump'
# mime.types
# ==========
# It turns out that init'ing mimetypes is somewhat expensive. This is
# significant in testing, though in dev/production you wouldn't notice.
# In any case this means that if a devuser inits mimetypes themselves
# then we won't do so again here, which is fine. Right?
if not mimetypes.inited:
mimetypes.init()
self.show_renderers()
def show_renderers(self):
aspen.log_dammit("Renderers (*ed are unavailable, CAPS is default):")
width = max(map(len, self.renderer_factories))
for name, factory in self.renderer_factories.items():
star, error = " ", ""
if isinstance(factory, ImportError):
star = "*"
error = "ImportError: " + factory.args[0]
if name == self.renderer_default:
name = name.upper()
name = name.ljust(width + 2)
aspen.log_dammit(" %s%s%s" % (star, name, error))
default_renderer = self.renderer_factories[self.renderer_default]
if isinstance(default_renderer, ImportError):
msg = "\033[1;31mImportError loading the default renderer, %s:\033[0m"
aspen.log_dammit(msg % self.renderer_default)
sys.excepthook(*default_renderer.info)
raise default_renderer
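# --- Illustrative usage (not part of the original module) ---
# A hedged, comment-only sketch of driving Configurable.configure(); the
# keyword arguments are just a sample of the KNOBS defined above and the
# values are placeholders.
# configurable = Configurable()
# configurable.configure(www_root='.', changes_reload='yes',
#                        logging_threshold='1')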
| {
"content_hash": "294c67c82b5ef6e7d6e65bd011b3f1ce",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 89,
"avg_line_length": 39.11155378486056,
"alnum_prop": 0.5318325353977794,
"repo_name": "jaraco/aspen",
"id": "67e590cb11e37c8af72e1ebf8958cc542427cb3d",
"size": "9817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aspen/configuration/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "388419"
},
{
"name": "Shell",
"bytes": "1328"
}
],
"symlink_target": ""
} |
'''
Connection module for Amazon S3
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
s3.keyid: GKTADJGHEIQSXMKKRBJ08H
s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A service_url may also be specified in the configuration::
s3.service_url: s3.amazonaws.com
If a service_url is not specified, the default is s3.amazonaws.com. This
may appear in various documentation as an "endpoint". A comprehensive list
for Amazon S3 may be found at::
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
The service_url will form the basis for the final endpoint that is used to
query the service.
This module should be usable to query other S3-like services, such as
Eucalyptus.
'''
# Import Python libs
import logging
# Import Salt libs
import salt.utils
import salt.utils.s3
log = logging.getLogger(__name__)
def __virtual__():
'''
Should work on any modern Python installation
'''
return 's3'
def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=None):
'''
Delete a bucket, or delete an object from a bucket.
CLI Example to delete a bucket::
salt myminion s3.delete mybucket
CLI Example to delete an object from a bucket::
salt myminion s3.delete mybucket remoteobject
'''
key, keyid, service_url = _get_key(key, keyid, service_url)
return salt.utils.s3.query(method='DELETE',
bucket=bucket,
path=path,
action=action,
key=key,
keyid=keyid,
service_url=service_url)
def get(bucket=None, path=None, return_bin=False, action=None,
local_file=None, key=None, keyid=None, service_url=None):
'''
List the contents of a bucket, or return an object from a bucket. Set
return_bin to True in order to retrieve an object wholesale. Otherwise,
Salt will attempt to parse an XML response.
CLI Example to list buckets:
.. code-block:: bash
salt myminion s3.get
CLI Example to list the contents of a bucket:
.. code-block:: bash
salt myminion s3.get mybucket
CLI Example to return the binary contents of an object:
.. code-block:: bash
salt myminion s3.get mybucket myfile.png return_bin=True
CLI Example to save the binary contents of an object to a local file:
.. code-block:: bash
salt myminion s3.get mybucket myfile.png local_file=/tmp/myfile.png
It is also possible to perform an action on a bucket. Currently, S3
supports the following actions::
acl
cors
lifecycle
policy
location
logging
notification
tagging
versions
requestPayment
versioning
website
To perform an action on a bucket:
.. code-block:: bash
salt myminion s3.get mybucket myfile.png action=acl
'''
key, keyid, service_url = _get_key(key, keyid, service_url)
return salt.utils.s3.query(method='GET',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
service_url=service_url)
def head(bucket, path=None, key=None, keyid=None, service_url=None):
'''
Return the metadata for a bucket, or an object in a bucket.
CLI Examples:
.. code-block:: bash
salt myminion s3.head mybucket
salt myminion s3.head mybucket myfile.png
'''
key, keyid, service_url = _get_key(key, keyid, service_url)
return salt.utils.s3.query(method='HEAD',
bucket=bucket,
path=path,
key=key,
keyid=keyid,
service_url=service_url)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None):
'''
Create a new bucket, or upload an object to a bucket.
CLI Example to create a bucket:
.. code-block:: bash
salt myminion s3.put mybucket
CLI Example to upload an object to a bucket:
.. code-block:: bash
salt myminion s3.put mybucket remotepath local_path=/path/to/file
'''
key, keyid, service_url = _get_key(key, keyid, service_url)
return salt.utils.s3.query(method='PUT',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
service_url=service_url)
def _get_key(key, keyid, service_url):
'''
Examine the keys, and populate as necessary
'''
if not key and __salt__['config.option']('s3.key'):
key = __salt__['config.option']('s3.key')
if not keyid and __salt__['config.option']('s3.keyid'):
keyid = __salt__['config.option']('s3.keyid')
if not service_url and __salt__['config.option']('s3.service_url'):
service_url = __salt__['config.option']('s3.service_url')
if not service_url:
service_url = 's3.amazonaws.com'
return key, keyid, service_url
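# --- Illustrative usage (not part of the original module) ---
# Besides the CLI examples in the docstrings above, other execution modules
# can reach this one through the __salt__ dunder; the bucket, path and local
# file below are hypothetical.
# __salt__['s3.get']('mybucket', 'backups/minion.tgz',
#                    local_file='/tmp/minion.tgz')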
| {
"content_hash": "cc5b9147a09d1bee2414314f001c9cb7",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 78,
"avg_line_length": 28.845,
"alnum_prop": 0.5689027561102444,
"repo_name": "victorywang80/Maintenance",
"id": "e93ee95cde5319b2f37b0c33a0b5865058ab2345",
"size": "5793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saltstack/src/salt/modules/s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160954"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "4522522"
},
{
"name": "Scheme",
"bytes": "7488"
},
{
"name": "Shell",
"bytes": "14653"
}
],
"symlink_target": ""
} |
from django.test import SimpleTestCase
from ..utils import setup
class TruncatecharsTests(SimpleTestCase):
@setup({'truncatechars01': '{{ a|truncatechars:5 }}'})
def test_truncatechars01(self):
output = self.engine.render_to_string('truncatechars01', {'a': 'Testing, testing'})
self.assertEqual(output, 'Te...')
@setup({'truncatechars02': '{{ a|truncatechars:7 }}'})
def test_truncatechars02(self):
output = self.engine.render_to_string('truncatechars02', {'a': 'Testing'})
self.assertEqual(output, 'Testing')
| {
"content_hash": "6c2cea52d264fcfddee3d265a37c1433",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 91,
"avg_line_length": 36.1875,
"alnum_prop": 0.6511226252158895,
"repo_name": "yephper/django",
"id": "6e62457bd281a8bb46bd9249a65ea8dea8f81331",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/template_tests/filter_tests/test_truncatechars.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
from flask import Blueprint
navbar_blueprint = Blueprint("navbar", __name__, static_folder="static", template_folder="templates")
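# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of registering this blueprint on an application; the app
# object and URL prefix are hypothetical.
if __name__ == "__main__":
    from flask import Flask
    _app = Flask(__name__)
    _app.register_blueprint(navbar_blueprint, url_prefix="/navbar")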
| {
"content_hash": "167dd99d913a7c1729a2572842d1cda1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 101,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.7557251908396947,
"repo_name": "marcoprado17/flask-bone",
"id": "3af8b455982c1f0d9091ecc8e5660fb1e3acfac8",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blueprints/components/lightly_route_dependent/navbar/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3196"
},
{
"name": "HTML",
"bytes": "10430"
},
{
"name": "JavaScript",
"bytes": "3983"
},
{
"name": "Python",
"bytes": "96101"
},
{
"name": "Shell",
"bytes": "2801"
}
],
"symlink_target": ""
} |