import yaml
import sys
layout = yaml.load(open('zuul/layout.yaml'))
def check_merge_template():
"""Check that each job has a merge-check template."""
errors = False
print("\nChecking for usage of merge template")
print("====================================")
for project in layout['projects']:
if project['name'] == 'z/tempest':
continue
try:
correct = False
for template in project['template']:
if template['name'] == 'merge-check':
correct = True
if not correct:
raise
except:
print("Project %s has no merge-check template" % project['name'])
errors = True
return errors
def normalize(s):
"Normalize string for comparison."
return s.lower().replace("_", "-")
def check_sections():
"""Check that the projects are in alphabetical order per section."""
print("Checking sections for alphabetical order")
print("========================================")
# Note that the file has different sections and we need to sort
# entries within these sections.
errors = False
# Skip all entries before the first section header
firstEntry = True
last = ""
for line in open('zuul/layout.yaml', 'r'):
if line.startswith('# Section:'):
last = ""
section = line[10:].strip()
print("Checking section '%s'" % section)
firstEntry = False
if line.startswith(' - name: ') and not firstEntry:
current = line[10:].strip()
if (normalize(last) > normalize(current) and
last != 'z/tempest'):
print(" Wrong alphabetical order: %(last)s, %(current)s" %
{"last": last, "current": current})
errors = True
last = current
return errors
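# Editor's note -- an illustrative fragment of the layout.yaml this check walks
# (project names are made up; the real file lists entries two spaces deep):
#   # Section: Infrastructure
#     - name: openstack/alpha
#     - name: openstack/beta
# Within each '# Section:' block the '- name:' entries must stay in
# alphabetical order, which is what check_sections() enforces above.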
def check_formatting():
errors = False
count = 1
print("Checking indents")
print("================")
for line in open('zuul/layout.yaml', 'r'):
if (len(line) - len(line.lstrip(' '))) % 2 != 0:
print("Line %(count)s not indented by multiple of 2:\n\t%(line)s" %
{"count": count, "line": line})
errors = True
count = count + 1
return errors
def check_all():
errors = check_sections()
errors = check_formatting() or errors
if errors:
print("\nFound errors in layout.yaml!")
else:
print("\nNo errors found in layout.yaml!")
return errors
if __name__ == "__main__":
sys.exit(check_all())
| {
"content_hash": "08dd4842b9637cdd10481188a7fe7820",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 29.133333333333333,
"alnum_prop": 0.532418001525553,
"repo_name": "CiscoSystems/project-config-third-party",
"id": "daf1619440afb2238fcd52fec48e3deef668b481",
"size": "3268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/layout-checks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34800"
},
{
"name": "Shell",
"bytes": "10220"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mail User Guide'
copyright = u'2014, Francesco Ceccon'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Mail User Guide'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MailUserGuidedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MailUserGuide.tex', u'Mail User Guide',
u'Francesco Ceccon', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mailuserguide', u'Mail User Guide',
[u'Francesco Ceccon'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MailUserGuide', u'Mail User Guide',
u'Francesco Ceccon', 'MailUserGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Mail User Guide'
epub_author = u'Francesco Ceccon'
epub_publisher = u'Francesco Ceccon'
epub_copyright = u'2014, Francesco Ceccon'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| {
"content_hash": "fd1860a00779478f61a05ba8e81599bd",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 80,
"avg_line_length": 31.613970588235293,
"alnum_prop": 0.7017095011047796,
"repo_name": "fracek/mail-dylan",
"id": "875d719598697456c39cddac25b37ba00fa5ca5a",
"size": "9011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "documentation/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dylan",
"bytes": "117501"
},
{
"name": "Python",
"bytes": "9011"
},
{
"name": "Shell",
"bytes": "5119"
}
],
"symlink_target": ""
} |
class TimeoutError(Exception):
"""Raised when a timer runs out."""
pass
def timeout(seconds, e=None):
"""
A decorator for blocking methods to cause them to timeout. Can be used
like this:
@timeout(30)
def foo(arg1, kwarg="baz"):
for x in xrange(1000000000):
print "%s %s" % (arg1, kwarg)
print x
or like this:
ridiculous = timeout(30)(foo("bar"))
:param seconds:
Number of seconds to wait before raising :class:`TimeoutError`.
:param e:
Error message to pass to :class:`TimeoutError`. Default None.
:return:
The result of the original function, or else an instance of
:class:`TimeoutError`.
"""
from signal import alarm, signal, SIGALRM
from functools import wraps
def decorator(func):
def _timeout(signum, frame):
raise TimeoutError, e
def wrapper(*args, **kwargs):
signal(SIGALRM, _timeout)
alarm(seconds)
try:
res = func(*args, **kwargs)
finally:
alarm(0)
return res
return wraps(func)(wrapper)
return decorator
def deferred_timeout(seconds, e=None):
"""
Decorator for adding a timeout to an instance of a
:class:`twisted.internet.defer.Deferred`. Can be used like this:
@deferred_timeout(30)
def foo(arg1, kwarg="baz"):
for x in xrange(1000000000):
print "%s %s" % (arg1, kwarg)
print x
or like this:
ridiculous = deferred_timeout(30)(foo("bar"))
:param seconds:
Number of seconds to wait before raising :class:`TimeoutError`.
:param e:
Error message to pass to :class:`TimeoutError`. Default None.
:return:
        The result of the original :class:`twisted.internet.defer.Deferred`
or else a :class:`TimeoutError`.
"""
from twisted.internet import defer, reactor
def wrapper(func):
@defer.inlineCallbacks
def _timeout(*args, **kwargs):
d_original = func(*args, **kwargs)
if not isinstance(d_original, defer.Deferred):
defer.returnValue(d_original) ## fail gracefully
d_timeout = defer.Deferred()
timeup = reactor.callLater(seconds, d_timeout.callback, None)
try:
original_result, timeout_result = \
yield defer.DeferredList([d_original, d_timeout],
fireOnOneCallback=True,
fireOnOneErrback=True,
consumeErrors=True)
except defer.FirstError, dfe:
assert dfe.index == 0 ## error in original
timeup.cancel()
dfe.subFailure.raiseException()
else:
if d_timeout.called: ## timeout
d_original.cancel()
raise TimeoutError, e
timeup.cancel() ## no timeout
defer.returnValue(d_original)
return _timeout
return wrapper
| {
"content_hash": "dea0654561766d2d33acfb6911db8e3f",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 74,
"avg_line_length": 33.427083333333336,
"alnum_prop": 0.5403552508569648,
"repo_name": "hackerberry/ooni-probe",
"id": "e03fd7408c60c2da179dd4339c09cc478025ae8c",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooni/utils/timer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "519235"
},
{
"name": "Shell",
"bytes": "10418"
}
],
"symlink_target": ""
} |
from zxtest import zxCoreTestCase
class OracleSPTest(zxCoreTestCase):
def setUp(self):
zxCoreTestCase.setUp(self)
c = self.cursor()
try:
try:
c.execute("drop table sptest")
except:
self.db.rollback()
try:
c.execute("create table sptest (x varchar2(20))")
c.execute("create or replace procedure procnone is begin insert into sptest values ('testing'); end;")
c.execute("create or replace procedure procin (y in varchar2) is begin insert into sptest values (y); end;")
c.execute("create or replace procedure procout (y out varchar2) is begin y := 'tested'; end;")
c.execute("create or replace procedure procinout (y out varchar2, z in varchar2) is begin insert into sptest values (z); y := 'tested'; end;")
c.execute("create or replace function funcnone return varchar2 is begin return 'tested'; end;")
c.execute("create or replace function funcin (y varchar2) return varchar2 is begin return y || y; end;")
c.execute("create or replace function funcout (y out varchar2) return varchar2 is begin y := 'tested'; return 'returned'; end;")
self.db.commit()
except:
self.db.rollback()
self.fail("procedure creation failed")
self.proc_errors("PROC")
self.proc_errors("FUNC")
finally:
c.close()
def tearDown(self):
zxCoreTestCase.tearDown(self)
def proc_errors(self, name):
c = self.cursor()
try:
c.execute("select * from user_errors where name like '%s%%'" % (name.upper()))
errors = c.fetchall()
try:
assert not errors, "found errors"
except AssertionError, e:
print "printing errors:"
for a in errors:
print a
raise e
finally:
c.close()
def testCursor(self):
c = self.cursor()
try:
c.execute("insert into sptest values ('a')")
c.execute("insert into sptest values ('b')")
c.execute("insert into sptest values ('c')")
c.execute("insert into sptest values ('d')")
c.execute("insert into sptest values ('e')")
c.execute("""
CREATE OR REPLACE PACKAGE types
AS
TYPE ref_cursor IS REF CURSOR;
END;
""")
c.execute("""
CREATE OR REPLACE FUNCTION funccur(v_x IN VARCHAR)
RETURN types.ref_cursor
AS
funccur_cursor types.ref_cursor;
BEGIN
OPEN funccur_cursor FOR
SELECT x FROM sptest WHERE x < v_x;
RETURN funccur_cursor;
END;
""")
self.proc_errors("funccur")
c.callproc("funccur", ("z",))
data = c.fetchall()
self.assertEquals(5, len(data))
c.callproc("funccur", ("c",))
data = c.fetchall()
self.assertEquals(2, len(data))
finally:
c.close()
def testProcin(self):
c = self.cursor()
try:
params = ["testProcin"]
c.callproc("procin", params)
self.assertEquals([], c.fetchall())
c.execute("select * from sptest")
self.assertEquals(1, len(c.fetchall()))
finally:
c.close()
def testProcinout(self):
c = self.cursor()
try:
params = [None, "testing"]
c.callproc("procinout", params)
data = c.fetchone()
assert data is None, "data was not None"
c.execute("select * from sptest")
data = c.fetchone()
self.assertEquals("testing", data[0])
self.assertEquals("tested", params[0])
finally:
c.close()
def testFuncnone(self):
c = self.cursor()
try:
c.callproc("funcnone")
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("tested", data[0])
finally:
c.close()
def testFuncin(self):
c = self.cursor()
try:
params = ["testing"]
c.callproc("funcin", params)
self.assertEquals(1, c.rowcount)
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("testingtesting", data[0])
finally:
c.close()
def testCallingWithKws(self):
c = self.cursor()
try:
params = ["testing"]
c.callproc("funcin", params=params)
self.assertEquals(1, c.rowcount)
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("testingtesting", data[0])
finally:
c.close()
def testFuncout(self):
c = self.cursor()
try:
params = [None]
c.callproc("funcout", params)
data = c.fetchone()
assert data is not None, "data was None"
self.assertEquals(1, len(data))
self.assertEquals("returned", data[0])
self.assertEquals("tested", params[0].strip())
finally:
c.close()
def testMultipleFetch(self):
"""testing the second fetch call to a callproc() is None"""
c = self.cursor()
try:
c.callproc("funcnone")
data = c.fetchone()
assert data is not None, "data was None"
data = c.fetchone()
assert data is None, "data was not None"
finally:
c.close()
class SQLServerSPTest(zxCoreTestCase):
def testProcWithResultSet(self):
c = self.cursor()
try:
for a in (("table", "sptest"), ("procedure", "sp_proctest")):
try:
c.execute("drop %s %s" % (a))
except:
pass
c.execute("create table sptest (a int, b varchar(32))")
c.execute("insert into sptest values (1, 'hello')")
c.execute("insert into sptest values (2, 'there')")
c.execute("insert into sptest values (3, 'goodbye')")
c.execute(""" create procedure sp_proctest (@A int) as select a, b from sptest where a <= @A """)
self.db.commit()
c.callproc("sp_proctest", (2,))
data = c.fetchall()
self.assertEquals(2, len(data))
self.assertEquals(2, len(c.description))
assert c.nextset() is not None, "expected an additional result set"
data = c.fetchall()
self.assertEquals(1, len(data))
self.assertEquals(1, len(c.description))
finally:
c.close()
# def testSalesByCategory(self):
# c = self.cursor()
# try:
# c.execute("use northwind")
# c.callproc(("northwind", "dbo", "SalesByCategory"), ["Seafood", "1998"])
# data = c.fetchall()
# assert data is not None, "no results from SalesByCategory"
# assert len(data) > 0, "expected numerous results"
# finally:
# c.close()
| {
"content_hash": "756f8d659eb7f7cf6be57835dd787e3a",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 174,
"avg_line_length": 44.486363636363635,
"alnum_prop": 0.3957290283028507,
"repo_name": "babble/babble",
"id": "9e6c12cc825ba815d1633a1c14580eb4dc3959db",
"size": "9941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "include/jython/Lib/test/zxjdbc/sptest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3378"
},
{
"name": "Groovy",
"bytes": "16151"
},
{
"name": "Java",
"bytes": "7316421"
},
{
"name": "JavaScript",
"bytes": "644844"
},
{
"name": "Python",
"bytes": "10107943"
},
{
"name": "Ruby",
"bytes": "4961765"
},
{
"name": "Shell",
"bytes": "2575"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
class Solution:
"""
@param matrix, a list of lists of integers
@param target, an integer
@return a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
# write your code here
i, j = len(matrix) - 1, 0
while (i >= 0 and j < len(matrix[i])):
if (matrix[i][j] == target):
return True
elif (matrix[i][j] < target):
j += 1
else:
i -= 1
return False
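# Minimal usage sketch (editor's addition; the 3x3 matrix is illustrative, with
# rows and columns sorted ascending as the staircase search above assumes):
if __name__ == "__main__":
    m = [[1, 3, 5], [7, 9, 11], [13, 15, 17]]
    print(Solution().searchMatrix(m, 9))   # True: walks 13 -> 7 -> 9
    print(Solution().searchMatrix(m, 4))   # False: 13 -> 7 -> 1 -> 3 -> 5, then exits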
| {
"content_hash": "04cbb828c188c556bad843510d35b29b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 62,
"avg_line_length": 30.823529411764707,
"alnum_prop": 0.4961832061068702,
"repo_name": "Rhadow/leetcode",
"id": "5a6fff879cd57a0d1e09c38caca5f930dd0b5d70",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lintcode/Easy/028_Search_a_2D_Matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "3966"
},
{
"name": "JavaScript",
"bytes": "131066"
},
{
"name": "Python",
"bytes": "83790"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
} |
from pprint import pprint
from datetime import timedelta
from fo2.connections import db_cursor_so
from base.views import O2BaseGetPostView
from utils.table_defs import TableDefs
import cd.forms
from cd.queries.novo_modulo import atividade_cd63
class AtividadeCD63(O2BaseGetPostView):
def __init__(self, *args, **kwargs):
super(AtividadeCD63, self).__init__(*args, **kwargs)
self.Form_class = cd.forms.AtividadeCD63Form
self.template_name = 'cd/novo_modulo/atividade_cd63.html'
self.title_name = 'Atividade no CD'
self.table_defs = TableDefs(
{
'data': ['Data'],
'usuario': ['Obs.'],
'dep': ['Depósito'],
'atividade': ['Atividade'],
'qtd': ['Quantidade'],
},
['header'],
)
def mount_context(self):
cursor = db_cursor_so(self.request)
data_de = self.form.cleaned_data['data_de']
data_ate = self.form.cleaned_data['data_ate']
if not data_ate:
data_ate = data_de
data_ate = data_ate + timedelta(days=1)
self.context['data_de'] = data_de
self.context['data_ate'] = data_ate
data = atividade_cd63.query(cursor, data_de, data_ate)
if not data:
self.context['erro'] = 'Atividade não encontrada'
return
self.context.update(self.table_defs.hfs_dict())
self.context.update({
'data': data,
})
| {
"content_hash": "b77bc5ea31a0f6d2bc002d0a79a3c424",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 65,
"avg_line_length": 29.568627450980394,
"alnum_prop": 0.5709549071618037,
"repo_name": "anselmobd/fo2",
"id": "0d8eb725bf73e386dc5f53613834837c0e662197",
"size": "1510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cd/views/novo_modulo/atividade_cd63.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
import numpy as np
import blockmodels as bm
import css_em as em
# Do it
# 1) With k > 2
# 2) Different rates for different groups
#
G_true = bm.blocked_matrix(20, 2, dim=2) # on=1, off=0
X = [bm.blocked_matrix(20, 2, dim=2, on=.8, off=.2) for i in range(5)]
X += [bm.blocked_matrix(20, 2, dim=2, on=.9, off=.1) for i in range(5)]
# Concatenate the lists -- adding arrays adds the elements.
X = np.array(X)
# HAVE to ROUND the final estimate. (Otherwise -- weird shit.)
# Find a GOOD STARTING POINT.
b = bm.blockmodel(G_true, k=2)
#
# INDICES: it thinks the ten samples are the first ten people, and because
# they're blocked in order, they all end up in the same cluster -- meaning
# there just objectively isn't anything for the other group. SO.
#
indices = range(0,20,2)
G_hat, b_hat = em.em(X, k=2, indices=indices)
# Maybe it's the GROUPS -- there isn't a ground truth ...
# But it also shouldn't matter. Wait -- of course it does --
# Why aren't they getting the groups right?
# THIS isn't taking indices into account. Duh. Asshole. (Yeah?)
pfp, pfn = em.estimate_error_rates(*em.count_errors(X, G_true, b.groups, 2, indices))
# What ARE all these things? What SHOULD I be seeing?
print 'b.p: \n', b.p
print 'b_hat.p: \n', b_hat.p
print 'pfp: \n ', pfp
print 'pfn: \n ', pfn
| {
"content_hash": "12a5f4db03c795274c42aa7e9e037239",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 85,
"avg_line_length": 30.69047619047619,
"alnum_prop": 0.674166020170675,
"repo_name": "amloewi/css-blockmodels",
"id": "3049bf1c67390464519f9d7c121aac9b0262956a",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "em_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "79396"
},
{
"name": "Game Maker Language",
"bytes": "509"
},
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "1751473"
},
{
"name": "R",
"bytes": "4361"
},
{
"name": "TeX",
"bytes": "17189"
}
],
"symlink_target": ""
} |
"""Local filesystem-based filesystem plugin."""
import glob
import os
import shutil
from typing import Any, Callable, Iterable, List, Optional, Tuple
from tfx.dsl.io import filesystem
from tfx.dsl.io import filesystem_registry
from tfx.dsl.io.filesystem import PathType
class LocalFilesystem(filesystem.Filesystem):
"""Filesystem that uses local file operations."""
SUPPORTED_SCHEMES = ['']
@staticmethod
def open(name: PathType, mode: str = 'r') -> Any:
try:
return open(name, mode=mode)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
if not overwrite and os.path.exists(dst):
raise OSError(
('Destination file %r already exists and argument `overwrite` is '
'false.') % dst)
try:
shutil.copyfile(src, dst)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def exists(path: PathType) -> bool:
return os.path.exists(path)
@staticmethod
def glob(pattern: PathType) -> List[PathType]:
return glob.glob(pattern)
@staticmethod
def isdir(path: PathType) -> bool:
return os.path.isdir(path)
@staticmethod
def listdir(path: PathType) -> List[PathType]:
try:
return os.listdir(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def makedirs(path: PathType) -> None:
os.makedirs(path, exist_ok=True)
@staticmethod
def mkdir(path: PathType) -> None:
try:
os.mkdir(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def remove(path: PathType) -> None:
try:
os.remove(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def rename(src: PathType, dst: PathType, overwrite: bool = False) -> None:
if not overwrite and os.path.exists(dst):
raise OSError(
('Destination path %r already exists and argument `overwrite` is '
'false.') % dst)
try:
os.rename(src, dst)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def rmtree(path: PathType) -> None:
try:
shutil.rmtree(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def stat(path: PathType) -> Any:
try:
return os.stat(path)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
@staticmethod
def walk(
top: PathType,
topdown: bool = True,
onerror: Optional[Callable[..., None]] = None
) -> Iterable[Tuple[PathType, List[PathType], List[PathType]]]:
try:
yield from os.walk(top, topdown=topdown, onerror=onerror)
except FileNotFoundError as e:
raise filesystem.NotFoundError() from e
filesystem_registry.DEFAULT_FILESYSTEM_REGISTRY.register(
LocalFilesystem, priority=20)
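# Minimal usage sketch (editor's addition; the paths are placeholders). The
# plugin's methods are plain staticmethods, so they can be exercised directly:
#   LocalFilesystem.makedirs('/tmp/example_dir')
#   with LocalFilesystem.open('/tmp/example_dir/hello.txt', 'w') as f:
#     f.write('hello')
#   assert LocalFilesystem.exists('/tmp/example_dir/hello.txt')
# In normal use the registry entry above routes scheme-less paths to this class.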
| {
"content_hash": "4e0fa0aa15570354dfdfa79e3f73545f",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 76,
"avg_line_length": 27.026785714285715,
"alnum_prop": 0.6788899900891973,
"repo_name": "tensorflow/tfx",
"id": "21dceef40981153a058da0b3e975a01005fdc913",
"size": "3623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/dsl/io/plugins/local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
} |
import sys
import os
import django
from django.conf import settings
from django.core.management import execute_from_command_line
urlpatterns = []
DEBUG = True
TEMPLATE_DEBUG = DEBUG
BASE_DIR = os.path.dirname(__file__)
ROOT_URLCONF = __name__
SECRET_KEY = 'qwerty'
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
INSTALLED_APPS = [
'django_nose',
'redisca.admin_sidebar',
'redisca.template',
'redisca.seo',
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
def main():
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
settings.configure(**globals())
django.setup()
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| {
"content_hash": "5387b525463a0f20491780231aea922f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 66,
"avg_line_length": 18.244444444444444,
"alnum_prop": 0.6577344701583435,
"repo_name": "redisca/django-redisca",
"id": "30ee9dc19a1885f28f915751857316321ab60c55",
"size": "843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "742"
},
{
"name": "HTML",
"bytes": "6744"
},
{
"name": "Python",
"bytes": "22737"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
import os
import sys
import string
class bcolors:
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
#################################################
# Written by Jamie Murdock 'b0dach' #
# --Credit Card number Luhn Check-- #
# Checks to see if numbers in a #
# file or single input are valid #
# credit card numbers #
#################################################
# Luhn check algorithm
def luhn_checksum(card_number):
def digits_of(n):
return [int(d) for d in str(n)]
digits = digits_of(card_number)
odd_digits = digits[-1::-2]
even_digits = digits[-2::-2]
checksum = 0
checksum += sum(odd_digits)
for d in even_digits:
checksum += sum(digits_of(d*2))
return checksum % 10
def is_luhn_valid(card_number):
return luhn_checksum(card_number) == 0
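# Worked example (editor's addition, using the standard 4111111111111111 test
# number rather than real card data): the odd-position digits from the right
# sum to 8, the doubled even-position digits contribute 2*7 + 8 = 22, so the
# checksum is 30 % 10 == 0 and the number passes; changing the last digit fails.
assert is_luhn_valid(4111111111111111)
assert not is_luhn_valid(4111111111111112)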
# Menu
os.system('clear')
print bcolors.BLUE + """
-------------------------------------------------
| --Credit Card Number Luhn Check -- |
| |
-------------------------------------------------
| Written by Jamie Murdock 'b0dach' |
| Checks to see if numbers in a |
| file or single input are valid |
| credit card numbers |
-------------------------------------------------
""" + bcolors.ENDC
print """
Do you want to:
1. Enter the location and file of the list (i.e. /tmp/cards.csv)
2. Enter a single number to evaluate
* Press any other key to exit *
Please enter your selection:
"""
menu_choice = (raw_input(""))
# Evaluate numbers in a file
if menu_choice == '1':
try:
# menu_1 was input
input=raw_input("please enter the full path of the csv file: ")
with open (input, "r") as file:
lines=file.read().replace('\r', '\n')
lines=string.split(lines, '\n')
print "Do you want to save the results to a file in the current directory?"
menu_2 = (raw_input(""))
if menu_2 == "y" or menu_2 == "Y" or menu_2 == "Yes" or menu_2 == "yes":
output_name=raw_input("What do you want the output file to be named? ")
output=open(output_name,"w")
for line in lines:
card=line.strip()
if len(card)>=12 and len(card)<=19:
last4=card[len(card)-4:]
first6=card[0:6]
print " "
if is_luhn_valid(card):
print first6+"xxxxxx"+last4+" "+bcolors.RED+"Valid card number" +bcolors.ENDC
output.write(first6+"xxxxxx"+last4+" " +"Valid card number"+"\n")
else:
print first6+"xxxxxx"+last4+" " +bcolors.GREEN+"Not a valid card number"+bcolors.ENDC
output.write(first6+"xxxxxx"+last4+" " +"Not a valid card number"+"\n")
print " "
print "Results have been saved to "+output_name
else:
for line in lines:
card=line.strip()
if len(card)>=12 and len(card)<=19:
last4=card[len(card)-4:]
first6=card[0:6]
print " "
if is_luhn_valid(card):
print first6+"xxxxxx"+last4+" "+bcolors.RED+"Valid card number" +bcolors.ENDC
else:
print first6+"xxxxxx"+last4+" " +bcolors.GREEN+"Not a valid card number"+bcolors.ENDC
except Exception, error:
print bcolors.RED + "\n\n Something went wrong, printing the error: "+ str(error) + bcolors.ENDC
# Evaluate single number
if menu_choice == '2':
card=raw_input (bcolors.GREEN + "Please enter the card number: " + bcolors.ENDC)
valid=card.isdigit ()
if (valid == False):
print "Please enter numbers only"
# else:
if len(card)>=12 and len(card)<=19:
last4=card[len(card)-4:]
first6=card[0:6]
print " "
if is_luhn_valid(card):
print first6+"xxxxxx"+last4+" "+bcolors.RED+"Valid card number" +bcolors.ENDC
else:
print first6+"xxxxxx"+last4+" " +bcolors.GREEN+"Not a valid card number"+bcolors.ENDC
else:
print bcolors.RED + "Credit card numbers must be 12 and 19 digits." + bcolors.ENDC
# press any key to continue function
exit=raw_input('Press Enter to exit')
os.system('clear')
| {
"content_hash": "f5837ff259eba3cfcedf2ae8b93621eb",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 123,
"avg_line_length": 31.632653061224488,
"alnum_prop": 0.5075268817204301,
"repo_name": "Yoshi325/luhn_check",
"id": "d345740ecd7f98c92c03cb144782858149454651",
"size": "4668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "luhn_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4668"
}
],
"symlink_target": ""
} |
"""Tests for the CNN divergence."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import towards_gan_benchmarks.lib.flags
import towards_gan_benchmarks.lib.nn_divergence
lib = towards_gan_benchmarks.lib
class NNDivergenceTest(tf.test.TestCase):
def test_import(self):
self.assertIsNotNone(towards_gan_benchmarks.lib)
def test_nn_div(self):
flags = lib.flags.Flags()
lib.nn_divergence.set_flags(flags)
flags.batch_size = 64
flags.iters = 100
flags.final_eval_iters = 100
flags.ema = 0.9
def real_gen():
while True:
yield np.random.randint(0, 246, (flags.batch_size, 32, 32, 3), 'int32')
def fake_gen():
while True:
yield np.random.randint(10, 256, (flags.batch_size, 32, 32, 3), 'int32')
result = lib.nn_divergence.run(flags, real_gen(), fake_gen())
self.assertTrue(4. <= result <= 7.)
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "c84808dadce37d68ad4f0b8a931a41a0",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 25.195121951219512,
"alnum_prop": 0.6660212971926428,
"repo_name": "google-research/google-research",
"id": "23c6cb8a631d119451918368048981628ec32bcb",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "towards_gan_benchmarks/lib/nn_divergence_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import collections
import re
import warnings
import astropy.io.fits as fits
import numpy as np
import pandas as pd
import requests
from threeML.utils.fermi_relative_mission_time import compute_fermi_relative_mission_times
from threeML.plugins.spectrum.pha_spectrum import PHASpectrumSet
class GBMTTEFile(object):
def __init__(self, ttefile):
"""
A simple class for opening and easily accessing Fermi GBM
TTE Files.
:param ttefile: The filename of the TTE file to be stored
"""
tte = fits.open(ttefile)
self._events = tte['EVENTS'].data['TIME']
self._pha = tte['EVENTS'].data['PHA']
try:
self._trigger_time = tte['PRIMARY'].header['TRIGTIME']
except:
# For continuous data
warnings.warn("There is no trigger time in the TTE file. Must be set manually or using MET relative times.")
self._trigger_time = 0
self._start_events = tte['PRIMARY'].header['TSTART']
self._stop_events = tte['PRIMARY'].header['TSTOP']
self._utc_start = tte['PRIMARY'].header['DATE-OBS']
self._utc_stop = tte['PRIMARY'].header['DATE-END']
self._n_channels = tte['EBOUNDS'].header['NAXIS2']
self._det_name = "%s_%s" % (tte['PRIMARY'].header['INSTRUME'], tte['PRIMARY'].header['DETNAM'])
self._telescope = tte['PRIMARY'].header['TELESCOP']
self._calculate_deadtime()
@property
def trigger_time(self):
return self._trigger_time
@trigger_time.setter
def trigger_time(self, val):
assert self._start_events <= val <= self._stop_events, "Trigger time must be within the interval (%f,%f)" % (
self._start_events, self._stop_events)
self._trigger_time = val
@property
def tstart(self):
return self._start_events
@property
def tstop(self):
return self._stop_events
@property
def arrival_times(self):
return self._events
@property
def n_channels(self):
return self._n_channels
@property
def energies(self):
return self._pha
@property
def mission(self):
"""
Return the name of the mission
:return:
"""
return self._telescope
@property
def det_name(self):
"""
Return the name of the instrument and detector
:return:
"""
return self._det_name
@property
def deadtime(self):
return self._deadtime
def _calculate_deadtime(self):
"""
        Computes an array of deadtimes following the prescription of Meegan et al. (2009).
The array can be summed over to obtain the total dead time
"""
self._deadtime = np.zeros_like(self._events)
overflow_mask = self._pha == self._n_channels # specific to gbm! should work for CTTE
# From Meegan et al. (2009)
# Dead time for overflow (note, overflow sometimes changes)
self._deadtime[overflow_mask] = 10.E-6 # s
# Normal dead time
self._deadtime[~overflow_mask] = 2.E-6 # s
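    # Editor's note: as the docstring above says, the total dead time of a
    # selection is just the sum of these per-event values, e.g.
    #     total_dead_time = self._deadtime.sum()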
def _compute_mission_times(self):
mission_dict = {}
if self.trigger_time == 0:
return None
# Complements to Volodymyr Savchenko
xtime_url = "https://heasarc.gsfc.nasa.gov/cgi-bin/Tools/xTime/xTime.pl"
pattern = """<tr>.*?<th scope=row><label for="(.*?)">(.*?)</label></th>.*?<td align=center>.*?</td>.*?<td>(.*?)</td>.*?</tr>"""
args = dict(
time_in_sf=self._trigger_time,
timesys_in="u",
timesys_out="u",
apply_clock_offset="yes")
try:
content = requests.get(xtime_url, params=args).content
mission_info = re.findall(pattern, content, re.S)
mission_dict['UTC'] = mission_info[0][-1]
mission_dict[mission_info[7][1]] = mission_info[7][2] # LIGO
mission_dict[mission_info[8][1]] = mission_info[8][2] # NUSTAR
mission_dict[mission_info[12][1]] = mission_info[12][2] # RXTE
mission_dict[mission_info[16][1]] = mission_info[16][2] # SUZAKU
mission_dict[mission_info[20][1]] = mission_info[20][2] # SWIFT
mission_dict[mission_info[24][1]] = mission_info[24][2] # CHANDRA
except:
warnings.warn("You do not have the requests library, cannot get time system from Heasarc "
"at this point.")
return None
return mission_dict
def __repr__(self):
return self._output().to_string()
def _output(self):
"""
Examine the currently selected interval
If connected to the internet, will also look up info for other instruments to compare with
Fermi.
:return: none
"""
mission_dict = compute_fermi_relative_mission_times(self._trigger_time)
fermi_dict = collections.OrderedDict()
fermi_dict['Fermi Trigger Time'] = "%.3f" % self._trigger_time
fermi_dict['Fermi MET OBS Start'] = "%.3f" % self._start_events
fermi_dict['Fermi MET OBS Stop'] = "%.3f" % self._stop_events
fermi_dict['Fermi UTC OBS Start'] = self._utc_start
fermi_dict['Fermi UTC OBS Stop'] = self._utc_stop
fermi_df = pd.Series(fermi_dict, index=fermi_dict.keys())
if mission_dict is not None:
mission_df = pd.Series(mission_dict, index=mission_dict.keys())
fermi_df = fermi_df.append(mission_df)
return fermi_df
class GBMCdata(object):
def __init__(self,cdata_file,rsp_file):
self.spectrum_set = PHASpectrumSet(cdata_file,rsp_file=rsp_file)
cdata = fits.open(cdata_file)
try:
self._trigger_time = cdata['PRIMARY'].header['TRIGTIME']
except:
# For continuous data
warnings.warn("There is no trigger time in the TTE file. Must be set manually or using MET relative times.")
self._trigger_time = 0
self._start_events = cdata['PRIMARY'].header['TSTART']
self._stop_events = cdata['PRIMARY'].header['TSTOP']
self._utc_start = cdata['PRIMARY'].header['DATE-OBS']
self._utc_stop = cdata['PRIMARY'].header['DATE-END']
self._n_channels = cdata['EBOUNDS'].header['NAXIS2']
self._det_name = "%s_%s" % (cdata['PRIMARY'].header['INSTRUME'], cdata['PRIMARY'].header['DETNAM'])
self._telescope = cdata['PRIMARY'].header['TELESCOP']
@property
def trigger_time(self):
return self._trigger_time
@trigger_time.setter
def trigger_time(self, val):
assert self._start_events <= val <= self._stop_events, "Trigger time must be within the interval (%f,%f)" % (
self._start_events, self._stop_events)
self._trigger_time = val
@property
def tstart(self):
return self._start_events
@property
def tstop(self):
return self._stop_events
@property
def arrival_times(self):
return self._events
@property
def n_channels(self):
return self._n_channels
@property
def energies(self):
return self._pha
@property
def mission(self):
"""
Return the name of the mission
:return:
"""
return self._telescope
@property
def det_name(self):
"""
Return the name of the instrument and detector
:return:
"""
return self._det_name
def _compute_mission_times(self):
mission_dict = {}
if self.trigger_time == 0:
return None
# Complements to Volodymyr Savchenko
xtime_url = "https://heasarc.gsfc.nasa.gov/cgi-bin/Tools/xTime/xTime.pl"
pattern = """<tr>.*?<th scope=row><label for="(.*?)">(.*?)</label></th>.*?<td align=center>.*?</td>.*?<td>(.*?)</td>.*?</tr>"""
args = dict(
time_in_sf=self._trigger_time,
timesys_in="u",
timesys_out="u",
apply_clock_offset="yes")
try:
content = requests.get(xtime_url, params=args).content
mission_info = re.findall(pattern, content, re.S)
mission_dict['UTC'] = mission_info[0][-1]
mission_dict[mission_info[7][1]] = mission_info[7][2] # LIGO
mission_dict[mission_info[8][1]] = mission_info[8][2] # NUSTAR
mission_dict[mission_info[12][1]] = mission_info[12][2] # RXTE
mission_dict[mission_info[16][1]] = mission_info[16][2] # SUZAKU
mission_dict[mission_info[20][1]] = mission_info[20][2] # SWIFT
mission_dict[mission_info[24][1]] = mission_info[24][2] # CHANDRA
except:
warnings.warn("You do not have the requests library, cannot get time system from Heasarc "
"at this point.")
return None
return mission_dict
def __repr__(self):
return self._output().to_string()
def _output(self):
"""
Examine the currently selected interval
If connected to the internet, will also look up info for other instruments to compare with
Fermi.
:return: none
"""
mission_dict = compute_fermi_relative_mission_times(self._trigger_time)
fermi_dict = collections.OrderedDict()
fermi_dict['Fermi Trigger Time'] = "%.3f" % self._trigger_time
fermi_dict['Fermi MET OBS Start'] = "%.3f" % self._start_events
fermi_dict['Fermi MET OBS Stop'] = "%.3f" % self._stop_events
fermi_dict['Fermi UTC OBS Start'] = self._utc_start
fermi_dict['Fermi UTC OBS Stop'] = self._utc_stop
fermi_df = pd.Series(fermi_dict, index=fermi_dict.keys())
if mission_dict is not None:
mission_df = pd.Series(mission_dict, index=mission_dict.keys())
fermi_df = fermi_df.append(mission_df)
return fermi_df
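# Minimal usage sketch (editor's addition; the file name is a placeholder for a
# real GBM TTE FITS file):
#     tte = GBMTTEFile('glg_tte_n0_bnXXXXXXXXX_v00.fit')
#     print(tte)                                   # trigger/OBS time summary table
#     dt = tte.arrival_times - tte.trigger_time    # event times relative to trigger
#     total_dead_time = tte.deadtime.sum()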
| {
"content_hash": "13a6431d552be9bc18941bef7bcbf892",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 135,
"avg_line_length": 27.349593495934958,
"alnum_prop": 0.5723345223939754,
"repo_name": "volodymyrss/3ML",
"id": "fff80e1c594b869734fcdbb91404cfe59726bf5f",
"size": "10092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threeML/utils/data_builders/fermi/gbm_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1896"
},
{
"name": "Python",
"bytes": "1237912"
},
{
"name": "Shell",
"bytes": "6442"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lectura_ciclica.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "445b691f635ed0262b50bfc010c519cd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.7161016949152542,
"repo_name": "luisza/lectura_ciclica",
"id": "e8e1280623f8777ca518ef2fcf664d56c745fd00",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "67590"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "10861"
}
],
"symlink_target": ""
} |
from rest_framework import viewsets, filters
from imageupload_rest.serializers import UploadedImageSerializer
from imageupload.models import UploadedImage
# ViewSet for our UploadedImage Model
# Gets all images from database and serializes them using UploadedImageSerializer
class UploadedImagesViewSet(viewsets.ModelViewSet):
queryset = UploadedImage.objects.all()
serializer_class = UploadedImageSerializer
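# Editor's note, a minimal hookup sketch (the 'images' prefix is an assumption;
# this normally lives in the project's urls.py rather than in this module):
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'images', UploadedImagesViewSet)
#   urlpatterns = router.urls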
| {
"content_hash": "d881f3b4c4d0e8b1db05cf473e00c793",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 38.18181818181818,
"alnum_prop": 0.8404761904761905,
"repo_name": "ChristianKreuzberger/django-rest-imageupload-example",
"id": "19395b98ce177f7efe9075561b2c5297a0001c0e",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_rest_imageupload_backend/imageupload_rest/viewsets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3533"
},
{
"name": "JavaScript",
"bytes": "4342"
},
{
"name": "Python",
"bytes": "12589"
}
],
"symlink_target": ""
} |
import pandas as pd
def RSI(data,window_length=14):
# Get only closing price
close = data['Closing Price']
# Get difference in price
delta = close.diff()
# Make positive gains and negative gains Series
up, down = delta.copy(), delta.copy()
down[down > 0] = 0
up[up < 0] = 0
# Calculate the EWMA
roll_up1 = up.ewm(span=window_length).mean()
roll_down1 = down.abs().ewm(span=window_length).mean()
# Calculate RSI based on EWMA
RS1 = roll_up1 / roll_down1
RSI1 = pd.Series(100.0 - (100.0 / (1.0 + RS1)), name='RSI_FWMA')
# Calculate the SMA
roll_up2 = up.rolling(window_length).mean()
roll_down2 = down.abs().rolling(window_length).mean()
# Calculate RSI based on SMA
RS2 = roll_up2 / roll_down2
RSI2 = pd.Series(100.0 - (100.0 / (1.0 + RS2)), name='RSI_SMA')
data = pd.concat([data, RSI1, RSI2], axis=1)
return data
def movingaverage(data,window_length=14):
# Get only closing price
close = data['Closing Price']
ewma = close.ewm(span=window_length).mean().rename('CP_EMA')
    sma = close.rolling(window_length).mean().rename('CP_SMA')
data = pd.concat([data, ewma, sma], axis=1)
return data
def macd(data):
close = data['Closing Price']
ema12 = close.ewm(span=12).mean()
ema26 = close.ewm(span=26).mean()
ema9 = close.ewm(span=9).mean()
macd = (ema12 - ema26).rename('MACD')
macd_signal = macd.ewm(span=9).mean().rename('MACD_SIGNAL')
macd_hist = (macd - macd_signal).rename('MACD_HIST')
data = pd.concat([data,macd,macd_signal,macd_hist],axis=1)
return data
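# Minimal usage sketch (editor's addition; the tiny DataFrame is made up -- the
# functions above only require a 'Closing Price' column):
if __name__ == '__main__':
    df = pd.DataFrame({'Closing Price': [100.0, 102.0, 101.0, 105.0, 107.0, 106.0, 110.0]})
    df = RSI(df, window_length=3)
    df = movingaverage(df, window_length=3)
    df = macd(df)
    print(df.tail())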
| {
"content_hash": "06efa13fc1ea25ba364841bfa0b2cc10",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 68,
"avg_line_length": 29.962962962962962,
"alnum_prop": 0.6242274412855378,
"repo_name": "samshara/Stock-Market-Analysis-and-Prediction",
"id": "c7b2ba23d49983f825dcf7b4533360872cfc392f",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smap_nepse/preprocessing/indicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75655"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
# from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
# from preprocessing import preprocessing_factory
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'train_dir', './train_dir/yolo1-resnet/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_integer(
'max_number_of_steps', None,
'The maximum number of training steps.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
    ValueError: if `learning_rate_decay_type` is not recognized.
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized',
FLAGS.learning_rate_decay_type)
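# Editor's note, a worked example of the exponential branch above: with
# staircase=True, tf.train.exponential_decay gives
#   lr(step) = learning_rate * learning_rate_decay_factor ** (step // decay_steps)
# so the default 0.01 rate is multiplied by 0.94 once every
# num_epochs_per_decay epochs' worth of batches.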
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = ['resnet_v1_50/logits']
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=False)
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=1,
clone_on_cpu=False,
replica_id=0,
num_replicas=1,
num_ps_tasks=0)
# Create global_step
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
# TODO: integrate data
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
######################
# Select the network #
######################
network_fn = nets_factory.get_network_fn(
'resnet_v1_50',
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=True)
# TODO: should write own preprocessing
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = 'resnet_v1_50'
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=True)
# TODO: data provider needed
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = FLAGS.train_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, train_image_size, train_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(
labels, dataset.num_classes - FLAGS.labels_offset)
batch_queue = slim.prefetch_queue.prefetch_queue(
[images, labels], capacity=2 * deploy_config.num_clones)
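      # The prefetch queue lets input preprocessing run ahead of the training
      # step; its capacity is two batches per clone.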
####################
# Define the model #
####################
def clone_fn(batch_queue):
"""Allows data parallelism by creating multiple clones of network_fn."""
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
#############################
# Specify the loss function #
#############################
if 'AuxLogits' in end_points:
tf.losses.softmax_cross_entropy(
logits=end_points['AuxLogits'], onehot_labels=labels,
label_smoothing=0, weights=0.4, scope='aux_loss')
tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels,
label_smoothing=0, weights=1.0)
return end_points
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by network_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Add summaries for end_points.
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point,
tf.nn.zero_fraction(x)))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
# TODO: may need to add flexibility in optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
# Variables to train.
variables_to_train = _get_variables_to_train()
    # Compute the total loss and the gradients for all model clones.
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,
name='train_op')
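    # train_tensor evaluates total_loss only after all update ops (the gradient
    # application and the batch-norm statistics updates) have run.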
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
slim.learning.train(
train_tensor,
logdir=FLAGS.train_dir,
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=None) | {
"content_hash": "c184354c3402241cac911425d3ea229f",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 80,
"avg_line_length": 35.41208791208791,
"alnum_prop": 0.6228083785880527,
"repo_name": "wenxichen/tensorflow_yolo2",
"id": "08dcacc937cb6465b0781185b9df5e42b1732d4e",
"size": "12890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yolo1-resnet-adv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "45645"
},
{
"name": "Python",
"bytes": "630813"
},
{
"name": "Shell",
"bytes": "10985"
}
],
"symlink_target": ""
} |
"""
WSGI config for sitetest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sitetest.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
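# For example, a WSGI server such as gunicorn could serve this module with
# `gunicorn sitetest.wsgi:application` (illustrative; any WSGI server works).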
| {
"content_hash": "aab4eebea7df7e401b3ad5c6da18e665",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.928571428571427,
"alnum_prop": 0.7749360613810742,
"repo_name": "guillaume-havard/testdjango",
"id": "84253da47cf73c0c87269044916840f7ac24da81",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitetest/sitetest/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57278"
}
],
"symlink_target": ""
} |
"""This module enables the GUI, buttons and control logic for Room-based plots."""
# coding=utf-8
from __future__ import print_function
import bisect
import numbers
import sys
import numpy as np
from PyQt4 import QtCore,QtGui
from dataStructures.timeSeries import TimeArray
from readStadicData.processVisData import VisData
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from dataStructures.dayIll import Dayill
from readStadicData.parseJson import StadicProject
from pyqtGui.gui import Ui_Form
from plotFunctions.gridPlots import gridPlot
#~~~~~DevNotes: 25Mar2016:~~~~~~~
# Fixed display limits to conform to illuminance units.
# Moved all the connect signals to the end so that they don't get triggered
# while the data is initially being loaded into all the comboBoxes and textBoxes.
# TODO: Add a QMessageBox to show an error message in case the grid
# spacings aren't uniform.
# TODO: Fix the calendar year. (Almost done) !
# TODO: Mask missing points.
class NavigationToolbarStadic(NavigationToolbar):
dataDescr = None
dataType = None
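    # Toolbar subclass that appends the value under the cursor (formatted with
    # the configured data type/units) to the status-bar message while hovering.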
def mouse_move(self, event):
self._set_cursor(event)
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
artists = [a for a in event.inaxes.mouseover_set
if a.contains(event)]
if artists:
a = max(enumerate(artists), key=lambda x: x[1].zorder)[1]
if a is not event.inaxes.patch:
data = a.get_cursor_data(event)
if isinstance(data,numbers.Number):
if self.dataDescr:
s += " {} ".format(self.dataDescr)
if self.dataType:
if self.dataType == 'lux':
dataVal = int(data)
elif self.dataType == 'fc':
dataVal = round(data,3)
else:
dataVal = round(data*100,3)
s += '{}'.format(dataVal)
if self.dataType != "%":
s += ' {}'.format(self.dataType)
else:
s += '{}'.format(self.dataType)
else:
s = ''
if data is np.NaN or data < 0 :
s = ''
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pick_event(self,event):
print(event.ind)
class Spatial(QtGui.QDialog, Ui_Form,VisData):
def setupGui(self):
if self.dataSpaceNameSelected:
self.tabWidget.setEnabled(True)
#Set the calendar as per the starting year.
self.calSpaceDateTimeIllum.setMinimumDateTime(
QtCore.QDateTime(self.dataYear,1,1,0,0))
self.calSpaceDateTimeIllum.setDateTime(
QtCore.QDateTime(self.dataYear, 1, 1, 0, 0))
self.calSpaceDateTimeIllum.setMaximumDateTime(
QtCore.QDateTime(self.dataYear,12,31,23,59))
self.grpContoursIlluminance.setEnabled(True)
self.btnSpaceSettingsContour.setEnabled(True)
#TODO: Change the visiblity settings to True later ;)
self.btnSpaceSettingsContour.setVisible(True)
#Setup matplotlib inside Qt.
self.spFigure = Figure()
self.spCanvas = FigureCanvas(self.spFigure)
#Validator for setting values
floatValidator = QtGui.QDoubleValidator(0.0,20000.0,3)
#Settings for showing and hiding color and contours.
self.grpColoursIlluminance.setVisible(False)
self.grpContoursIlluminance.setVisible(False)
        #Initialize a dictionary for ill files.
self.spAllFilesDict = {}
        #Code for manipulating navigation settings for illuminance.
self.spTimeStepIlluminance = 1 #This attribute determines the time step for stepping between different illuminance plots.
#Changing/clicking any of the below controls should trigger the illuminance plots.
self.spIlluminanceActivated = False
self.spCurrentIlluminanceHour = 9
units = self.dataProject.unitsIlluminance
unitsMultiplier = {'lux':1,'fc':0.1}[str(units)]
self.spIlluminanceMaxVal = 5000*unitsMultiplier
self.spIlluminanceMinVal = 0
self.spIlluminanceMaxValDefault = 5000*unitsMultiplier
self.spIlluminanceMinValDefault = 0
self.spIlluminanceUpperMaskValue = None
self.spIlluminanceLowerMaskValue = None
self.spIlluminanceUpperMaskColor = None
self.spIlluminanceLowerMaskColor = None
self.spElectricMaxVal = 400 * unitsMultiplier
self.spElectricMinVal = 0
self.spElectricMaxValDefault = 400 * unitsMultiplier
self.spElectricMinValDefault = 0
self.spElectricUpperMaskValue = None
self.spElectricLowerMaskValue = None
self.spElectricUpperMaskColor = None
self.spElectricLowerMaskColor = None
self.spMetricsMaxVal = 1.0
self.spMetricsMinVal = 0.0
self.spMetricsMaxValDefault = 1.0
self.spMetricsMinValDefault = 0
self.spMetricsUpperMaskValue = None
self.spMetricsLowerMaskValue = None
self.spMetricsUpperMaskColor = None
self.spMetricsLowerMaskColor = None
self.spCurrentPlotIsIlluminance = True
self.spCurrentPlotIsElectric = False
self.txtSpaceColorsMax.setText(str(self.spIlluminanceMaxValDefault))
self.txtSpaceColorsMin.setText(str(self.spIlluminanceMinValDefault))
self.txtSpaceColorsMax.setValidator(floatValidator)
self.txtSpaceColorsMin.setValidator(floatValidator)
self.spPlotIlluminanceColors = True
#Put all contourboxes inside a list for easy iteration.
self.spContourBoxes = [self.txtSpaceCountourValue1, self.txtSpaceCountourValue2, self.txtSpaceCountourValue3, self.txtSpaceCountourValue4,
self.txtSpaceCountourValue5, self.txtSpaceCountourValue6, self.txtSpaceCountourValue7, self.txtSpaceCountourValue8]
for contourBox in self.spContourBoxes:
contourBox.setValidator(floatValidator)
self.spContourValuesIlluminance = (50, 100, 500, 1000, 2000, 3000, 5000, 10000)
self.spContourValuesIlluminance = map(lambda x:x*unitsMultiplier,self.spContourValuesIlluminance)
self.spContourValuesElectric = (50, 100, 150, 200, 250, 300, 350, 400)
self.spContourValuesElectric = map(lambda x:x*unitsMultiplier,self.spContourValuesElectric)
self.spContourValuesIlluminanceDefault = (50, 100, 500, 1000, 2000, 3000, 5000, 10000)
self.spContourValuesIlluminanceDefault = map(lambda x:x*unitsMultiplier,self.spContourValuesIlluminanceDefault)
self.spContourValuesElectricDefault = (50, 100, 150, 200, 250, 300, 350, 400)
self.spContourValuesElectricDefault = map(lambda x:x*unitsMultiplier,self.spContourValuesElectricDefault)
for idx,contourBox in enumerate(self.spContourBoxes):
contourBox.setText(str(self.spContourValuesIlluminance[idx]))
self.spContourValuesMetrics = (0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9, 1.0)
self.spContourValuesMetricsDefault = (0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 0.9, 1.0)
        #Constructor stuff
self.spColorMapTuple = (('Uniform01', 'viridis'), ('Uniform02', 'inferno'), ('Uniform03', 'plasma'), ('Uniform04', 'magma'), ('Blues', 'Blues'),
('BlueGreen','BuGn'), ('BluePurple','BuPu'), ('GreenBlue','GnBu'), ('Greens','Greens'), ('Greys','Greys'), ('Oranges','Oranges'),
('OrangeRed','OrRd'), ('PurpleBlue','PuBu'), ('PurpleBlueGreen','PuBuGn'), ('PurpleRed','PuRd'), ('Purples','Purples'),
('RedPurple','RdPu'), ('Reds','Reds'), ('YellowGreen','YlGn'), ('YellowGreenBlue','YlGnBu'), ('YellowOrangeBrown','YlOrBr'),
('YellowOrangeRed','YlOrRd'), ('Hot01','afmhot'), ('Hot02','hot'), ('Hot03','gist_heat'), ('Autumn','autumn'), ('Bone','bone'), ('Cool','cool'),
('Copper','copper'), ('Spring','spring'), ('Summer','summer'), ('Winter','winter'))
colorNames = [name for name,plotName in self.spColorMapTuple]
self.spColorDict =dict(self.spColorMapTuple)
self.cmbSpaceColorScheme.addItems(colorNames)
self.cmbSpaceColorScheme.setCurrentIndex(21)
self.spCurrentColorScheme = 'YlOrRd'
self.spCurrentSpaceChartOpacityValue = 1
self.spCurrentColorSchemeMetrics = 'YlOrRd'
self.spCurrentSpaceChartOpacityValueMetrics = 1
self.spCurrentColorSchemeElectric = 'YlOrRd'
self.spCurrentSpaceChartOpacityValueElectric = 1
self.spInterpolateColorScheme= None
self.txtSpaceStatusDisplay.setEnabled(False)
illFileKeys,illFileNames = zip(*self.dataDayIllFilesList)
self.ptsFile = self.dataPtsFile
self.illData = Dayill(illFileNames[0],self.ptsFile)
hourFormat = self.illData.timedata[0:24]
hourFormat = [hourVal['tstamp'].strftime("%I:%M %p") for hourVal in hourFormat]
#Set valid time stamps for all the drop boxes that show time.
self.cmbSpaceTimeIllum.clear()
self.cmbSpaceTimeIllum.addItems(map(str,hourFormat))
self.cmbSpaceTimeIllum.setCurrentIndex(9)
self.cmbSpaceTimeIntervalMax.clear()
self.cmbSpaceTimeIntervalMax.addItems(map(str,hourFormat))
self.cmbSpaceTimeIntervalMax.setCurrentIndex(23)
self.cmbSpaceTimeIntervalMin.clear()
self.cmbSpaceTimeIntervalMin.addItems(map(str,hourFormat))
self.cmbSpaceTimeIntervalMin.setCurrentIndex(0)
# Set valid time stamps for all the drop boxes that show time.
self.cmbCombinedTimeIllum.clear()
self.cmbCombinedTimeIllum.addItems(map(str, hourFormat))
self.cmbCombinedTimeIllum.setCurrentIndex(9)
self.cmbCombinedTimeIntervalMax.clear()
self.cmbCombinedTimeIntervalMax.addItems(map(str, hourFormat))
self.cmbCombinedTimeIntervalMax.setCurrentIndex(23)
self.cmbCombinedTimeIntervalMin.clear()
self.cmbCombinedTimeIntervalMin.addItems(map(str, hourFormat))
self.cmbCombinedTimeIntervalMin.setCurrentIndex(0)
self.spAllFilesDict = self.dataAllFiles
        # Added this test as sometimes metrics are not calculated. In those cases only the illuminance data is available.
try:
resultsFiles,resultsFilesNames = zip(*self.dataMetricsFilesList)
if self.dataElectricIllFilesList:
electricFiles,electricFilesNames = zip(*self.dataElectricIllFilesList)
else:
electricFiles=[]
electricFilesNames=[]
mainComboBoxContents = [illFileKeys[0]]+ \
sorted(list(resultsFiles))+ \
sorted(list(electricFiles))
except ValueError:
mainComboBoxContents = [illFileKeys[0]]
self.cmbSpacePlotType.clear()
self.cmbSpacePlotType.addItems(mainComboBoxContents)
self.cmbSpaceSelectIlluminanceFile.clear()
self.cmbSpaceSelectIlluminanceFile.addItems(illFileKeys)
self.cmbCombinedSelectIlluminanceFile.clear()
self.cmbCombinedSelectIlluminanceFile.addItems(illFileKeys)
self.spacePlotTypeDict = self.dataAllFilesAvailable
self.spShadeSchedule = self.dataProject.spaces[self.dataSpaceIndex].scheduleShades
self.spWindowGroupNames = [windowGroup.name for windowGroup in self.dataProject.spaces[self.dataSpaceIndex].windowGroups]
self.spShowWindowGroupInfo = True #Toggle this to False in case window Group info isn't to be shown.
if self.spShadeSchedule and self.spShowWindowGroupInfo:
shadeData = TimeArray(self.spShadeSchedule)
shadeData = [map(int,timedata['readStadicData']) for timedata in shadeData.timedata]
self.spShadeSchedule = shadeData
self.btnSpaceSettingsContour.clicked.connect(self.spToggleContourSettings)
self.btnSpaceSettingsColours.clicked.connect(self.spToggleColorSettings)
self.calSpaceDateTimeIllum.dateChanged.connect(self.spSetCurrentIlluminanceHourCalendar)
self.cmbSpaceTimeIllum.currentIndexChanged.connect(self.spSetCurrentIlluminanceHourCalendar)
self.btnSpacePrevHour.clicked.connect(lambda:self.spSetCurrentIlluminanceHourTimeStep(False))
self.btnSpaceNextHour.clicked.connect(lambda:self.spSetCurrentIlluminanceHourTimeStep(True))
#If the timestep settings are changed, change the time step but don't trigger the illuminance plot.
self.cmbSpaceIlluminanceStepType.currentIndexChanged.connect(self.spUpdateIlluminanceTimeStep)
self.cmbSpaceIluminanceStepValue.currentIndexChanged.connect(self.spUpdateIlluminanceTimeStep)
#Settings for displaying the opacity value on a box.
self.sliderSpaceOpacity.valueChanged.connect(self.spOpacitySliderChanged)
#Settings for color values of the illuminance plot.
self.btnSelectColorLowerMask.clicked.connect(lambda:self.spMaskSettingsActivated(False))
self.btnSelectColorUpperMask.clicked.connect(lambda:self.spMaskSettingsActivated(True))
self.btnSpaceResetColors.clicked.connect(self.spResetColorSettings)
self.btnSpaceSetColors.clicked.connect(self.spSetColorSettings)
#settings for contour values for the illuminance plot.
self.cmbSpaceContourQuantity.currentIndexChanged.connect(self.spSetContourQuantity)
self.chkSpaceColors.clicked.connect(self.spRefreshPlots)
self.chkSpaceContours.clicked.connect(self.spRefreshPlots)
self.btnSpaceResetContours.clicked.connect(self.spResetContourSettings)
self.btnSpaceSetContours.clicked.connect(self.spSetContourSettings)
self.btnSpaceSetColorScheme.clicked.connect(self.spAssignSpaceColorScheme)
self.cmbSpacePlotType.currentIndexChanged.connect(self.spPlotTypeSelect)
self.cmbSpaceSelectIlluminanceFile.currentIndexChanged.connect(self.spLoadDifferentIlluminanceFile)
self.cmbSpaceTimeIllum.setCurrentIndex(10)
# self.spCanvas.mpl_connect('motion_notify_event',self.spMouseClicked)
# self.spCurrentDataSet = None
self.txtSpaceMsgBox.setText(self.dataLog)
# TODO: Delete the line below to enable the timeseries stuff.
self.tabWidget.removeTab(2)
def spMouseClicked(self,event):
"""
        I am leaving this in for future reference for event-related stuff
:param event:
:return:
"""
xdata,ydata = event.xdata,event.ydata
if xdata and ydata:
xCor = list(self.illData.roomgrid.uniCorX)
yCor = list(self.illData.roomgrid.uniCorY)
xCorLen,yCorLen = len(xCor),len(yCor)
currentData = self.spCurrentDataSet
xloc = bisect.bisect(xCor,xdata)
yloc = bisect.bisect(yCor,ydata)
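            # The bisected grid indices are currently unused; they are kept here
            # as a reference for future picking/inspection features.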
def spSetContourSettings(self):
contourList = []
for box in self.spContourBoxes:
if box.isEnabled() and box.text():
contourList.append(float(str(box.text())))
if self.spCurrentPlotIsIlluminance:
self.spContourValuesIlluminance = list(contourList)
self.spPlotIlluminance()
elif self.spCurrentPlotIsElectric:
self.spContourValuesElectric = list(contourList)
self.spPlotElectric()
else:
self.spContourValuesMetrics = list(contourList)
self.spPlotMetrics()
def spResetContourSettings(self):
self.cmbSpaceContourQuantity.setCurrentIndex(6)
for idx,box in enumerate(self.spContourBoxes):
if self.spCurrentPlotIsIlluminance:
box.setText(str(self.spContourValuesIlluminanceDefault[idx]))
elif self.spCurrentPlotIsElectric:
box.setText(str(self.spContourValuesElectricDefault[idx]))
else:
box.setText(str(self.spContourValuesMetricsDefault[idx]))
def spRefreshPlots(self):
"""
This is required because there are certain events that just need to trigger the current plot.
:return:
"""
if self.spCurrentPlotIsIlluminance:
self.spPlotIlluminance()
elif self.spCurrentPlotIsElectric:
self.spPlotElectric()
else:
self.spPlotMetrics()
def spLoadDifferentIlluminanceFile(self):
selectedIllFileKey = str(self.cmbSpaceSelectIlluminanceFile.currentText())
selectedIllFile = self.spAllFilesDict[selectedIllFileKey]
self.illData = Dayill(selectedIllFile,self.ptsFile)
        self.txtSpaceStatusDisplay.setText("Current space: {} \tCurrent data set: {}.\t Source:{}".format(self.dataSpaceNameSelected, selectedIllFileKey, selectedIllFile))
self.spPlotIlluminance()
def spOpenJsonFileDirectly(self):
jsonFileName = QtGui.QFileDialog.getOpenFileName(self,"Select a json file to open","C:/","Json File (*.json)")
if jsonFileName:
self.jsonFile = str(jsonFileName)
self.txtJsonPath.setText(jsonFileName)
project = StadicProject(jsonFileName)
spaceTuple = [space.spaceName for space in project.spaces]
self.cmbSpaceName.clear()
self.cmbSpaceName.addItems(spaceTuple)
self.cmbSpaceName.setEnabled(True)
self.btnSelectSpaceName.setEnabled(True)
newWindowTitle = jsonFileName+" -- "+self.defaultWindowTitle
self.setWindowTitle(newWindowTitle)
del project
def spLoadVisualsFromOpenedJsonFile(self):
self.txtSpaceStatusDisplay.clear()
self.spLoadJson(self.jsonFile, self.cmbSpaceName.currentIndex())
self.tabWidget.setEnabled(True)
def spAssignSpaceColorScheme(self):
currentColor = self.spColorDict[str(self.cmbSpaceColorScheme.currentText())]
if self.chkSpaceColorSchemeInvert.checkState():
currentColor += "_r"
if self.chkSpaceColorSchemeInterpolate.checkState():
self.spInterpolateColorScheme = 'nearest'
else:
self.spInterpolateColorScheme = 'hanning'
if self.spCurrentPlotIsIlluminance:
self.spCurrentColorScheme = currentColor
self.spCurrentSpaceChartOpacityValue = self.sliderSpaceOpacity.value() / 100.0
self.spPlotIlluminance()
elif self.spCurrentPlotIsElectric:
self.spCurrentColorSchemeElectric = currentColor
self.spCurrentSpaceChartOpacityValueElectric = self.sliderSpaceOpacity.value() / 100.0
self.spPlotElectric()
else:
self.spCurrentColorSchemeMetrics = currentColor
self.spCurrentSpaceChartOpacityValueMetrics = self.sliderSpaceOpacity.value() / 100.0
self.spPlotMetrics()
# TODO:Change this to mean all plots later.
def spSetContourQuantity(self):
contourQuantity = int(self.cmbSpaceContourQuantity.currentText())
for idx,contourBoxes in enumerate(self.spContourBoxes):
if (idx+2-1)>contourQuantity:
contourBoxes.clear()
contourBoxes.setEnabled(False)
else:
contourBoxes.setEnabled(True)
def spMaskSettingsActivated(self, isUpperMask):
colorDialog = QtGui.QColorDialog
selectedColor = colorDialog.getColor()
if selectedColor.isValid():
selectedColor = selectedColor.getRgb()
if isUpperMask:
self.txtSpaceColorsUpperMask.setStyleSheet("background-color: rgb{}".format(selectedColor))
if self.spCurrentPlotIsIlluminance:
self.spIlluminanceUpperMaskColor = selectedColor
elif self.spCurrentPlotIsElectric:
self.spElectricUpperMaskColor = selectedColor
else:
self.spMetricsUpperMaskColor = selectedColor
else:
self.txtSpaceColorsLowerMask.setStyleSheet("background-color: rgb{}".format(selectedColor))
if self.spCurrentPlotIsIlluminance:
self.spIlluminanceLowerMaskColor = selectedColor
elif self.spCurrentPlotIsElectric:
self.spElectricLowerMaskColor = selectedColor
else:
self.spMetricsLowerMaskColor = selectedColor
def spSetCurrentIlluminanceHourCalendar(self):
"""
Plot illuminance based on a selection from the calendar
"""
dateVal = self.calSpaceDateTimeIllum.dateTime().date().dayOfYear()
self.spCurrentIlluminanceHour = (dateVal - 1) * 24 + self.cmbSpaceTimeIllum.currentIndex()
self.spPlotIlluminance()
def spSetCurrentIlluminanceHourTimeStep(self, stepForward):
currentHour = self.spCurrentIlluminanceHour
currentHourOriginal = currentHour
skipDarkHours = self.chkSpaceSkipDarkHours.checkState()
timeStep = self.spTimeStepIlluminance
lowerInterval = self.cmbSpaceTimeIntervalMin.currentIndex()
higherInterval = self.cmbSpaceTimeIntervalMax.currentIndex()
intervals = sorted(range(*sorted([lowerInterval,higherInterval])))
if intervals:
intervals.extend([max(intervals)+1])
else:
intervals.extend([lowerInterval])
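        # 'intervals' now holds the allowed hours of the day, inclusive of both
        # combo-box limits; stepping outside it wraps to the next/previous day.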
if stepForward:
currentHour += timeStep
currentDay = (currentHour+1)//24
currentDayHour = currentHour%24
if currentDayHour not in intervals:
currentDay += 1
currentHour = currentDay*24+intervals[0]
if skipDarkHours:
while currentHour<8759 and max(self.illData.timedata[currentHour]['readStadicData'].illarr)==0:
currentHour += 1
else:
currentHour -= timeStep
currentDay = (currentHour+1)//24
currentDayHour = currentHour%24
if currentDayHour not in intervals:
currentDay -= 1
currentHour = currentDay*24+intervals[-1]
if skipDarkHours:
while currentHour>-1 and max(self.illData.timedata[currentHour]['readStadicData'].illarr)==0:
currentHour -= 1
        #If the current skipped hour turns out to be dark, then just revert to the original value.
if skipDarkHours and -1<currentHour<8760 and max(self.illData.timedata[currentHour]['readStadicData'].illarr)==0:
currentHour = currentHourOriginal
if -1<currentHour<8760:
self.spCurrentIlluminanceHour = currentHour
self.spPlotIlluminance()
def spUpdateIlluminanceTimeStep(self):
"""
Update illuminance time step in case corresponding controls are updated.
:return:
"""
#0 corresponds to hour while 1 corresponds to day.
timeStepType = {0:1,1:24}[self.cmbSpaceIlluminanceStepType.currentIndex()]
timeStepMultiplier = int(self.cmbSpaceIluminanceStepValue.currentText())
self.spTimeStepIlluminance = timeStepMultiplier * timeStepType
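        # For example, a step type of 'day' (24) with a multiplier of 2 gives a
        # 48-hour stride between successive illuminance plots.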
def spOpacitySliderChanged(self):
opacityValue = self.sliderSpaceOpacity.value()
self.txtSpaceOpacityValue.setText(str(opacityValue))
def spToggleContourSettings(self):
visibility = self.grpContoursIlluminance.isVisible()
if not visibility:
self.btnSpaceSettingsContour.setText("Hide Settings")
self.grpContoursIlluminance.setVisible(True)
else:
self.btnSpaceSettingsContour.setText("Show Settings")
self.grpContoursIlluminance.setVisible(False)
def spSetColorSettings(self):
if self.spCurrentPlotIsIlluminance:
try:
self.spIlluminanceUpperMaskValue = float(str(self.txtSpaceColorsUpperMask.text()))
except ValueError:
pass
try:
self.spIlluminanceLowerMaskValue = float(self.txtSpaceColorsLowerMask.text())
except ValueError:
pass
self.spIlluminanceMaxVal = float(self.txtSpaceColorsMax.text())
self.spIlluminanceMinVal = float(self.txtSpaceColorsMin.text())
self.spPlotIlluminance()
elif self.spCurrentPlotIsElectric:
try:
self.spElectricUpperMaskValue = float(str(self.txtSpaceColorsUpperMask.text()))
except ValueError:
pass
try:
self.spElectricLowerMaskValue = float(self.txtSpaceColorsLowerMask.text())
except ValueError:
pass
self.spElectricMaxVal = float(self.txtSpaceColorsMax.text())
self.spElectricMinVal = float(self.txtSpaceColorsMin.text())
self.spPlotElectric()
else:
try:
self.spMetricsUpperMaskValue = float(str(self.txtSpaceColorsUpperMask.text()))
except ValueError:
pass
try:
self.spMetricsLowerMaskValue = float(self.txtSpaceColorsLowerMask.text())
except ValueError:
pass
self.spMetricsMaxVal = float(self.txtSpaceColorsMax.text())
self.spMetricsMinVal = float(self.txtSpaceColorsMin.text())
self.spPlotMetrics()
def spResetColorSettings(self):
self.txtSpaceColorsLowerMask.setStyleSheet("")
self.txtSpaceColorsUpperMask.setStyleSheet("")
self.txtSpaceColorsUpperMask.clear()
self.txtSpaceColorsUpperMask.setEnabled(False)
self.txtSpaceColorsUpperMask.setPlaceholderText('')
self.txtSpaceColorsLowerMask.clear()
self.txtSpaceColorsLowerMask.setEnabled(False)
self.txtSpaceColorsLowerMask.setPlaceholderText('')
if self.spCurrentPlotIsIlluminance:
self.txtSpaceColorsMax.setText(str(self.spIlluminanceMaxValDefault))
self.txtSpaceColorsMin.setText(str (self.spIlluminanceMinValDefault))
self.spIlluminanceLowerMaskColor = None
self.spIlluminanceUpperMaskColor = None
self.spIlluminanceMaxVal = float(self.txtSpaceColorsMax.text())
self.spIlluminanceMinVal = float(self.txtSpaceColorsMin.text())
self.spPlotIlluminance()
elif self.spCurrentPlotIsElectric:
self.txtSpaceColorsMax.setText(str(self.spElectricMaxValDefault))
self.txtSpaceColorsMin.setText(str (self.spElectricMinValDefault))
self.spElectricLowerMaskColor = None
self.spElectricUpperMaskColor = None
self.spElectricMaxVal = float(self.txtSpaceColorsMax.text())
self.spElectricMinVal = float(self.txtSpaceColorsMin.text())
self.spPlotElectric()
else:
self.txtSpaceColorsMax.setText(str(self.spMetricsMaxValDefault))
self.txtSpaceColorsMin.setText(str (self.spMetricsMinValDefault))
self.spMetricsLowerMaskColor = None
self.spMetricsUpperMaskColor = None
self.spMetricsMaxVal = float(self.txtSpaceColorsMax.text())
self.spMetricsMinVal = float(self.txtSpaceColorsMin.text())
self.spPlotMetrics()
def spToggleColorSettings(self):
visibility = self.grpColoursIlluminance.isVisible()
if not visibility:
self.btnSpaceSettingsColours.setText("Hide Settings")
self.grpColoursIlluminance.setVisible(True)
else:
self.btnSpaceSettingsColours.setText("Show Settings")
self.grpColoursIlluminance.setVisible(False)
def spPlotTypeSelect(self):
"""
        Plot metrics or illuminance data based on the selection from the main combo box for the space.
"""
currentSelection = str(self.cmbSpacePlotType.currentText())
filesDict = self.spacePlotTypeDict
if filesDict:
currentFile = filesDict[currentSelection]
if currentFile.endswith(".ill") and 'electric zone' not in currentSelection.lower():
selectedIllFileKey = [key for key,items in self.spAllFilesDict.items() if items == currentFile][0]
self.illData = Dayill(currentFile,self.ptsFile)
                self.txtSpaceStatusDisplay.setText("Current space: {} \tCurrent data set: {}.\t Source:{}".format(self.dataSpaceNameSelected, selectedIllFileKey, currentFile))
self.spPlotIlluminance()
self.grpSpaceIlluminance.setVisible(True)
self.spCurrentPlotIsIlluminance=True
self.sliderSpaceOpacity.setValue(self.spCurrentSpaceChartOpacityValue * 100)
currentColorScheme = self.spCurrentColorScheme
elif 'electric zone' in currentSelection.lower():
with open(currentFile)as metricsFile:
electricData = map(float,metricsFile.read().split())
self.spElectricData = list(electricData)
self.spCurrentElectricZoneName = currentSelection
self.spPlotElectric()
                self.txtSpaceStatusDisplay.setText("Current space: {} \tCurrent data set: {}.\t Source:{}".format(self.dataSpaceNameSelected,currentSelection,currentFile))
self.grpSpaceIlluminance.setVisible(False)
self.spCurrentPlotIsIlluminance = False
self.spCurrentPlotIsElectric = True
self.sliderSpaceOpacity.setValue(self.spCurrentSpaceChartOpacityValueMetrics * 100)
currentColorScheme = self.spCurrentColorSchemeElectric
elif currentFile.endswith(".res"):
with open(currentFile)as metricsFile:
metricsData = map(float,metricsFile.read().split())
self.spMetricsData = list(metricsData)
self.spCurrentMetricsName = currentSelection
self.spPlotMetrics()
                self.txtSpaceStatusDisplay.setText("Current space: {} \tCurrent data set: {}.\t Source:{}".format(self.dataSpaceNameSelected,currentSelection,currentFile))
self.grpSpaceIlluminance.setVisible(False)
self.spCurrentPlotIsIlluminance = False
# TODO: Uncomment this one. !
self.spCurrentPlotIsElectric = False
self.sliderSpaceOpacity.setValue(self.spCurrentSpaceChartOpacityValueMetrics * 100)
currentColorScheme = self.spCurrentColorSchemeMetrics
# currentIndex = [idx for idx,value in enumerate(self.cmbSpaceColorScheme.)]
# print(currentIndex)
        #I am resetting the values for colors and contours every time. Ideally I should be saving state for each occasion,
        #but that would result in too much data getting stored in each instance.
self.spResetColorSettings()
self.spResetContourSettings()
if currentColorScheme.endswith("_r"):
self.chkSpaceColorSchemeInvert.setChecked(True)
currentColorScheme = currentColorScheme[:-2]
else:
self.chkSpaceColorSchemeInvert.setChecked(False)
if self.spInterpolateColorScheme == 'nearest':
self.chkSpaceColorSchemeInterpolate.setChecked(True)
else:
self.chkSpaceColorSchemeInterpolate.setChecked(False)
colorSchemes= zip(*self.spColorMapTuple)[1]
currentColorIndex = colorSchemes.index(currentColorScheme)
self.cmbSpaceColorScheme.setCurrentIndex(currentColorIndex)
def spPlotIlluminance(self):
if not self.spIlluminanceActivated:
self.spToolbar = NavigationToolbarStadic(self.spCanvas, self)
self.layoutSpace.addWidget(self.spToolbar)
self.layoutSpace.addWidget(self.spCanvas)
self.spIlluminanceActivated = True
xCor = self.illData.roomgrid.uniCor['x']
yCor = self.illData.roomgrid.uniCor['y']
data = self.illData.timedata[self.spCurrentIlluminanceHour]['readStadicData'].illarr
# if len(readStadicData)<len(xCor)*len(yCor):
# readStadicData = readStadicData + [0]*(len(xCor)*len(yCor)-len(readStadicData))
timeStamp = self.illData.timedata[self.spCurrentIlluminanceHour]['tstamp']
timeStamp = timeStamp.strftime("%I:%M%p on %b %d")
colorScheme = self.spCurrentColorScheme
alphaVal = self.spCurrentSpaceChartOpacityValue
upperMask = self.spIlluminanceUpperMaskColor
lowerMask = self.spIlluminanceLowerMaskColor
plotTitle = str("Illuminance at {}".format(timeStamp).strip())
if self.spShowWindowGroupInfo and self.spShadeSchedule:
shadeScheduleCurrentHour = self.spShadeSchedule[self.spCurrentIlluminanceHour]
groupNames = map(str,self.spWindowGroupNames)
groupNames = map(str.strip,groupNames)
shadeSettings = zip(groupNames,shadeScheduleCurrentHour)
shadeSettings = str("\nShade Settings: {}".format(shadeSettings))
plotTitle += shadeSettings
contourValues = self.spContourValuesIlluminance
self.spToolbar.dataType = self.dataProject.unitsIlluminance
self.spCurrentDataSet = data
gridPlot(data, xCor, yCor,plotTitle,"X Coordinates","Y Coordinates",
fullDataGrid=self.illData.roomgrid.gridMatrixLocations, figVal=self.spFigure, colormap=colorScheme,
alpha=alphaVal, colorMax=self.spIlluminanceMaxVal, colorMin=self.spIlluminanceMinVal, lowerMask=lowerMask,
upperMask=upperMask, plotColors=self.chkSpaceColors.checkState(), plotContours=self.chkSpaceContours.checkState(),
contourValues=contourValues,interpolationVal=self.spInterpolateColorScheme)
self.spCanvas.draw()
def spPlotMetrics(self):
if not self.spIlluminanceActivated:
self.spToolbar = NavigationToolbarStadic(self.spCanvas, self)
self.layoutSpace.addWidget(self.spToolbar)
self.layoutSpace.addWidget(self.spCanvas)
self.spIlluminanceActivated = True
xCor = self.illData.roomgrid.uniCor['x']
yCor = self.illData.roomgrid.uniCor['y']
data = self.spMetricsData
colorScheme = self.spCurrentColorSchemeMetrics
alphaVal = self.spCurrentSpaceChartOpacityValueMetrics
upperMask = self.spMetricsUpperMaskColor
lowerMask = self.spMetricsLowerMaskColor
        #This replace is a quick hack for cases where Illuminance is abbreviated as Illum.
currentMetricsName = self.spCurrentMetricsName.replace("Illum", "Illuminance")
currentMetricsName = self.dataSpaceNamesDict[self.spCurrentMetricsName]
self.spCurrentDataSet = data
self.spToolbar.dataType = "%"
gridPlot(data, xCor, yCor, currentMetricsName,"X Coordinates","Y Coordinates",
fullDataGrid=self.illData.roomgrid.gridMatrixLocations, figVal=self.spFigure, colormap=colorScheme,
alpha=alphaVal, colorMax=self.spMetricsMaxVal, colorMin=self.spMetricsMinVal, lowerMask=lowerMask,
upperMask=upperMask, plotColors=self.chkSpaceColors.checkState(), plotContours=self.chkSpaceContours.checkState(), contourValues=self.spContourValuesMetrics,
interpolationVal=self.spInterpolateColorScheme)
self.spCanvas.draw()
def spPlotElectric(self):
if not self.spIlluminanceActivated:
self.spToolbar = NavigationToolbarStadic(self.spCanvas, self)
self.layoutSpace.addWidget(self.spToolbar)
self.layoutSpace.addWidget(self.spCanvas)
self.spIlluminanceActivated = True
xCor = self.illData.roomgrid.uniCor['x']
yCor = self.illData.roomgrid.uniCor['y']
data = self.spElectricData
colorScheme = self.spCurrentColorSchemeElectric
alphaVal = self.spCurrentSpaceChartOpacityValueElectric
upperMask = self.spElectricUpperMaskColor
lowerMask = self.spElectricLowerMaskColor
currentZoneName = self.spCurrentElectricZoneName
self.spCurrentDataSet = data
self.spToolbar.dataType = self.dataProject.unitsIlluminance
gridPlot(data, xCor, yCor, currentZoneName, "X Coordinates",
"Y Coordinates",
fullDataGrid=self.illData.roomgrid.gridMatrixLocations,
figVal=self.spFigure, colormap=colorScheme,
alpha=alphaVal, colorMax=self.spElectricMaxVal,
colorMin=self.spElectricMinVal, lowerMask=lowerMask,
upperMask=upperMask, plotColors=self.chkSpaceColors.checkState(),
plotContours=self.chkSpaceContours.checkState(),
contourValues=self.spContourValuesElectric,
interpolationVal=self.spInterpolateColorScheme)
self.spCanvas.draw()
@property
def illData(self):
return self._illData
@illData.setter
def illData(self,value):
self._illData = value
self.checkPointsFileSpacing(value)
@property
def spMetricsData(self):
return self._spMetricsData
@spMetricsData.setter
def spMetricsData(self,value):
self._spMetricsData = value
self.checkPointsFileSpacing(self.illData)
def checkPointsFileSpacing(self,illData):
roomGrid = illData.roomgrid
coordDict = {'z_spacings': 'coordinates in the Z axis',
'y_spacings': 'coordinates in the Y axis',
'x_spacings': 'coordinates in the X axis'}
msg = ''
setPtsErrorMsg = ''
for key, value in roomGrid.testUniformSpc.items():
if len(value) > 1:
if not setPtsErrorMsg:
setPtsErrorMsg = "The readStadicData set cannot be plotted properly due to " \
"the structure of the points file {} " \
"not being in a compatible format.\n\n".format(self.dataPtsFile)
msg += setPtsErrorMsg
msg += "The {} are not uniformly spaced. The spacing intervals" \
" are {}.\n".\
format(coordDict[key],",".join(map(str, value)))
if len(roomGrid.testUniformSpc['z_spacings'])>0:
msg += "There are multiple values for the z coordinate in the points" \
" file. The values are {}".format(",".join(map(str,roomGrid.uniCorZ)))
if msg:
self.displayInErrorBox(msg)
def displayInErrorBox(self,msg):
"""Copy existing readStadicData, then add new readStadicData to the error box and display."""
if not self.spErrorDataDisplayVisible:
for values in self.spErrorDataDisplay:
values.setVisible(True)
currentText = str(self.txtSpaceErrorBox.toPlainText())
msg = "{0}\n{1}\n{0}".format("-"*84,msg)
if currentText:
newText = currentText + "\n\n" + msg
else:
newText = msg
self.txtSpaceErrorBox.setText(newText)
def main(jsonFile=None,spaceID=None,*args):
app = QtGui.QApplication(sys.argv)
if len(sys.argv)>=3:
jsonFile = sys.argv[-2]
spaceID = int(sys.argv[-1])
else:
jsonFile=spaceID=None
form = Spatial()
form.show()
app.exec_()
if __name__ =="__main__":
pass | {
"content_hash": "559871e0b8252febc33d228f1d1fcc90",
"timestamp": "",
"source": "github",
"line_count": 967,
"max_line_length": 190,
"avg_line_length": 43.47156153050672,
"alnum_prop": 0.6354639960035207,
"repo_name": "sariths/stadicViewer",
"id": "e962fcde2fb9cbb178c6668f0941cae75f1fbeae",
"size": "42037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StadicViewer/gui/spatial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3280"
},
{
"name": "Python",
"bytes": "378076"
}
],
"symlink_target": ""
} |
'''A module to run the development server.'''
from wordfusion import app
app.run(debug=True)
| {
"content_hash": "38cba94028eed886985d46110651fedc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 45,
"avg_line_length": 31,
"alnum_prop": 0.7526881720430108,
"repo_name": "krerkkiat/word-fusion",
"id": "acd059a23cf59cd579477cdd53eb88e203320ab1",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6507"
},
{
"name": "Python",
"bytes": "10607"
}
],
"symlink_target": ""
} |
from keystoneclient.v2_0 import Client as KeystoneClient
import collectd
global NAME, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL, VERBOSE_LOGGING
NAME = "keystone_plugin"
OS_USERNAME = "username"
OS_PASSWORD = "password"
OS_TENANT_NAME = "tenantname"
OS_AUTH_URL = "http://localhost:5000/v2.0"
VERBOSE_LOGGING = False
def get_stats(user, passwd, tenant, url):
keystone = KeystoneClient(username=user, password=passwd, tenant_name=tenant, auth_url=url)
data = dict()
# Define list of keys to query for
keys = ('tenants','users','roles','services','endpoints')
for key in keys:
data["openstack.keystone.%s.count" % key] = len(keystone.__getattribute__(key).list())
tenant_list = keystone.tenants.list()
for tenant in tenant_list:
tenant_key = "openstack.keystone.tenants.tenants.%s.users.count" % tenant.name.replace(' ', '')
data[tenant_key] = len(keystone.tenants.list_users(tenant.id))
##########
# debug
#for key in data.keys():
# print "%s = %s" % (key, data[key])
##########
return data
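# Examples of the gauge names dispatched from the data gathered above:
#   openstack.keystone.users.count
#   openstack.keystone.tenants.tenants.<TenantName>.users.count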
def configure_callback(conf):
"""Received configuration information"""
global OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL, VERBOSE_LOGGING
for node in conf.children:
if node.key == "Username":
OS_USERNAME = node.values[0]
elif node.key == "Password":
OS_PASSWORD = node.values[0]
elif node.key == "TenantName":
OS_TENANT_NAME = node.values[0]
elif node.key == "AuthURL":
OS_AUTH_URL = node.values[0]
elif node.key == "Verbose":
VERBOSE_LOGGING = node.values[0]
else:
logger("warn", "Unknown config key: %s" % node.key)
def read_callback():
logger("verb", "read_callback")
info = get_stats(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL)
if not info:
logger("err", "No information received")
return
for key in info.keys():
logger('verb', 'Dispatching %s : %i' % (key, int(info[key])))
val = collectd.Values(plugin=key)
val.type = 'gauge'
val.values = [int(info[key])]
val.dispatch()
def logger(t, msg):
if t == 'err':
collectd.error('%s: %s' % (NAME, msg))
if t == 'warn':
collectd.warning('%s: %s' % (NAME, msg))
elif t == 'verb' and VERBOSE_LOGGING == True:
collectd.info('%s: %s' % (NAME, msg))
collectd.register_config(configure_callback)
collectd.warning("Initializing keystone plugin")
collectd.register_read(read_callback)
| {
"content_hash": "3d03ceb4bb7b01dcadc0f56a93b1f3cc",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 103,
"avg_line_length": 31.91358024691358,
"alnum_prop": 0.6123791102514506,
"repo_name": "zxcq/chef-openstack-ubuntu-13.10",
"id": "e82cff70f53faee68447ad3f8bb2d47c7d17f837",
"size": "3178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cookbooks/openstack-monitoring/files/default/keystone_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1694"
},
{
"name": "Python",
"bytes": "68311"
},
{
"name": "Ruby",
"bytes": "1508251"
},
{
"name": "Shell",
"bytes": "18447"
}
],
"symlink_target": ""
} |
"""
URL patterns for the OpenStack Dashboard.
"""
from django.conf import settings
from django.conf.urls import include # noqa
from django.conf.urls import patterns # noqa
from django.conf.urls.static import static # noqa
from django.conf.urls import url # noqa
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
import horizon
urlpatterns = patterns('',
url(r'^$', 'openstack_dashboard.views.splash', name='splash'),
url(r'^auth/', include('openstack_auth.urls')),
url(r'^register/', include('register.urls')),
url(r'', include(horizon.urls))
)
# Development static app and project media serving using the staticfiles app.
urlpatterns += staticfiles_urlpatterns()
# Convenience function for serving user-uploaded media during
# development. Only active if DEBUG==True and the URL prefix is a local
# path. Production media should NOT be served by Django.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^500/$', 'django.views.defaults.server_error')
)
| {
"content_hash": "c487c1fd5faaf9f0335a7d00342c8020",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 33.42424242424242,
"alnum_prop": 0.7343608340888486,
"repo_name": "neudesk/neucloud",
"id": "b719bcee40723939b9946fdbd8b2e4b7643ac718",
"size": "1912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "169426"
},
{
"name": "JavaScript",
"bytes": "426538"
},
{
"name": "Python",
"bytes": "3100734"
},
{
"name": "Shell",
"bytes": "13743"
}
],
"symlink_target": ""
} |
"""Tests for specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import interval_bound_propagation as ibp
import numpy as np
import tensorflow.compat.v1 as tf
MockLinearModule = collections.namedtuple('MockLinearModule', ['w', 'b'])
MockModule = collections.namedtuple(
'MockModule', ['input_bounds', 'output_bounds', 'module'])
def _build_spec_input():
  # Specifications expect a list of objects with output_bounds or input_bounds
# attributes.
w = np.identity(2, dtype=np.float32)
b = np.ones(2, dtype=np.float32)
snt_module = MockLinearModule(tf.constant(w), tf.constant(b))
z_lower = np.array([[1, 2]], dtype=np.float32)
z_upper = np.array([[3, 4]], dtype=np.float32)
input_bounds = ibp.IntervalBounds(tf.constant(z_lower), tf.constant(z_upper))
z_lower += b
z_upper += b
output_bounds = ibp.IntervalBounds(tf.constant(z_lower), tf.constant(z_upper))
return [MockModule(input_bounds, output_bounds, snt_module)]
def _build_classification_specification(label, num_classes, collapse):
"""Returns a LinearSpecification for adversarial classification."""
# Pre-construct the specifications of the different classes.
eye = np.eye(num_classes - 1)
specifications = []
for i in range(num_classes):
specifications.append(np.concatenate(
[eye[:, :i], -np.ones((num_classes - 1, 1)), eye[:, i:]], axis=1))
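  # Each row of specifications[i] dotted with the logits gives
  # logits[j] - logits[i] for some j != i, i.e. the margins that must stay
  # negative for class i to be the prediction.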
specifications = np.array(specifications, dtype=np.float32)
specifications = tf.constant(specifications)
# We can then use gather.
c = tf.gather(specifications, label)
# By construction all specifications are relevant.
d = tf.zeros(shape=(tf.shape(label)[0], num_classes - 1))
return ibp.LinearSpecification(c, d, prune_irrelevant=False,
collapse=collapse)
class SpecificationTest(tf.test.TestCase):
def testLinearSpecification(self):
# c has shape [batch_size, num_specifications, num_outputs]
# d has shape [batch_size, num_specifications]
c = tf.constant([[[1, 2]]], dtype=tf.float32)
d = tf.constant([[3]], dtype=tf.float32)
# The above is equivalent to z_{K,1} + 2 * z_{K,2} + 3 <= 0
spec = ibp.LinearSpecification(c, d, collapse=False)
spec_collapse = ibp.LinearSpecification(c, d, collapse=True)
modules = _build_spec_input()
values = spec(modules)
values_collapse = spec_collapse(modules)
with self.test_session() as sess:
self.assertAlmostEqual(17., sess.run(values).item())
self.assertAlmostEqual(17., sess.run(values_collapse).item())
def testEquivalenceLinearClassification(self):
num_classes = 3
def _build_model():
layer_types = (
('conv2d', (2, 2), 4, 'VALID', 1),
('activation', 'relu'),
('linear', 10),
('activation', 'relu'))
return ibp.DNN(num_classes, layer_types)
# Input.
batch_size = 100
width = height = 2
channels = 3
num_restarts = 10
z = tf.random.uniform((batch_size, height, width, channels),
minval=-1., maxval=1., dtype=tf.float32)
y = tf.random.uniform((batch_size,), minval=0, maxval=num_classes,
dtype=tf.int64)
predictor = _build_model()
predictor = ibp.VerifiableModelWrapper(predictor)
logits = predictor(z)
random_logits1 = tf.random.uniform((num_restarts, batch_size, num_classes))
random_logits2 = tf.random.uniform((num_restarts, num_classes - 1,
batch_size, num_classes))
input_bounds = ibp.IntervalBounds(z - 2., z + 4.)
predictor.propagate_bounds(input_bounds)
# Specifications.
s1 = ibp.ClassificationSpecification(y, num_classes, collapse=False)
s1_collapse = ibp.ClassificationSpecification(y, num_classes, collapse=True)
s2 = _build_classification_specification(y, num_classes, collapse=False)
s2_collapse = _build_classification_specification(y, num_classes,
collapse=True)
def _build_values(s, s_collapse):
return [
s(predictor.modules),
s_collapse(predictor.modules),
s.evaluate(logits),
s.evaluate(random_logits1),
s.evaluate(random_logits2)
]
v1 = _build_values(s1, s1_collapse)
v2 = _build_values(s2, s2_collapse)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output1, output2 = sess.run([v1, v2])
for a, b in zip(output1, output2):
self.assertTrue(np.all(np.abs(a - b) < 1e-5))
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "bc8fc51d455f28b16306f955fabd0ac2",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 80,
"avg_line_length": 38.19672131147541,
"alnum_prop": 0.6484978540772532,
"repo_name": "deepmind/interval-bound-propagation",
"id": "e672ab1b3c5262837d7b8fbf10802ea2b9706875",
"size": "5279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interval_bound_propagation/tests/specification_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270398"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Secret(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, data=None, kind=None, metadata=None, string_data=None, type=None):
"""
V1Secret - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'data': 'dict(str, str)',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'string_data': 'dict(str, str)',
'type': 'str'
}
self.attribute_map = {
'api_version': 'apiVersion',
'data': 'data',
'kind': 'kind',
'metadata': 'metadata',
'string_data': 'stringData',
'type': 'type'
}
self._api_version = api_version
self._data = data
self._kind = kind
self._metadata = metadata
self._string_data = string_data
self._type = type
@property
def api_version(self):
"""
Gets the api_version of this V1Secret.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1Secret.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1Secret.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1Secret.
:type: str
"""
self._api_version = api_version
@property
def data(self):
"""
Gets the data of this V1Secret.
Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
:return: The data of this V1Secret.
:rtype: dict(str, str)
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this V1Secret.
Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
:param data: The data of this V1Secret.
:type: dict(str, str)
"""
self._data = data
@property
def kind(self):
"""
Gets the kind of this V1Secret.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1Secret.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1Secret.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1Secret.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1Secret.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:return: The metadata of this V1Secret.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1Secret.
Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1Secret.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def string_data(self):
"""
Gets the string_data of this V1Secret.
stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.
:return: The string_data of this V1Secret.
:rtype: dict(str, str)
"""
return self._string_data
@string_data.setter
def string_data(self, string_data):
"""
Sets the string_data of this V1Secret.
stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.
:param string_data: The string_data of this V1Secret.
:type: dict(str, str)
"""
self._string_data = string_data
@property
def type(self):
"""
Gets the type of this V1Secret.
Used to facilitate programmatic handling of secret data.
:return: The type of this V1Secret.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1Secret.
Used to facilitate programmatic handling of secret data.
:param type: The type of this V1Secret.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
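# Illustrative usage sketch (not part of the generated client); assumes
# V1ObjectMeta is available from the same generated models package:
#
#   secret = V1Secret(api_version="v1", kind="Secret",
#                     metadata=V1ObjectMeta(name="example"),
#                     string_data={"password": "s3cr3t"}, type="Opaque")
#   print(secret.to_dict())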
| {
"content_hash": "01b9934274708b20517a53b044ee7f5d",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 317,
"avg_line_length": 33.96280991735537,
"alnum_prop": 0.5995863243703614,
"repo_name": "skuda/client-python",
"id": "edd624620c1b789ef492537c3135f03169685906",
"size": "8236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_secret.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
} |
from django.views.generic import FormView
from .forms import RoundForms
class HomePageView(FormView):
template_name = 'civ/home.html'
form_class = RoundForms
def get(self, request, *args, **kwargs):
self.initial.update(request.session.get('base_initial', {}))
return super(HomePageView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
field_list = []
for field in context['form']:
field_list.append(field)
context['field_list'] = field_list
return context
def form_valid(self, form):
"""
        If the form is valid, save the input in the session and re-render the page with the calculation result.
"""
self.request.session['base_initial'] = form.cleaned_data.copy()
return self.render_to_response(self.get_context_data(form=form, result=form.calculate()))
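    # The cleaned form data is cached in the session under 'base_initial', so a
    # subsequent GET pre-fills the form with the previous round's values.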
| {
"content_hash": "d9a62affc31cbafdcd1e6961b84b5fc4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 97,
"avg_line_length": 32.6551724137931,
"alnum_prop": 0.6219640971488912,
"repo_name": "kuza/civhelper-II",
"id": "5449466b4db9651cd2ae2007064a7dd90067e640",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "civ/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13689"
}
],
"symlink_target": ""
} |
from celery.app import shared_task
from celery.app.base import Celery
from scrapy.crawler import Crawler
from scrapy.conf import settings
from scrapy import log, project, signals
from twisted.internet import reactor
from billiard import Process
from scrapy.utils.project import get_project_settings
from craigslist_sample.spiders.test2 import MySpider
from celery.utils.log import get_task_logger
app = Celery('tasks', broker='amqp://guest@localhost//')
app.config_from_object('celeryconfig')
logger = get_task_logger(__name__)
class UrlCrawlerScript(Process):
def __init__(self, spider):
Process.__init__(self)
settings = get_project_settings()
self.crawler = Crawler(settings)
self.crawler.configure()
# self.crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
self.spider = spider
def run(self):
self.crawler.crawl(self.spider)
self.crawler.start()
# reactor.run()
def run_spider(url):
spider = MySpider(url)
crawler = UrlCrawlerScript(spider)
crawler.start()
crawler.join()
@app.task
def crawl(domain):
return run_spider(domain) | {
"content_hash": "f58ea5d906e7141f69bd00652f51702b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 86,
"avg_line_length": 29.975,
"alnum_prop": 0.6872393661384487,
"repo_name": "gerosalesc/scrapy-celery-sample",
"id": "0329a9d79ec3b7ad55a44e50160fcdbbe8357c24",
"size": "1298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "278652"
}
],
"symlink_target": ""
} |
import unittest
from pypika import (
Field as F,
Interval,
)
from pypika.enums import Dialects
dt = F("dt")
class AddIntervalTests(unittest.TestCase):
def test_add_microsecond(self):
c = dt + Interval(microseconds=1)
self.assertEqual("\"dt\"+INTERVAL '1 MICROSECOND'", str(c))
def test_add_second(self):
c = dt + Interval(seconds=1)
self.assertEqual("\"dt\"+INTERVAL '1 SECOND'", str(c))
def test_add_minute(self):
c = dt + Interval(minutes=1)
self.assertEqual("\"dt\"+INTERVAL '1 MINUTE'", str(c))
def test_add_day(self):
c = dt + Interval(days=1)
self.assertEqual("\"dt\"+INTERVAL '1 DAY'", str(c))
def test_add_week(self):
c = dt + Interval(weeks=1)
self.assertEqual("\"dt\"+INTERVAL '1 WEEK'", str(c))
def test_add_month(self):
c = dt + Interval(months=1)
self.assertEqual("\"dt\"+INTERVAL '1 MONTH'", str(c))
def test_add_quarter(self):
c = dt + Interval(quarters=1)
self.assertEqual("\"dt\"+INTERVAL '1 QUARTER'", str(c))
def test_add_year(self):
c = dt + Interval(years=1)
self.assertEqual("\"dt\"+INTERVAL '1 YEAR'", str(c))
def test_add_default(self):
c = dt + Interval(days=0)
self.assertEqual("\"dt\"+INTERVAL '0 DAY'", str(c))
class AddIntervalMultipleUnitsTests(unittest.TestCase):
def test_add_second_microsecond(self):
c = dt + Interval(seconds=1, microseconds=1)
self.assertEqual("\"dt\"+INTERVAL '1.1 SECOND_MICROSECOND'", str(c))
def test_add_minute_microsecond(self):
c = dt + Interval(minutes=1, microseconds=1)
self.assertEqual("\"dt\"+INTERVAL '1:0.1 MINUTE_MICROSECOND'", str(c))
def test_add_minute_second(self):
c = dt + Interval(minutes=1, seconds=1)
self.assertEqual("\"dt\"+INTERVAL '1:1 MINUTE_SECOND'", str(c))
def test_add_hour_microsecond(self):
c = dt + Interval(hours=1, microseconds=1)
self.assertEqual("\"dt\"+INTERVAL '1:0:0.1 HOUR_MICROSECOND'", str(c))
def test_add_hour_second(self):
c = dt + Interval(hours=1, seconds=1)
self.assertEqual("\"dt\"+INTERVAL '1:0:1 HOUR_SECOND'", str(c))
def test_add_hour_minute(self):
c = dt + Interval(hours=1, minutes=1)
self.assertEqual("\"dt\"+INTERVAL '1:1 HOUR_MINUTE'", str(c))
def test_add_day_microsecond(self):
c = dt + Interval(days=1, microseconds=1)
self.assertEqual("\"dt\"+INTERVAL '1 0:0:0.1 DAY_MICROSECOND'", str(c))
def test_add_day_second(self):
c = dt + Interval(days=1, seconds=1)
self.assertEqual("\"dt\"+INTERVAL '1 0:0:1 DAY_SECOND'", str(c))
def test_add_day_minute(self):
c = dt + Interval(days=1, minutes=1)
self.assertEqual("\"dt\"+INTERVAL '1 0:1 DAY_MINUTE'", str(c))
def test_add_day_hour(self):
c = dt + Interval(days=1, hours=1)
self.assertEqual("\"dt\"+INTERVAL '1 1 DAY_HOUR'", str(c))
def test_add_year_month(self):
c = dt + Interval(years=1, months=1)
self.assertEqual("\"dt\"+INTERVAL '1-1 YEAR_MONTH'", str(c))
def test_add_value_right(self):
c = Interval(microseconds=1) - dt
self.assertEqual("INTERVAL '1 MICROSECOND'-\"dt\"", str(c))
def test_add_value_complex_expressions(self):
c = dt + Interval(quarters=1) + Interval(weeks=1)
self.assertEqual("\"dt\"+INTERVAL '1 QUARTER'+INTERVAL '1 WEEK'", str(c))
class DialectIntervalTests(unittest.TestCase):
def test_mysql_dialect_uses_single_quotes_around_expression_in_an_interval(self):
c = Interval(days=1).get_sql(dialect=Dialects.MYSQL)
self.assertEqual("INTERVAL '1' DAY", c)
def test_oracle_dialect_uses_single_quotes_around_expression_in_an_interval(self):
c = Interval(days=1).get_sql(dialect=Dialects.ORACLE)
self.assertEqual("INTERVAL '1' DAY", c)
def test_vertica_dialect_uses_single_quotes_around_interval(self):
c = Interval(days=1).get_sql(dialect=Dialects.VERTICA)
self.assertEqual("INTERVAL '1 DAY'", c)
def test_redshift_dialect_uses_single_quotes_around_interval(self):
c = Interval(days=1).get_sql(dialect=Dialects.REDSHIFT)
self.assertEqual("INTERVAL '1 DAY'", c)
def test_postgresql_dialect_uses_single_quotes_around_interval(self):
c = Interval(days=1).get_sql(dialect=Dialects.POSTGRESQL)
self.assertEqual("INTERVAL '1 DAY'", c)
class TestNegativeIntervals(unittest.TestCase):
def test_day(self):
c = Interval(days=-1).get_sql()
self.assertEqual("INTERVAL '-1 DAY'", c)
def test_week(self):
c = Interval(weeks=-1).get_sql()
self.assertEqual("INTERVAL '-1 WEEK'", c)
def test_month(self):
c = Interval(months=-1).get_sql()
self.assertEqual("INTERVAL '-1 MONTH'", c)
def test_year(self):
c = Interval(years=-1).get_sql()
self.assertEqual("INTERVAL '-1 YEAR'", c)
def test_year_month(self):
c = Interval(years=-1, months=-4).get_sql()
self.assertEqual("INTERVAL '-1-4 YEAR_MONTH'", c)
class TruncateTrailingZerosTests(unittest.TestCase):
def test_do_not_truncate_integer_values(self):
i = Interval(seconds=10)
self.assertEqual("INTERVAL '10 SECOND'", str(i))
    def test_do_not_truncate_months_between_years_and_days(self):
i = Interval(years=10, days=10)
self.assertEqual("INTERVAL '10-0-10 YEAR_DAY'", str(i))
| {
"content_hash": "1700f9298d55ab2131e5c932cfa0a9a8",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 86,
"avg_line_length": 30.972067039106147,
"alnum_prop": 0.6181457431457431,
"repo_name": "kayak/pypika",
"id": "38db8ef2f8e812a97c6980a7ec6b9a3909d804fb",
"size": "5544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypika/tests/test_date_math.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "394110"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='onetimepad',
version='1.4',
description='A hacky implementation of One-time pad',
long_description=open('README.rst').read(),
py_modules=['onetimepad'],
url='http://jailuthra.in/onetimepad',
author='Jai Luthra',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Security :: Cryptography',
],
entry_points={
'console_scripts': [
'onetimepad = onetimepad:main',
],
},
)
| {
"content_hash": "6c7703819c8f1ef561628ca6c42859be",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 57,
"avg_line_length": 23.4375,
"alnum_prop": 0.576,
"repo_name": "jailuthra/onetimepad",
"id": "0b12f3776c60700ef53c3f83816d30701b6418b5",
"size": "750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3312"
}
],
"symlink_target": ""
} |
import re
from django.db import models
from django.contrib.contenttypes.models import ContentType
from . fields import TaggedRelationFormField
from taggit.models import Tag, TaggedItem
from taggit.managers import TaggableManager
from taggit.utils import parse_tags
def get_model():
return Tag
def get_tag_manager():
return TaggableManager()
def tokenize_tags(tags_string):
"""
    Extract usable tags from a block of text.
    :param tags_string: a string of text
    :return: a list of unique tag strings
"""
# text is parsed in two steps:
    # the first step extracts every single word that is at least 3 chars long
# and that contains only alphanumeric characters, underscores and dashes
tags_string = tags_string.lower().strip(",")
single_words = set([w[:100] for w in re.split(';|,|\*|\n| ', tags_string)
if len(w) >= 3 and re.match("^[A-Za-z0-9_-]*$", w)])
    # the second step splits the original string using commas as separators
comma_separated = set([t[:100] for t in tags_string.split(",") if t])
    # the resulting sets are merged using a union
return list(single_words | comma_separated)
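# Minimal usage sketch for the tokenizer above (the input string is a made-up
# example). Single words of three or more alphanumeric/underscore/dash
# characters and whole comma-separated chunks are both kept, lower-cased and
# merged into one list.
def _tokenize_tags_example():
    tags = tokenize_tags("Python,Django Web")
    # Ordering is not guaranteed because sets are used internally, so sort
    # for a stable result: ['django', 'django web', 'python', 'web']
    return sorted(tags)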
def tags_to_string(tags):
return ','.join(tags).lower()
def set_auto_tags_for_form(form, auto_tags):
for name, field in form.fields.items():
if isinstance(field, TaggedRelationFormField) and \
name in form.changed_data and \
form.cleaned_data.get(name):
form.cleaned_data[name].auto_tags = auto_tags
def set_auto_tags_for_formset(formset, auto_tags):
for form in formset:
set_auto_tags_for_form(form, auto_tags)
def update_changed_tags(new_tags, old_tags):
args = None
for tag in old_tags:
q = models.Q(tag__name=tag)
if not args:
args = q
else:
args = q | args
types = TaggedItem.objects.filter(args).values(
'content_type', 'object_id').annotate(
cs=models.Count('content_type')).filter(cs=len(old_tags))
    # get_or_create returns a (tag, created) tuple; keep just the Tag objects
    add_tags = [Tag.objects.get_or_create(name=tag)[0] for tag in new_tags]
mapping = {}
for t in types:
if not t['content_type'] in mapping:
mapping[t['content_type']] = []
mapping[t['content_type']].append(t['object_id'])
for t, ids in mapping.items():
t = ContentType.objects.get_for_id(t)
m = t.model_class()
for ins in m.objects.filter(pk__in=ids):
            ins.tags.add(*add_tags)
def get_tags_from_data(data, view_tags):
view_tags = set(tokenize_tags(','.join(view_tags)))
old_tags = set(tokenize_tags(data.get('view_tags', '')))
auto_tags = set(tokenize_tags(data.get('auto_tags', '')))
changed_tags = set(view_tags).difference(old_tags)
if changed_tags:
auto_tags = changed_tags.union(auto_tags)
return set(auto_tags), changed_tags, old_tags
| {
"content_hash": "2d43320d5efa0b423fc27614e82cfd55",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 31.619565217391305,
"alnum_prop": 0.6352698521828807,
"repo_name": "ff0000/scarlet",
"id": "70faad7ba3c22293c6d361b2fc7ac858af42b536",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scarlet/cms/internal_tags/taggit_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "217430"
},
{
"name": "HTML",
"bytes": "43216"
},
{
"name": "JavaScript",
"bytes": "2200686"
},
{
"name": "Python",
"bytes": "508579"
},
{
"name": "Ruby",
"bytes": "485"
},
{
"name": "Shell",
"bytes": "1813"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="tickmode", parent_name="volume.colorbar", **kwargs):
super(TickmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
values=kwargs.pop("values", ["auto", "linear", "array"]),
**kwargs,
)
| {
"content_hash": "71d60e1e42ebe09ce146b9857c06021a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 41.61538461538461,
"alnum_prop": 0.609981515711645,
"repo_name": "plotly/plotly.py",
"id": "96ed16998502fe5c9b84f06177e8a45550581271",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/volume/colorbar/_tickmode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Configuration parameters for client builder and server packaging."""
import os
import time
from grr.lib import config_lib
from grr.lib import rdfvalue
from grr.lib import type_info
# Windows Memory driver information.
config_lib.DEFINE_string("MemoryDriver.driver_service_name",
"Pmem",
"The SCCM service name for the driver.")
config_lib.DEFINE_string("MemoryDriver.driver_display_name",
"%(Client.name) Pmem",
"The SCCM display name for the driver.")
config_lib.DEFINE_list("MemoryDriver.driver_files", [],
"The default drivers to use.")
config_lib.DEFINE_list("MemoryDriver.aff4_paths", [],
"The AFF4 paths to the driver objects.")
config_lib.DEFINE_string("MemoryDriver.device_path", r"\\\\.\\pmem",
"The device path which the client will open after "
"installing this driver.")
config_lib.DEFINE_string("MemoryDriver.service_name", "pmem",
"The name of the service created for "
"the driver (Windows).")
config_lib.DEFINE_string("MemoryDriver.display_name", "%(service_name)",
"The display name of the service created for "
"the driver (Windows).")
config_lib.DEFINE_option(type_info.RDFValueType(
rdfclass=rdfvalue.RDFURN,
name="Config.aff4_root", default="aff4:/config/",
description=("The path where the configs are stored in the aff4 "
"namespace.")))
config_lib.DEFINE_option(type_info.RDFValueType(
rdfclass=rdfvalue.RDFURN,
name="Config.python_hack_root",
default="%(Config.aff4_root)/python_hacks",
description=("The path where python hacks are stored in the aff4 "
"namespace.")))
# Executables must be signed and uploaded to their dedicated AFF4 namespace.
config_lib.DEFINE_option(type_info.RDFValueType(
rdfclass=rdfvalue.RDFURN,
name="Executables.aff4_path",
description="The aff4 path to signed executables.",
default="%(Config.aff4_root)/executables/%(Client.platform)"))
config_lib.DEFINE_string(
name="Executables.installer",
default=("%(Executables.aff4_path)/installers/"
"%(ClientBuilder.output_basename)"
"%(ClientBuilder.output_extension)"),
help="The location of the generated installer in the config directory.")
config_lib.DEFINE_string(
name="ClientBuilder.output_extension",
default=None,
help="The file extension for the client (OS dependent).")
config_lib.DEFINE_string(
name="ClientBuilder.package_dir",
default=None,
help="OSX package name.")
class PathTypeInfo(type_info.String):
"""A path to a file or a directory."""
def __init__(self, must_exist=True, **kwargs):
self.must_exist = must_exist
super(PathTypeInfo, self).__init__(**kwargs)
def Validate(self, value):
value = super(PathTypeInfo, self).Validate(value)
if self.must_exist and not os.access(value, os.R_OK):
raise type_info.TypeValueError(
"Path %s does not exist for %s" % (value, self.name))
return value
def FromString(self, string):
return os.path.normpath(string)
# PyInstaller build configuration.
config_lib.DEFINE_option(PathTypeInfo(
name="PyInstaller.path", must_exist=False,
default="c:/grr_build/pyinstaller/pyinstaller.py",
help="Path to the main pyinstaller.py file."))
config_lib.DEFINE_string(
name="PyInstaller.spec",
help="The spec file contents to use for building the client.",
default=r"""
# By default build in one dir mode.
a = Analysis\(
["%(%(ClientBuilder.source)|unixpath)/grr/client/client.py"],
hiddenimports=[],
hookspath=None\)
# Remove some optional libraries that would be packed but serve no purpose.
for prefix in ["IPython"]:
for collection in [a.binaries, a.datas, a.pure]:
for item in collection[:]:
if item[0].startswith\(prefix\):
collection.remove\(item\)
pyz = PYZ\(
a.pure\)
exe = EXE\(
pyz,
a.scripts,
exclude_binaries=1,
name='build/grr-client',
debug=False,
strip=False,
upx=False,
console=True,
version='%(PyInstaller.build_dir)/version.txt',
icon='%(PyInstaller.build_dir)/grr.ico'\)
coll = COLLECT\(
exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=False,
name='grr-client'
\)
""")
config_lib.DEFINE_string(
name="PyInstaller.distpath",
help=("Passed to PyInstaller as the --distpath flag. This sets the output "
"directory for PyInstaller."),
default="./dist")
config_lib.DEFINE_string(
name="PyInstaller.version",
help="The version.txt file contents to use for building the client.",
default=r"""
VSVersionInfo\(
ffi=FixedFileInfo\(
filevers=\(%(Client.version_major), %(Client.version_minor),
%(Client.version_revision), %(Client.version_release)\),
prodvers=\(%(Client.version_major), %(Client.version_minor),
%(Client.version_revision), %(Client.version_release)\),
mask=0x3f,
flags=0x0,
OS=0x40004,
fileType=0x1,
subtype=0x0,
date=\(0, 0\)
\),
kids=[
StringFileInfo\(
[
StringTable\(
'040904B0',
[
StringStruct\('CompanyName',
"<---------------- Client.company_name ------------------->"\),
StringStruct\('FileDescription',
"<---------------- Client.description ------------------->"\),
StringStruct\('FileVersion',
"<---------------- Client.version_string ------------------->"\),
StringStruct\('ProductName',
"<---------------- Client.name ------------------->"\),
StringStruct\('OriginalFilename',
"<---------------- ClientBuilder.package_name ------------------->"\),
]\),
]\),
VarFileInfo\([VarStruct\('Translation', [1033, 1200]\)]\)
]
\)
""")
config_lib.DEFINE_bytes(
"PyInstaller.icon",
"%(%(ClientBuilder.source)/grr/gui/static/images/grr.ico|file)",
"The icon file contents to use for building the client.")
config_lib.DEFINE_string(
"PyInstaller.build_dir",
"./build",
"The path to the build directory.")
config_lib.DEFINE_string(
"PyInstaller.dpkg_root",
default=None,
help="Pyinstaller dpkg root.")
config_lib.DEFINE_string(
"PyInstaller.build_root_dir",
default=None,
help="Pyinstaller build root.")
config_lib.DEFINE_string(
name="Client.prefix", default="",
help="A prefix for the client name, usually dbg_ for debug builds.")
config_lib.DEFINE_string(
name="ClientBuilder.output_basename",
default=("%(Client.prefix)%(Client.name)_"
"%(Client.version_string)_%(Client.arch)"),
help="The base name of the output package.")
# Windows client specific options.
config_lib.DEFINE_bool(
"ClientBuilder.console", default=False,
help="Should the application be built as a console program. "
"This aids debugging in windows.")
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.nanny_source_dir", must_exist=True,
default="%(ClientBuilder.source)/grr/client/nanny/",
help="Path to the windows nanny VS solution file."))
config_lib.DEFINE_choice(
name="ClientBuilder.build_type",
default="Release",
choices=["Release", "Debug"],
help="Type of build (Debug, Release)")
config_lib.DEFINE_string(name="ClientBuilder.template_extension",
default=".zip",
help="The extension to appear on templates.")
config_lib.DEFINE_string(
name="PyInstaller.template_basename",
default=("grr-client_%(Client.version_string)_%(Client.arch)"),
help="The template name of the output package.")
config_lib.DEFINE_string(
name="PyInstaller.template_filename",
default=(
"%(PyInstaller.template_basename)%(ClientBuilder.template_extension)"),
help="The template file name of the output package.")
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.template_path", must_exist=False,
default=(
"%(ClientBuilder.executables_path)/%(Client.platform)/templates/"
"%(PyInstaller.template_filename)"),
help="The full path to the executable template file."))
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.executables_path", must_exist=False,
default="%(ClientBuilder.source)/grr/executables",
help="The path to the grr executables directory."))
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.output_filename", must_exist=False,
default=(
"%(ClientBuilder.output_basename)%(ClientBuilder.output_extension)"),
help="The filename of the generated installer file."))
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.output_path", must_exist=False,
default=(
"%(ClientBuilder.executables_path)/%(Client.platform)"
"/installers/%(ClientBuilder.output_filename)"),
help="The full path to the generated installer file."))
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.generated_config_path", must_exist=False,
default=(
"%(ClientBuilder.executables_path)/%(Client.platform)"
"/config/%(ClientBuilder.output_basename).yaml"),
help="The full path to where we write a generated config."))
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.unzipsfx_stub", must_exist=False,
default=("%(ClientBuilder.executables_path)/%(Client.platform)"
"/templates/unzipsfx/unzipsfx-%(Client.arch).exe"),
help="The full path to the zip self extracting stub."))
config_lib.DEFINE_string(
name="ClientBuilder.config_filename",
default="%(Client.binary_name).yaml",
help=("The name of the configuration file which will be embedded in the "
"deployable binary."))
config_lib.DEFINE_string(
name="ClientBuilder.autorun_command_line",
default=("%(Client.binary_name) --install "
"--config %(ClientBuilder.config_filename)"),
help=("The command that the installer will execute after "
"unpacking the package."))
config_lib.DEFINE_list(
name="ClientBuilder.installer_plugins",
default=[],
help="Plugins that will copied to the client installation file and run "
"at install time.")
config_lib.DEFINE_list(
name="ClientBuilder.plugins",
default=[],
help="Plugins that will copied to the client installation file and run when"
"the client is running.")
config_lib.DEFINE_string(
name="ClientBuilder.client_logging_filename",
default="%(Logging.path)/%(Client.name)_log.txt",
help="Filename for logging, to be copied to Client section in the client "
"that gets built.")
config_lib.DEFINE_string(
name="ClientBuilder.client_logging_path",
default="/tmp",
help="Filename for logging, to be copied to Client section in the client "
"that gets built.")
config_lib.DEFINE_list(
name="ClientBuilder.client_logging_engines",
default=["stderr", "file"],
help="Enabled logging engines, to be copied to Logging.engines in client "
"configuration.")
config_lib.DEFINE_string(
name="ClientBuilder.client_installer_logfile",
default="%(Logging.path)/%(Client.name)_installer.txt",
help="Logfile for logging the client installation process, to be copied to"
" Installer.logfile in client built.")
config_lib.DEFINE_string(
name="ClientBuilder.maintainer",
default="GRR <[email protected]>",
help="The client package's maintainer.")
config_lib.DEFINE_string(
name="ClientBuilder.debian_build_time",
default=time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
help="The build time put into the debian package. Needs to be formatted"
" like the output of 'date -R'.")
config_lib.DEFINE_string(
name="ClientBuilder.rpm_build_time",
default=time.strftime("%a %b %d %Y", time.gmtime()),
help="The build time put into the rpm package. Needs to be formatted"
" according to the rpm specs.")
config_lib.DEFINE_string(
name="ClientBuilder.debian_version",
default="%(Client.version_numeric)-1",
help="The version of the debian package.")
config_lib.DEFINE_string(
name="ClientBuilder.debian_package_base",
default=("%(ClientBuilder.package_name)_"
"%(ClientBuilder.debian_version)_%(Client.arch)"),
help="The filename of the debian package without extension.")
config_lib.DEFINE_string(
name="ClientBuilder.package_name",
default="%(Client.name)",
help="The debian package name.")
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.source", must_exist=False,
default=os.path.normpath(__file__ + "/../../.."),
help="The location of the source files."))
config_lib.DEFINE_option(type_info.PathTypeInfo(
name="ClientBuilder.executables_dir",
default="%(ClientBuilder.source)/grr/executables",
help="The directory that contains the executables."))
config_lib.DEFINE_string(
name="ClientBuilder.build_time",
default=time.ctime(),
help="Time of build to embed into binary.")
config_lib.DEFINE_string(
"ClientBuilder.packagemaker",
default=("/Developer/Applications/Utilities/PackageMaker.app/Contents"
"/MacOS/PackageMaker"),
help="Location of the PackageMaker executable.")
config_lib.DEFINE_string(
"ClientBuilder.vs_arch",
default=None,
help="Visual studio architecture string.")
config_lib.DEFINE_string(
"ClientBuilder.vs_env_script",
default=None,
help="Path to visual studio environment variables bat file.")
config_lib.DEFINE_string(
"ClientBuilder.vs_dir",
default=None,
help="Path to visual studio installation dir.")
config_lib.DEFINE_string(
"ClientBuilder.build_root_dir",
default=None,
help="Root directory for client builds.")
config_lib.DEFINE_string(
"ClientBuilder.build_src_dir",
default=None,
help="Location of the grr src for building.")
config_lib.DEFINE_string(
"ClientBuilder.build_dest",
default=None,
help="Output directory for client building.")
config_lib.DEFINE_string(
"ClientBuilder.install_dir",
default=None,
help="Target installation directory for client builds.")
config_lib.DEFINE_string(
"ClientBuilder.mangled_output_basename",
default=None,
help="OS X package maker mangled name.")
config_lib.DEFINE_string(
"ClientBuilder.package_maker_organization",
default=None,
help="OS X package maker organization name.")
config_lib.DEFINE_string(
"ClientBuilder.package_maker_path",
default=None,
help="Path to OS X package maker binary.")
config_lib.DEFINE_string(
"ClientBuilder.target_dir",
default=None,
help="ClientBuilder target directory.")
config_lib.DEFINE_string(
"ClientBuilder.daemon_link",
default=None,
help="ClientBuilder daemon link.")
| {
"content_hash": "a4aa749ef018461461ebf94a4879229f",
"timestamp": "",
"source": "github",
"line_count": 455,
"max_line_length": 80,
"avg_line_length": 32.995604395604396,
"alnum_prop": 0.6643575567841205,
"repo_name": "ojengwa/grr",
"id": "35dff2d449d9d8bae043238898484a4e6018baeb",
"size": "15035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "7781"
},
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "37269"
},
{
"name": "HTML",
"bytes": "30838"
},
{
"name": "JavaScript",
"bytes": "831938"
},
{
"name": "Makefile",
"bytes": "6524"
},
{
"name": "Protocol Buffer",
"bytes": "170942"
},
{
"name": "Python",
"bytes": "4652186"
},
{
"name": "Ruby",
"bytes": "1131"
},
{
"name": "Shell",
"bytes": "42248"
}
],
"symlink_target": ""
} |
'''
Auxiliary space class and helper functions.
'''
import time
import numpy as np
import scipy.linalg.blas
from pyscf import lib
from pyscf.lib import logger
from pyscf import __config__
from pyscf.lib.parameters import LARGE_DENOM
class AuxiliarySpace(object):
''' Simple container to hold the energies, couplings and chemical
potential associated with an auxiliary space.
Attributes:
energy : 1D array
Energies of the poles
coupling : 2D array
Coupling vector of the poles to each physical state
chempot : float
            Chemical potential associated with the energies
'''
def __init__(self, energy, coupling, chempot=0.0):
self.energy = np.asarray(energy)
self.coupling = np.asarray(coupling, order='C')
self.chempot = chempot
self.sort()
def sort(self):
''' Sort in-place via the energies to make slicing easier.
'''
arg = np.argsort(self.energy)
self.energy = self.energy[arg]
self.coupling = self.coupling[:,arg]
def real_freq_spectrum(self, *args, **kwargs):
''' See subclasses.
'''
raise NotImplementedError
def compress(self, *args, **kwargs):
''' See subclasses.
'''
raise NotImplementedError
def get_occupied(self):
''' Returns a copy of the current AuxiliarySpace object
containing only the poles with energy less than the
chemical potential. The object should already be sorted.
Returns:
:class:`AuxiliarySpace` with only the occupied auxiliaries
'''
nocc = np.searchsorted(self.energy, self.chempot)
energy = np.copy(self.energy[:nocc])
coupling = np.copy(self.coupling[:,:nocc])
return self.__class__(energy, coupling, chempot=self.chempot)
def get_virtual(self):
''' Returns a copy of the current AuxiliarySpace object
containing only the poles with energy greater than the
chemical potential. The object should already be sorted.
Returns:
:class:`AuxiliarySpace` with only the virtual auxiliaries
'''
nocc = np.searchsorted(self.energy, self.chempot)
energy = np.copy(self.energy[nocc:])
coupling = np.copy(self.coupling[:,nocc:])
return self.__class__(energy, coupling, chempot=self.chempot)
def get_array(self, phys, out=None, chempot=0.0):
''' Expresses the auxiliaries as an array, i.e. the extended
Fock matrix in AGF2 or Hamiltonian of ADC(2).
Args:
phys : 2D array
Physical (1p + 1h) part of the matrix
Kwargs:
out : 2D array
If provided, use to store output
chempot : float
Scale energies (by default, :attr:`chempot` is not used
and energies retain their values). Default 0.0
Returns:
Array representing the coupling of the auxiliary space to
the physical space
'''
_check_phys_shape(self, phys)
dtype = np.result_type(phys.dtype, self.energy.dtype, self.coupling.dtype)
if out is None:
out = np.zeros((self.nphys+self.naux,)*2, dtype=dtype)
sp = slice(None, self.nphys)
sa = slice(self.nphys, None)
out[sp,sp] = phys
out[sp,sa] = self.coupling
out[sa,sp] = self.coupling.conj().T
out[sa,sa][np.diag_indices(self.naux)] = self.energy - chempot
return out
def dot(self, phys, vec, out=None, chempot=0.0):
''' Returns the dot product of :func:`get_array` with a vector.
Args:
phys : 2D array
Physical (1p + 1h) part of the matrix
vec : ndarray
Vector to compute dot product with
Kwargs:
out : 2D array
If provided, use to store output
chempot : float
Scale energies (by default, :attr:`chempot` is not used
and energies retain their values). Default 0.0
Returns:
ndarray with shape of :attr:`vec`
'''
_check_phys_shape(self, phys)
vec = np.asarray(vec)
input_shape = vec.shape
vec = vec.reshape((self.nphys+self.naux, -1))
dtype = np.result_type(self.coupling.dtype, vec.dtype)
sp = slice(None, self.nphys)
sa = slice(self.nphys, None)
if out is None:
out = np.zeros(vec.shape, dtype=dtype)
out = out.reshape(vec.shape)
out[sp] = np.dot(phys, vec[sp])
out[sp] += np.dot(self.coupling, vec[sa])
out[sa] = np.dot(vec[sp].T, self.coupling).conj().T
out[sa] += (self.energy[:,None] - chempot) * vec[sa]
out = out.reshape(input_shape)
return out
def eig(self, phys, out=None, chempot=0.0):
''' Computes the eigenvalues and eigenvectors of the array
returned by :func:`get_array`.
Args:
phys : 2D array
Physical (1p + 1h) part of the matrix
Kwargs:
out : 2D array
If provided, use to store output
chempot : float
Scale energies (by default, :attr:`chempot` is not used
and energies retain their values). Default 0.0
Returns:
tuple of ndarrays (eigenvalues, eigenvectors)
'''
_check_phys_shape(self, phys)
h = self.get_array(phys, chempot=chempot, out=out)
w, v = np.linalg.eigh(h)
return w, v
def moment(self, n, squeeze=True):
''' Builds the nth moment of the spectral distribution.
Args:
n : int or list of int
Moment(s) to compute
Kwargs:
squeeze : bool
If True, use :func:`np.squeeze` on output so that in
the case of :attr:`n` being an int, a 2D array is
returned. If False, output is always 3D. Default True.
Returns:
ndarray of moments
'''
n = np.asarray(n)
n = n.reshape(n.size)
energy_factored = self.energy[None] ** n[:,None]
v = self.coupling
moms = lib.einsum('xk,yk,nk->nxy', v, v.conj(), energy_factored)
if squeeze:
moms = np.squeeze(moms)
return moms
def remove_uncoupled(self, tol):
''' Removes poles with very low spectral weight (uncoupled
to the physical space) in-place.
Args:
tol : float
Threshold for the spectral weight (squared norm)
'''
v = self.coupling
w = np.linalg.norm(v, axis=0) ** 2
arg = w >= tol
self.energy = self.energy[arg]
self.coupling = self.coupling[:,arg]
def save(self, chkfile, key=None):
''' Saves the auxiliaries in chkfile
Args:
chkfile : str
Name of chkfile
key : str
Key to be used in h5py object. It can contain "/" to
represent the path in the HDF5 storage structure.
'''
if key is None:
key = 'aux'
lib.chkfile.dump(chkfile, key, self.__dict__)
@classmethod
def load(cls, chkfile, key=None):
''' Loads the auxiliaries from a chkfile
Args:
chkfile : str
Name of chkfile
key : str
Key to be used in h5py object. It can contain "/" to
represent the path in the HDF5 storage structure.
'''
if key is None:
key = 'aux'
dct = lib.chkfile.load(chkfile, key)
return cls(dct['energy'], dct['coupling'], chempot=dct['chempot'])
def copy(self):
''' Returns a copy of the current object.
Returns:
AuxiliarySpace
'''
energy = np.copy(self.energy)
coupling = np.copy(self.coupling)
return self.__class__(energy, coupling, chempot=self.chempot)
@property
def nphys(self):
return self.coupling.shape[0]
@property
def naux(self):
return self.coupling.shape[1]
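# Small self-contained sketch (illustrative only, with made-up numbers) of the
# container above: the zeroth spectral moment reduces to the outer product of
# the couplings, V V^T, regardless of how the poles are sorted internally.
def _auxiliary_space_moment_sketch():
    energy = np.array([-0.5, 0.1, 0.7])
    coupling = np.array([[0.1, 0.2, 0.3],
                         [0.4, 0.5, 0.6]])
    aux = AuxiliarySpace(energy, coupling, chempot=0.0)
    zeroth = aux.moment(0)
    assert np.allclose(zeroth, np.dot(coupling, coupling.T))
    return zeroth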
class SelfEnergy(AuxiliarySpace):
''' Defines a self-energy represented as a :class:`AuxiliarySpace`
object.
'''
def real_freq_spectrum(self, grid, eta=0.02):
raise ValueError('Convert SelfEnergy to GreensFunction before '
'building a spectrum.')
def get_greens_function(self, phys):
''' Returns a :class:`GreensFunction` by solving the Dyson
equation.
Args:
phys : 2D array
Physical space (1p + 1h), typically the Fock matrix
Returns:
:class:`GreensFunction`
'''
w, v = self.eig(phys)
v = v[:self.nphys]
return GreensFunction(w, v, chempot=self.chempot)
def make_rdm1(self, phys, chempot=None, occupancy=2):
''' Returns the first-order reduced density matrix associated
with the self-energy via the :class:`GreensFunction`.
Args:
phys : 2D array
Physical space (1p + 1h), typically the Fock matrix
Kwargs:
chempot : float
If provided, use instead of :attr:`self.chempot`
occupancy : int
Occupancy of the states, i.e. 2 for RHF and 1 for UHF
'''
gf = self.get_greens_function(phys)
return gf.make_rdm1(phys, chempot=chempot, occupancy=occupancy)
def compress(self, phys=None, n=(None, 0), tol=1e-12):
''' Compress the auxiliaries via moments of the particle and
hole Green's function and self-energy. Resulting :attr:`naux`
depends on the chosen :attr:`n`.
Kwargs:
phys : 2D array or None
Physical space (1p + 1h), typically the Fock matrix.
Only required if :attr:`n[0]` is not None.
n : tuple of int
Compression level of the Green's function and
self-energy, respectively.
tol : float
                Linear dependency tolerance. Default value is 1e-12
Returns:
:class:`SelfEnergy` with reduced auxiliary dimension.
Raises:
MemoryError if the compression according to Green's
function moments will exceed the maximum allowed memory.
'''
ngf, nse = n
se = self
if nse is None and ngf is None:
return self.copy()
if nse is not None:
se = compress_via_se(se, n=nse)
if ngf is not None:
se = compress_via_gf(se, phys, n=ngf, tol=tol)
return se
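# Usage sketch (illustrative only, with made-up numbers): solving the Dyson
# equation for a 2x2 physical space and building the corresponding density
# matrix. In a real calculation ``phys`` would be the Fock matrix.
def _self_energy_dyson_sketch():
    phys = np.diag([-1.0, 1.0])
    se = SelfEnergy(np.array([-2.0, 2.0]),
                    np.array([[0.1, 0.2],
                              [0.3, 0.4]]),
                    chempot=0.0)
    gf = se.get_greens_function(phys)   # poles of the Green's function
    rdm1 = gf.make_rdm1(occupancy=2)    # first-order reduced density matrix
    return gf.energy, rdm1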
class GreensFunction(AuxiliarySpace):
''' Defines a Green's function represented as a
:class:`AuxiliarySpace` object.
'''
def real_freq_spectrum(self, grid, eta=0.02):
''' Express the auxiliaries as a spectral function on the real
frequency axis.
Args:
grid : 1D array
Real frequency grid
Kwargs:
eta : float
Peak broadening factor in Hartrees. Default is 0.02.
Returns:
ndarray of the spectrum, with the first index being the
frequency
'''
e_shifted = self.energy - self.chempot
v = self.coupling
spectrum = np.zeros((grid.size, self.nphys, self.nphys), dtype=complex)
blksize = 240
p1 = 0
for block in range(0, grid.size, blksize):
p0, p1 = p1, min(p1 + blksize, grid.size)
denom = grid[p0:p1,None] - (e_shifted + eta*1.0j)[None]
spectrum[p0:p1] = lib.einsum('xk,yk,wk->wxy', v, v.conj(), 1./denom)
return -1/np.pi * np.trace(spectrum.imag, axis1=1, axis2=2)
def make_rdm1(self, chempot=None, occupancy=2):
''' Returns the first-order reduced density matrix associated
with the Green's function.
Kwargs:
chempot : float
If provided, use instead of :attr:`self.chempot`
occupancy : int
Occupancy of the states, i.e. 2 for RHF and 1 for UHF
'''
if chempot is None:
chempot = self.chempot
arg = self.energy < chempot
v_occ = self.coupling[:,arg]
rdm1 = np.dot(v_occ, v_occ.T.conj()) * occupancy
return rdm1
def compress(self, *args, **kwargs):
raise ValueError('Compression must be performed on SelfEnergy '
'rather than GreensFunction.')
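# Sketch of evaluating the spectral function defined above; the grid limits
# and broadening are arbitrary example values. Each pole contributes a
# Lorentzian of width ``eta`` centred at (energy - chempot), and the trace
# over the physical space is returned for every frequency point.
def _greens_function_spectrum_sketch(gf):
    grid = np.linspace(-5.0, 5.0, 1001)
    spectrum = gf.real_freq_spectrum(grid, eta=0.05)
    return grid, spectrum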
def combine(*auxspcs):
    ''' Combine a set of :class:`AuxiliarySpace` objects. :attr:`chempot`
is inherited from the first element.
'''
nphys = [auxspc.nphys for auxspc in auxspcs]
if not all([x == nphys[0] for x in nphys]):
raise ValueError('Size of physical space must be the same to '
'combine AuxiliarySpace objects.')
nphys = nphys[0]
naux = sum([auxspc.naux for auxspc in auxspcs])
dtype = np.result_type(*[auxspc.coupling for auxspc in auxspcs])
energy = np.zeros((naux,))
coupling = np.zeros((nphys, naux), dtype=dtype)
p1 = 0
for auxspc in auxspcs:
p0, p1 = p1, p1 + auxspc.naux
energy[p0:p1] = auxspc.energy
coupling[:,p0:p1] = auxspc.coupling
auxspc = auxspcs[0].__class__(energy, coupling, chempot=auxspcs[0].chempot)
return auxspc
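# Sketch of combining auxiliary spaces (the inputs are assumed to share the
# same physical dimension, as enforced above): the poles are concatenated and
# the chemical potential of the first object is kept.
def _combine_sketch(se_a, se_b):
    se_total = combine(se_a, se_b)
    assert se_total.naux == se_a.naux + se_b.naux
    return se_total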
def davidson(auxspc, phys, chempot=None, nroots=1, which='SM', tol=1e-14, maxiter=None, ntrial=None):
''' Diagonalise the result of :func:`AuxiliarySpace.get_array` using
the sparse :func:`AuxiliarySpace.dot` method, with the Davidson
algorithm.
This algorithm may perform poorly for IPs or EAs if they are
not extremal eigenvalues, which they are not in standard AGF2.
Args:
auxspc : AuxiliarySpace or subclass
Auxiliary space object to solve for
phys : 2D array
Physical space (1p + 1h), typically the Fock matrix
Kwargs:
chempot : float
If provided, use instead of :attr:`self.chempot`
nroots : int
Number of roots to solve for. Default 1.
which : str
Which eigenvalues to solve for. Options are:
`LM` : Largest (in magnitude) eigenvalues.
`SM` : Smallest (in magnitude) eigenvalues.
`LA` : Largest (algebraic) eigenvalues.
`SA` : Smallest (algebraic) eigenvalues.
Default 'SM'.
tol : float
Convergence threshold
maxiter : int
Maximum number of iterations. Default 10*dim
ntrial : int
Maximum number of trial vectors. Default
min(dim, max(2*nroots+1, 20))
Returns:
tuple of ndarrays (eigenvalues, eigenvectors)
'''
_check_phys_shape(auxspc, phys)
dim = auxspc.nphys + auxspc.naux
if maxiter is None:
maxiter = 10 * dim
if ntrial is None:
ntrial = min(dim, max(2*nroots+1, 20))
if which not in ['SM', 'LM', 'SA', 'LA']:
raise ValueError(which)
if which in ['SM', 'LM']:
abs_op = np.absolute
else:
abs_op = lambda x: x
if which in ['SM', 'SA']:
order = 1
else:
order = -1
matvec = lambda x: auxspc.dot(phys, np.asarray(x))
diag = np.concatenate([np.diag(phys), auxspc.energy])
guess = [np.zeros((dim)) for n in range(nroots)]
mask = np.argsort(abs_op(diag))[::order]
for i in range(nroots):
guess[i][mask[i]] = 1
def pick(w, v, nroots, callback):
mask = np.argsort(abs_op(w))
mask = mask[::order]
w = w[mask]
v = v[:,mask]
return w, v, 0
conv, w, v = lib.davidson1(matvec, guess, diag, tol=tol, nroots=nroots,
max_space=ntrial, max_cycle=maxiter, pick=pick)
return conv, w, v
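# Usage sketch for the Davidson solver above: request a few of the
# smallest-magnitude eigenvalues of the extended matrix using only
# matrix-vector products. ``se`` and ``fock`` are placeholders for a
# SelfEnergy object and the corresponding physical-space matrix.
def _davidson_sketch(se, fock):
    conv, w, v = davidson(se, fock, nroots=3, which='SM')
    return conv, w, v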
def _band_lanczos(se_occ, n=0, max_memory=None):
''' Perform the banded Lanczos algorithm for compression of a
self-energy according to consistency in its separate
particle and hole moments.
'''
nblk = n+1
nphys, naux = se_occ.coupling.shape
bandwidth = nblk * nphys
q = np.zeros((bandwidth, naux))
t = np.zeros((bandwidth, bandwidth))
r = np.zeros((naux))
# cholesky qr factorisation of v.T
coupling = se_occ.coupling
x = np.dot(coupling, coupling.T)
try:
v_tri = np.linalg.cholesky(x).T
except np.linalg.LinAlgError:
w, v = np.linalg.eigh(x)
w[w < 1e-20] = 1e-20
x_posdef = np.dot(np.dot(v, np.diag(w)), v.T)
v_tri = np.linalg.cholesky(x_posdef).T
q[:nphys] = np.dot(np.linalg.inv(v_tri).T, coupling)
for i in range(bandwidth):
r[:] = se_occ.energy * q[i]
start = max(i-nphys, 0)
if start != i:
r -= np.dot(t[i,start:i], q[start:i])
for j in range(i, min(i+nphys, bandwidth)):
t[i,j] = t[j,i] = np.dot(r, q[j])
# r := -t[i,j] * q[j] + r
scipy.linalg.blas.daxpy(q[j], r, a=-t[i,j])
if (i+nphys) < bandwidth:
len_r = np.linalg.norm(r)
t[i,i+nphys] = t[i+nphys,i] = len_r
q[i+nphys] = r / (len_r + 1./LARGE_DENOM)
return v_tri, t
def _compress_part_via_se(se_occ, n=0):
''' Compress the auxiliaries of the occupied or virtual part of
the self-energy according to consistency in its moments.
'''
if se_occ.nphys > se_occ.naux:
# breaks this version of the algorithm and is also pointless
e = se_occ.energy.copy()
v = se_occ.coupling.copy()
else:
v_tri, t = _band_lanczos(se_occ, n=n)
e, v = np.linalg.eigh(t)
v = np.dot(v_tri.T, v[:se_occ.nphys])
return e, v
def _compress_via_se(se, n=0):
    ''' Compress the auxiliaries of the separate occupied and
virtual parts of the self-energy according to consistency
in its moments.
'''
if se.naux == 0:
return se.energy, se.coupling
se_occ = se.get_occupied()
se_vir = se.get_virtual()
e = []
v = []
if se_occ.naux > 0:
e_occ, v_occ = _compress_part_via_se(se_occ, n=n)
e.append(e_occ)
v.append(v_occ)
if se_vir.naux > 0:
e_vir, v_vir = _compress_part_via_se(se_vir, n=n)
e.append(e_vir)
v.append(v_vir)
e = np.concatenate(e, axis=0)
v = np.concatenate(v, axis=-1)
return e, v
def compress_via_se(se, n=0):
    ''' Compress the auxiliaries of the separate occupied and
virtual parts of the self-energy according to consistency
in its moments.
Args:
se : SelfEnergy
Auxiliaries of the self-energy
Kwargs:
n : int
            Truncation parameter, conserves the separate particle
and hole moments to order 2*n+1.
Returns:
:class:`SelfEnergy` with reduced auxiliary dimension
Ref:
[1] H. Muther, T. Taigel and T.T.S. Kuo, Nucl. Phys., 482,
1988, pp. 601-616.
[2] D. Van Neck, K. Piers and M. Waroquier, J. Chem. Phys.,
115, 2001, pp. 15-25.
[3] H. Muther and L.D. Skouras, Nucl. Phys., 55, 1993,
pp. 541-562.
[4] Y. Dewulf, D. Van Neck, L. Van Daele and M. Waroquier,
Phys. Lett. B, 396, 1997, pp. 7-14.
'''
e, v = _compress_via_se(se, n=n)
se_red = SelfEnergy(e, v, chempot=se.chempot)
return se_red
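# Sketch of the moment-conserving compression above: with n=0 the separate
# particle and hole self-energy moments are conserved to order 2*n+1 (here,
# first order), leaving at most 2 * nphys poles. ``se`` is a placeholder for
# a SelfEnergy object.
def _compress_via_se_sketch(se):
    se_red = compress_via_se(se, n=0)
    assert se_red.naux <= se.naux
    return se_red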
def _build_projector(se, phys, n=0, tol=1e-12):
''' Builds the vectors which project the auxiliary space into a
        compressed one with consistency in the separate particle and
hole moments up to order 2n+1.
'''
_check_phys_shape(se, phys)
nphys, naux = se.coupling.shape
w, v = se.eig(phys)
def _part(w, v, s):
en = w[s][None] ** np.arange(n+1)[:,None]
v = v[:,s]
p = np.einsum('xi,pi,ni->xpn', v[nphys:], v[:nphys], en)
return p.reshape(naux, nphys*(n+1))
p = np.hstack((_part(w, v, w < se.chempot),
_part(w, v, w >= se.chempot)))
norm = np.linalg.norm(p, axis=0, keepdims=True)
norm[np.absolute(norm) == 0] = 1./LARGE_DENOM
p /= norm
w, p = np.linalg.eigh(np.dot(p, p.T))
p = p[:, w > tol]
nvec = p.shape[1]
p = np.block([[np.eye(nphys), np.zeros((nphys, nvec))],
[np.zeros((naux, nphys)), p]])
return p
def _compress_via_gf(se, phys, n=0, tol=1e-12):
    ''' Compress the auxiliaries of the separate occupied and
virtual parts of the self-energy according to consistency
in the moments of the Green's function
'''
nphys = se.nphys
p = _build_projector(se, phys, n=n, tol=tol)
h_tilde = np.dot(p.T, se.dot(phys, p))
p = None
e, v = np.linalg.eigh(h_tilde[nphys:,nphys:])
v = np.dot(h_tilde[:nphys,nphys:], v)
return e, v
def compress_via_gf(se, phys, n=0, tol=1e-12):
    ''' Compress the auxiliaries of the separate occupied and
virtual parts of the self-energy according to consistency
in the moments of the Green's function
Args:
se : SelfEnergy
Auxiliaries of the self-energy
phys : 2D array
Physical space (1p + 1h), typically the Fock matrix
Kwargs:
n : int
            Truncation parameter, conserves the separate particle
and hole moments to order 2*n+1.
tol : float
            Linear dependency tolerance. Default value is 1e-12
Returns:
:class:`SelfEnergy` with reduced auxiliary dimension
'''
e, v = _compress_via_gf(se, phys, n=n, tol=tol)
se_red = SelfEnergy(e, v, chempot=se.chempot)
return se_red
def _check_phys_shape(auxspc, phys):
if np.shape(phys) != (auxspc.nphys, auxspc.nphys):
raise ValueError('Size of physical space must be the same as '
'leading dimension of couplings.')
| {
"content_hash": "94d0e0443c3e7bd9964e60667083be77",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 101,
"avg_line_length": 29.796495956873315,
"alnum_prop": 0.5652449228820842,
"repo_name": "sunqm/pyscf",
"id": "6969f5899cb259f963da8210d509cd68a384ce57",
"size": "22826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscf/agf2/aux_space.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2805171"
},
{
"name": "CMake",
"bytes": "19597"
},
{
"name": "Common Lisp",
"bytes": "40515"
},
{
"name": "Dockerfile",
"bytes": "447"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "19630497"
},
{
"name": "Roff",
"bytes": "429"
},
{
"name": "Shell",
"bytes": "6564"
}
],
"symlink_target": ""
} |
from .app import app, logger
| {
"content_hash": "ccb5f4ba580a18b81d54554d23749c65",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.7586206896551724,
"repo_name": "Lvxingpai/viae-worker",
"id": "5286a59bde105a6e9700cd4e611b87e210115cbf",
"size": "29",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "viae/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50553"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
Copyright (c) 2017 Sean UN Wood
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@author: Sean UN Wood
'''
import logging
from collections import namedtuple
import ast
import argparse
try:
import configparser # Python 3.x
except ImportError:
import ConfigParser as configparser # Python 2.x
from gccNMF.defs import DEFAULT_AUDIO_FILE, DEFAULT_CONFIG_FILE
from gccNMF.realtime.gccNMFPretraining import getDictionariesW
INT_OPTIONS = ['numTDOAs', 'numTDOAHistory', 'numSpectrogramHistory', 'numChannels',
'windowSize', 'hopSize', 'blockSize', 'dictionarySize', 'numHUpdates',
'localizationWindowSize']
FLOAT_OPTIONS = ['gccPHATNLAlpha', 'microphoneSeparationInMetres']
BOOL_OPTIONS = ['gccPHATNLEnabled', 'localizationEnabled']
STRING_OPTIONS = ['dictionaryType', 'audioPath']
def getDefaultConfig():
configParser = configparser.ConfigParser(allow_no_value=True)
configParser.optionxform = str
config = {}
config['TDOA'] = {'numTDOAs': '64',
'numTDOAHistory': '128',
'numSpectrogramHistory': '128',
'gccPHATNLAlpha': '2.0',
'gccPHATNLEnabled': 'False',
'microphoneSeparationInMetres': '0.1',
'targetTDOAEpsilon': '5.0',
'targetTDOABeta': '2.0',
'targetTDOANoiseFloor': '0.0',
'localizationEnabled': 'True',
'localizationWindowSize': '6'}
config['Audio'] = {'numChannels': '2',
'sampleRate': '16000',
'deviceIndex': 'None'}
config['STFT'] = {'windowSize': '1024',
'hopSize': '512',
'blockSize': '512'}
config['NMF'] = {'dictionarySize': '64',
'dictionarySizes': '[64, 128, 256, 512, 1024]',
'dictionaryType': 'Pretrained',
'numHUpdates': '0'}
try:
for key, value in config.items():
configParser[key] = value
except:
for sectionKey, sectionValue in config.items():
configParser.add_section(sectionKey)
for key, value in sectionValue.items():
configParser.set(sectionKey, key, value)
return configParser
def getDictFromConfig(config):
logging.info('GCCNMFConfig: loading configuration params...')
dictionary = {}
for section in config.sections():
logging.info(section)
dictionary[section] = {}
for option in config.options(section):
if option in INT_OPTIONS:
dictionary[option] = config.getint(section, option)
elif option in FLOAT_OPTIONS:
dictionary[option] = config.getfloat(section, option)
elif option in BOOL_OPTIONS:
dictionary[option] = config.getboolean(section, option)
elif option in STRING_OPTIONS:
dictionary[option] = config.get(section, option)
else:
dictionary[option] = ast.literal_eval( config.get(section, option) )
logging.info(' %s: %s' % (option, str(dictionary[option])) )
return dictionary
def getGCCNMFConfig(configPath):
raise ValueError('configPath is None')
def getGCCNMFConfigParams(audioPath=DEFAULT_AUDIO_FILE, configPath=DEFAULT_CONFIG_FILE):
try:
config = getGCCNMFConfig(configPath)
except:
config = getDefaultConfig()
parametersDict = getDictFromConfig(config)
parametersDict['audioPath'] = audioPath
parametersDict['numFreq'] = parametersDict['windowSize'] // 2 + 1
parametersDict['windowsPerBlock'] = parametersDict['blockSize'] // parametersDict['hopSize']
parametersDict['dictionariesW'] = getDictionariesW(parametersDict['windowSize'], parametersDict['dictionarySizes'], ordered=True)
params = namedtuple('ParamsDict', parametersDict.keys())(**parametersDict)
return params
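# Usage sketch (illustrative): loading the parameters with the bundled
# defaults and reading a few typed options from the resulting namedtuple.
# This assumes the pretrained NMF dictionaries shipped with the package can
# be found, since getDictionariesW is called while building the params.
def _exampleLoadParams():
    params = getGCCNMFConfigParams()
    return params.windowSize, params.hopSize, params.numTDOAs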
def parseArguments():
parser = argparse.ArgumentParser(description='Real-time GCC-NMF Speech Enhancement')
parser.add_argument('-i','--input', help='input wav file path', default=DEFAULT_AUDIO_FILE, required=False)
parser.add_argument('-c','--config', help='config file path', default=DEFAULT_CONFIG_FILE, required=False)
parser.add_argument('--no-gui', help='no user interface mode', action='store_true')
return parser.parse_args() | {
"content_hash": "19a0a1805fc4d7497f4847e0dd87d217",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 133,
"avg_line_length": 43.118110236220474,
"alnum_prop": 0.6539444850255661,
"repo_name": "seanwood/gcc-nmf",
"id": "c5f0ed8bffb165fd54f0ca55a12f64e1db79d810",
"size": "5476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gccNMF/realtime/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129346"
}
],
"symlink_target": ""
} |
"""Tests cyclical mapper relationships.
We might want to try an automated generation of much of this: all combos of
T1<->T2, with o2m or m2o between them, and a third T3 with o2m/m2o to one/both
T1/T2.
"""
from itertools import count
from sqlalchemy import bindparam
from sqlalchemy import event
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import Conditional
from sqlalchemy.testing.assertsql import RegexSQL
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class SelfReferentialTest(fixtures.MappedTest):
"""A self-referential mapper with an additional list of child objects."""
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_c1", Integer, ForeignKey("t1.c1")),
Column("data", String(20)),
)
Table(
"t2",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c1id", Integer, ForeignKey("t1.c1")),
Column("data", String(20)),
)
@classmethod
def setup_classes(cls):
class C1(cls.Basic):
def __init__(self, data=None):
self.data = data
class C2(cls.Basic):
def __init__(self, data=None):
self.data = data
def test_single(self):
C1, t1 = self.classes.C1, self.tables.t1
self.mapper_registry.map_imperatively(
C1,
t1,
properties={
"c1s": relationship(
C1, cascade="all", back_populates="parent"
),
"parent": relationship(
C1,
primaryjoin=t1.c.parent_c1 == t1.c.c1,
remote_side=t1.c.c1,
lazy="select",
uselist=False,
back_populates="c1s",
),
},
)
a = C1("head c1")
a.c1s.append(C1("another c1"))
sess = fixture_session()
sess.add(a)
sess.flush()
sess.delete(a)
sess.flush()
def test_many_to_one_only(self):
"""
test that the circular dependency sort can assemble a many-to-one
dependency processor when only the object on the "many" side is
actually in the list of modified objects.
"""
C1, t1 = self.classes.C1, self.tables.t1
self.mapper_registry.map_imperatively(
C1,
t1,
properties={
"parent": relationship(
C1,
primaryjoin=t1.c.parent_c1 == t1.c.c1,
remote_side=t1.c.c1,
)
},
)
c1 = C1()
sess = fixture_session()
sess.add(c1)
sess.flush()
sess.expunge_all()
c1 = sess.query(C1).get(c1.c1)
c2 = C1()
c2.parent = c1
sess.add(c2)
sess.flush()
assert c2.parent_c1 == c1.c1
def test_cycle(self):
C2, C1, t2, t1 = (
self.classes.C2,
self.classes.C1,
self.tables.t2,
self.tables.t1,
)
self.mapper_registry.map_imperatively(
C1,
t1,
properties={
"c1s": relationship(C1, cascade="all"),
"c2s": relationship(
self.mapper_registry.map_imperatively(C2, t2),
cascade="all, delete-orphan",
),
},
)
a = C1("head c1")
a.c1s.append(C1("child1"))
a.c1s.append(C1("child2"))
a.c1s[0].c1s.append(C1("subchild1"))
a.c1s[0].c1s.append(C1("subchild2"))
a.c1s[1].c2s.append(C2("child2 data1"))
a.c1s[1].c2s.append(C2("child2 data2"))
sess = fixture_session()
sess.add(a)
sess.flush()
sess.delete(a)
sess.flush()
def test_setnull_ondelete(self):
C1, t1 = self.classes.C1, self.tables.t1
self.mapper_registry.map_imperatively(
C1, t1, properties={"children": relationship(C1)}
)
sess = fixture_session()
c1 = C1()
c2 = C1()
c1.children.append(c2)
sess.add(c1)
sess.flush()
assert c2.parent_c1 == c1.c1
sess.delete(c1)
sess.flush()
assert c2.parent_c1 is None
sess.expire_all()
assert c2.parent_c1 is None
class SelfReferentialNoPKTest(fixtures.MappedTest):
"""A self-referential relationship that joins on a column other than the
primary key column"""
@classmethod
def define_tables(cls, metadata):
Table(
"item",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("uuid", String(32), unique=True, nullable=False),
Column(
"parent_uuid",
String(32),
ForeignKey("item.uuid"),
nullable=True,
),
)
@classmethod
def setup_classes(cls):
class TT(cls.Basic):
def __init__(self):
self.uuid = hex(id(self))
@classmethod
def setup_mappers(cls):
item, TT = cls.tables.item, cls.classes.TT
cls.mapper_registry.map_imperatively(
TT,
item,
properties={
"children": relationship(
TT,
remote_side=[item.c.parent_uuid],
backref=backref("parent", remote_side=[item.c.uuid]),
)
},
)
def test_basic(self):
TT = self.classes.TT
t1 = TT()
t1.children.append(TT())
t1.children.append(TT())
s = fixture_session()
s.add(t1)
s.flush()
s.expunge_all()
t = s.query(TT).filter_by(id=t1.id).one()
eq_(t.children[0].parent_uuid, t1.uuid)
def test_lazy_clause(self):
TT = self.classes.TT
s = fixture_session()
t1 = TT()
t2 = TT()
t1.children.append(t2)
s.add(t1)
s.flush()
s.expunge_all()
t = s.query(TT).filter_by(id=t2.id).one()
eq_(t.uuid, t2.uuid)
eq_(t.parent.uuid, t1.uuid)
class InheritTestOne(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_data", String(50)),
Column("type", String(10)),
)
Table(
"child1",
metadata,
Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
Column("child1_data", String(50)),
)
Table(
"child2",
metadata,
Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
Column(
"child1_id", Integer, ForeignKey("child1.id"), nullable=False
),
Column("child2_data", String(50)),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
class Child1(Parent):
pass
class Child2(Parent):
pass
@classmethod
def setup_mappers(cls):
child1, child2, parent, Parent, Child1, Child2 = (
cls.tables.child1,
cls.tables.child2,
cls.tables.parent,
cls.classes.Parent,
cls.classes.Child1,
cls.classes.Child2,
)
cls.mapper_registry.map_imperatively(Parent, parent)
cls.mapper_registry.map_imperatively(Child1, child1, inherits=Parent)
cls.mapper_registry.map_imperatively(
Child2,
child2,
inherits=Parent,
properties=dict(
child1=relationship(
Child1, primaryjoin=child2.c.child1_id == child1.c.id
)
),
)
def test_many_to_one_only(self):
"""test similar to SelfReferentialTest.testmanytooneonly"""
Child1, Child2 = self.classes.Child1, self.classes.Child2
session = fixture_session()
c1 = Child1()
c1.child1_data = "qwerty"
session.add(c1)
session.flush()
session.expunge_all()
c1 = session.query(Child1).filter_by(child1_data="qwerty").one()
c2 = Child2()
c2.child1 = c1
c2.child2_data = "asdfgh"
session.add(c2)
# the flush will fail if the UOW does not set up a many-to-one DP
# attached to a task corresponding to c1, since "child1_id" is not
# nullable
session.flush()
class InheritTestTwo(fixtures.MappedTest):
"""
The fix in BiDirectionalManyToOneTest raised this issue, regarding the
'circular sort' containing UOWTasks that were still polymorphic, which
    could create duplicate entries in the final sort.
"""
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("cid", Integer, ForeignKey("c.id")),
)
Table(
"b",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
)
Table(
"c",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("aid", Integer, ForeignKey("a.id", name="foo")),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(A):
pass
class C(cls.Basic):
pass
def test_flush(self):
a, A, c, b, C, B = (
self.tables.a,
self.classes.A,
self.tables.c,
self.tables.b,
self.classes.C,
self.classes.B,
)
self.mapper_registry.map_imperatively(
A,
a,
properties={"cs": relationship(C, primaryjoin=a.c.cid == c.c.id)},
)
self.mapper_registry.map_imperatively(
B, b, inherits=A, inherit_condition=b.c.id == a.c.id
)
self.mapper_registry.map_imperatively(
C,
c,
properties={
"arel": relationship(A, primaryjoin=a.c.id == c.c.aid)
},
)
sess = fixture_session()
bobj = B()
sess.add(bobj)
cobj = C()
sess.add(cobj)
sess.flush()
class BiDirectionalManyToOneTest(fixtures.MappedTest):
run_define_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t2id", Integer, ForeignKey("t2.id")),
)
Table(
"t2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("t1.id", name="foo_fk")),
)
Table(
"t3",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("t1.id"), nullable=False),
Column("t2id", Integer, ForeignKey("t2.id"), nullable=False),
)
@classmethod
def setup_classes(cls):
class T1(cls.Basic):
pass
class T2(cls.Basic):
pass
class T3(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
t2, T2, T3, t1, t3, T1 = (
cls.tables.t2,
cls.classes.T2,
cls.classes.T3,
cls.tables.t1,
cls.tables.t3,
cls.classes.T1,
)
cls.mapper_registry.map_imperatively(
T1,
t1,
properties={
"t2": relationship(T2, primaryjoin=t1.c.t2id == t2.c.id)
},
)
cls.mapper_registry.map_imperatively(
T2,
t2,
properties={
"t1": relationship(T1, primaryjoin=t2.c.t1id == t1.c.id)
},
)
cls.mapper_registry.map_imperatively(
T3, t3, properties={"t1": relationship(T1), "t2": relationship(T2)}
)
def test_reflush(self):
T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1)
o1 = T1()
o1.t2 = T2()
sess = fixture_session()
sess.add(o1)
sess.flush()
# the bug here is that the dependency sort comes up with T1/T2 in a
# cycle, but there are no T1/T2 objects to be saved. therefore no
# "cyclical subtree" gets generated, and one or the other of T1/T2
# gets lost, and processors on T3 don't fire off. the test will then
# fail because the FK's on T3 are not nullable.
o3 = T3()
o3.t1 = o1
o3.t2 = o1.t2
sess.add(o3)
sess.flush()
def test_reflush_2(self):
"""A variant on test_reflush()"""
T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1)
o1 = T1()
o1.t2 = T2()
sess = fixture_session()
sess.add(o1)
sess.flush()
# in this case, T1, T2, and T3 tasks will all be in the cyclical
# tree normally. the dependency processors for T3 are part of the
# 'extradeps' collection so they all get assembled into the tree
# as well.
o1a = T1()
o2a = T2()
sess.add(o1a)
sess.add(o2a)
o3b = T3()
o3b.t1 = o1a
o3b.t2 = o2a
sess.add(o3b)
o3 = T3()
o3.t1 = o1
o3.t2 = o1.t2
sess.add(o3)
sess.flush()
class BiDirectionalOneToManyTest(fixtures.MappedTest):
"""tests two mappers with a one-to-many relationship to each other."""
run_define_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c2", Integer, ForeignKey("t2.c1")),
)
Table(
"t2",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c2", Integer, ForeignKey("t1.c1", name="t1c1_fk")),
)
@classmethod
def setup_classes(cls):
class C1(cls.Basic):
pass
class C2(cls.Basic):
pass
def test_cycle(self):
C2, C1, t2, t1 = (
self.classes.C2,
self.classes.C1,
self.tables.t2,
self.tables.t1,
)
self.mapper_registry.map_imperatively(
C2,
t2,
properties={
"c1s": relationship(
C1, primaryjoin=t2.c.c1 == t1.c.c2, uselist=True
)
},
)
self.mapper_registry.map_imperatively(
C1,
t1,
properties={
"c2s": relationship(
C2, primaryjoin=t1.c.c1 == t2.c.c2, uselist=True
)
},
)
a = C1()
b = C2()
c = C1()
d = C2()
e = C2()
f = C2()
a.c2s.append(b)
d.c1s.append(c)
b.c1s.append(c)
sess = fixture_session()
sess.add_all((a, b, c, d, e, f))
sess.flush()
class BiDirectionalOneToManyTest2(fixtures.MappedTest):
"""Two mappers with a one-to-many relationship to each other,
    with a second one-to-many on one of the mappers."""
run_define_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c2", Integer, ForeignKey("t2.c1")),
test_needs_autoincrement=True,
)
Table(
"t2",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("c2", Integer, ForeignKey("t1.c1", name="t1c1_fq")),
test_needs_autoincrement=True,
)
Table(
"t1_data",
metadata,
Column(
"c1", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("t1id", Integer, ForeignKey("t1.c1")),
Column("data", String(20)),
test_needs_autoincrement=True,
)
@classmethod
def setup_classes(cls):
class C1(cls.Basic):
pass
class C2(cls.Basic):
pass
class C1Data(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
t2, t1, C1Data, t1_data, C2, C1 = (
cls.tables.t2,
cls.tables.t1,
cls.classes.C1Data,
cls.tables.t1_data,
cls.classes.C2,
cls.classes.C1,
)
cls.mapper_registry.map_imperatively(
C2,
t2,
properties={
"c1s": relationship(
C1, primaryjoin=t2.c.c1 == t1.c.c2, uselist=True
)
},
)
cls.mapper_registry.map_imperatively(
C1,
t1,
properties={
"c2s": relationship(
C2, primaryjoin=t1.c.c1 == t2.c.c2, uselist=True
),
"data": relationship(
cls.mapper_registry.map_imperatively(C1Data, t1_data)
),
},
)
def test_cycle(self):
C2, C1, C1Data = (
self.classes.C2,
self.classes.C1,
self.classes.C1Data,
)
a = C1()
b = C2()
c = C1()
d = C2()
e = C2()
f = C2()
a.c2s.append(b)
d.c1s.append(c)
b.c1s.append(c)
a.data.append(C1Data(data="c1data1"))
a.data.append(C1Data(data="c1data2"))
c.data.append(C1Data(data="c1data3"))
sess = fixture_session()
sess.add_all((a, b, c, d, e, f))
sess.flush()
sess.delete(d)
sess.delete(c)
sess.flush()
@testing.combinations(
(
"legacy_style",
True,
),
(
"new_style",
False,
),
argnames="name, _legacy_inactive_history_style",
id_="sa",
)
class OneToManyManyToOneTest(fixtures.MappedTest):
"""
Tests two mappers, one has a one-to-many on the other mapper, the other
has a separate many-to-one relationship to the first. two tests will have
a row for each item that is dependent on the other. without the
"post_update" flag, such relationships raise an exception when
dependencies are sorted.
"""
run_define_tables = "each"
@classmethod
def define_tables(cls, metadata):
Table(
"ball",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"person_id",
Integer,
ForeignKey("person.id", name="fk_person_id"),
),
Column("data", String(30)),
)
Table(
"person",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("favorite_ball_id", Integer, ForeignKey("ball.id")),
Column("data", String(30)),
)
@classmethod
def setup_classes(cls):
class Person(cls.Basic):
pass
class Ball(cls.Basic):
pass
def test_cycle(self):
"""
This test has a peculiar aspect in that it doesn't create as many
dependent relationships as the other tests, and revealed a small
glitch in the circular dependency sorting.
"""
person, ball, Ball, Person = (
self.tables.person,
self.tables.ball,
self.classes.Ball,
self.classes.Person,
)
self.mapper_registry.map_imperatively(Ball, ball)
self.mapper_registry.map_imperatively(
Person,
person,
properties=dict(
balls=relationship(
Ball,
primaryjoin=ball.c.person_id == person.c.id,
remote_side=ball.c.person_id,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
favorite=relationship(
Ball,
primaryjoin=person.c.favorite_ball_id == ball.c.id,
remote_side=ball.c.id,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
),
)
b = Ball()
p = Person()
p.balls.append(b)
sess = fixture_session()
sess.add(p)
sess.flush()
def test_post_update_m2o_no_cascade(self):
person, ball, Ball, Person = (
self.tables.person,
self.tables.ball,
self.classes.Ball,
self.classes.Person,
)
self.mapper_registry.map_imperatively(Ball, ball)
self.mapper_registry.map_imperatively(
Person,
person,
properties=dict(
favorite=relationship(
Ball,
primaryjoin=person.c.favorite_ball_id == ball.c.id,
post_update=True,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
)
),
)
b = Ball(data="some data")
p = Person(data="some data")
p.favorite = b
sess = fixture_session()
sess.add(b)
sess.add(p)
sess.flush()
sess.delete(p)
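        # deleting the person should emit a preliminary UPDATE that nulls out
        # favorite_ball_id (via post_update) before the DELETE of the row
        # itself, as asserted below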
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE person SET favorite_ball_id=:favorite_ball_id "
"WHERE person.id = :person_id",
lambda ctx: {"favorite_ball_id": None, "person_id": p.id},
),
CompiledSQL(
"DELETE FROM person WHERE person.id = :id",
lambda ctx: {"id": p.id},
),
)
def test_post_update_m2o(self):
"""A cycle between two rows, with a post_update on the many-to-one"""
person, ball, Ball, Person = (
self.tables.person,
self.tables.ball,
self.classes.Ball,
self.classes.Person,
)
self.mapper_registry.map_imperatively(Ball, ball)
self.mapper_registry.map_imperatively(
Person,
person,
properties=dict(
balls=relationship(
Ball,
primaryjoin=ball.c.person_id == person.c.id,
remote_side=ball.c.person_id,
post_update=False,
cascade="all, delete-orphan",
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
favorite=relationship(
Ball,
primaryjoin=person.c.favorite_ball_id == ball.c.id,
remote_side=person.c.favorite_ball_id,
post_update=True,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
),
)
b = Ball(data="some data")
p = Person(data="some data")
p.balls.append(b)
p.balls.append(Ball(data="some data"))
p.balls.append(Ball(data="some data"))
p.balls.append(Ball(data="some data"))
p.favorite = b
sess = fixture_session()
sess.add(b)
sess.add(p)
self.assert_sql_execution(
testing.db,
sess.flush,
RegexSQL("^INSERT INTO person", {"data": "some data"}),
Conditional(
testing.db.dialect.insert_executemany_returning,
[
RegexSQL(
"^INSERT INTO ball",
lambda c: [
{"person_id": p.id, "data": "some data"},
{"person_id": p.id, "data": "some data"},
{"person_id": p.id, "data": "some data"},
{"person_id": p.id, "data": "some data"},
],
)
],
[
RegexSQL(
"^INSERT INTO ball",
lambda c: {"person_id": p.id, "data": "some data"},
),
RegexSQL(
"^INSERT INTO ball",
lambda c: {"person_id": p.id, "data": "some data"},
),
RegexSQL(
"^INSERT INTO ball",
lambda c: {"person_id": p.id, "data": "some data"},
),
RegexSQL(
"^INSERT INTO ball",
lambda c: {"person_id": p.id, "data": "some data"},
),
],
),
CompiledSQL(
"UPDATE person SET favorite_ball_id=:favorite_ball_id "
"WHERE person.id = :person_id",
lambda ctx: {
"favorite_ball_id": p.favorite.id,
"person_id": p.id,
},
),
)
sess.delete(p)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE person SET favorite_ball_id=:favorite_ball_id "
"WHERE person.id = :person_id",
lambda ctx: {"person_id": p.id, "favorite_ball_id": None},
),
# lambda ctx:[{'id': 1L}, {'id': 4L}, {'id': 3L}, {'id': 2L}])
CompiledSQL("DELETE FROM ball WHERE ball.id = :id", None),
CompiledSQL(
"DELETE FROM person WHERE person.id = :id",
lambda ctx: [{"id": p.id}],
),
)
def test_post_update_backref(self):
"""test bidirectional post_update."""
person, ball, Ball, Person = (
self.tables.person,
self.tables.ball,
self.classes.Ball,
self.classes.Person,
)
self.mapper_registry.map_imperatively(Ball, ball)
self.mapper_registry.map_imperatively(
Person,
person,
properties=dict(
balls=relationship(
Ball,
primaryjoin=ball.c.person_id == person.c.id,
remote_side=ball.c.person_id,
post_update=True,
backref=backref(
"person",
post_update=True,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
favorite=relationship(
Ball,
primaryjoin=person.c.favorite_ball_id == ball.c.id,
remote_side=person.c.favorite_ball_id,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
),
)
sess = fixture_session()
p1 = Person(data="p1")
p2 = Person(data="p2")
p3 = Person(data="p3")
b1 = Ball(data="b1")
b1.person = p1
sess.add_all([p1, p2, p3])
sess.commit()
# switch here. the post_update
# on ball.person can't get tripped up
# by the fact that there's a "reverse" prop.
b1.person = p2
sess.commit()
eq_(p2, b1.person)
# do it the other way
p3.balls.append(b1)
sess.commit()
eq_(p3, b1.person)
def test_post_update_o2m(self):
"""A cycle between two rows, with a post_update on the one-to-many"""
person, ball, Ball, Person = (
self.tables.person,
self.tables.ball,
self.classes.Ball,
self.classes.Person,
)
self.mapper_registry.map_imperatively(Ball, ball)
self.mapper_registry.map_imperatively(
Person,
person,
properties=dict(
balls=relationship(
Ball,
primaryjoin=ball.c.person_id == person.c.id,
remote_side=ball.c.person_id,
cascade="all, delete-orphan",
post_update=True,
backref="person",
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
favorite=relationship(
Ball,
primaryjoin=person.c.favorite_ball_id == ball.c.id,
remote_side=person.c.favorite_ball_id,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
),
),
)
b = Ball(data="some data")
p = Person(data="some data")
p.balls.append(b)
b2 = Ball(data="some data")
p.balls.append(b2)
b3 = Ball(data="some data")
p.balls.append(b3)
b4 = Ball(data="some data")
p.balls.append(b4)
p.favorite = b
sess = fixture_session()
sess.add_all((b, p, b2, b3, b4))
self.assert_sql_execution(
testing.db,
sess.flush,
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO ball (person_id, data) "
"VALUES (:person_id, :data)",
[
{"person_id": None, "data": "some data"},
{"person_id": None, "data": "some data"},
{"person_id": None, "data": "some data"},
{"person_id": None, "data": "some data"},
],
),
],
[
CompiledSQL(
"INSERT INTO ball (person_id, data) "
"VALUES (:person_id, :data)",
{"person_id": None, "data": "some data"},
),
CompiledSQL(
"INSERT INTO ball (person_id, data) "
"VALUES (:person_id, :data)",
{"person_id": None, "data": "some data"},
),
CompiledSQL(
"INSERT INTO ball (person_id, data) "
"VALUES (:person_id, :data)",
{"person_id": None, "data": "some data"},
),
CompiledSQL(
"INSERT INTO ball (person_id, data) "
"VALUES (:person_id, :data)",
{"person_id": None, "data": "some data"},
),
],
),
CompiledSQL(
"INSERT INTO person (favorite_ball_id, data) "
"VALUES (:favorite_ball_id, :data)",
lambda ctx: {"favorite_ball_id": b.id, "data": "some data"},
),
CompiledSQL(
"UPDATE ball SET person_id=:person_id "
"WHERE ball.id = :ball_id",
lambda ctx: [
{"person_id": p.id, "ball_id": b.id},
{"person_id": p.id, "ball_id": b2.id},
{"person_id": p.id, "ball_id": b3.id},
{"person_id": p.id, "ball_id": b4.id},
],
),
)
sess.delete(p)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE ball SET person_id=:person_id "
"WHERE ball.id = :ball_id",
lambda ctx: [
{"person_id": None, "ball_id": b.id},
{"person_id": None, "ball_id": b2.id},
{"person_id": None, "ball_id": b3.id},
{"person_id": None, "ball_id": b4.id},
],
),
CompiledSQL(
"DELETE FROM person " "WHERE person.id = :id",
lambda ctx: [{"id": p.id}],
),
CompiledSQL(
"DELETE FROM ball WHERE ball.id = :id",
lambda ctx: [
{"id": b.id},
{"id": b2.id},
{"id": b3.id},
{"id": b4.id},
],
),
)
def test_post_update_m2o_detect_none(self):
person, ball, Ball, Person = (
self.tables.person,
self.tables.ball,
self.classes.Ball,
self.classes.Person,
)
self.mapper_registry.map_imperatively(
Ball,
ball,
properties={
"person": relationship(
Person,
post_update=True,
primaryjoin=person.c.id == ball.c.person_id,
_legacy_inactive_history_style=(
self._legacy_inactive_history_style
),
)
},
)
self.mapper_registry.map_imperatively(Person, person)
sess = fixture_session(autocommit=False, expire_on_commit=True)
p1 = Person()
sess.add(Ball(person=p1))
sess.commit()
b1 = sess.query(Ball).first()
# needs to be unloaded
assert "person" not in b1.__dict__
b1.person = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE ball SET person_id=:person_id "
"WHERE ball.id = :ball_id",
lambda ctx: {"person_id": None, "ball_id": b1.id},
),
)
is_(b1.person, None)
class SelfReferentialPostUpdateTest(fixtures.MappedTest):
"""Post_update on a single self-referential mapper."""
@classmethod
def define_tables(cls, metadata):
Table(
"node",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("path", String(50), nullable=False),
Column("parent_id", Integer, ForeignKey("node.id"), nullable=True),
Column(
"prev_sibling_id",
Integer,
ForeignKey("node.id"),
nullable=True,
),
Column(
"next_sibling_id",
Integer,
ForeignKey("node.id"),
nullable=True,
),
)
@classmethod
def setup_classes(cls):
class Node(cls.Basic):
def __init__(self, path=""):
self.path = path
def test_one(self):
"""Post_update only fires off when needed.
This test case used to produce many superfluous update statements,
        particularly upon delete.
"""
node, Node = self.tables.node, self.classes.Node
self.mapper_registry.map_imperatively(
Node,
node,
properties={
"children": relationship(
Node,
primaryjoin=node.c.id == node.c.parent_id,
cascade="all",
backref=backref("parent", remote_side=node.c.id),
),
"prev_sibling": relationship(
Node,
primaryjoin=node.c.prev_sibling_id == node.c.id,
remote_side=node.c.id,
uselist=False,
),
"next_sibling": relationship(
Node,
primaryjoin=node.c.next_sibling_id == node.c.id,
remote_side=node.c.id,
uselist=False,
post_update=True,
),
},
)
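        # only "next_sibling" uses post_update here; it appears to be the one
        # relationship that would otherwise form an unresolvable cycle between
        # sibling rows during flush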
session = fixture_session(autoflush=False)
def append_child(parent, child):
if parent.children:
parent.children[-1].next_sibling = child
child.prev_sibling = parent.children[-1]
parent.children.append(child)
def remove_child(parent, child):
child.parent = None
node = child.next_sibling
node.prev_sibling = child.prev_sibling
child.prev_sibling.next_sibling = node
session.delete(child)
root = Node("root")
about = Node("about")
cats = Node("cats")
stories = Node("stories")
bruce = Node("bruce")
append_child(root, about)
assert about.prev_sibling is None
append_child(root, cats)
assert cats.prev_sibling is about
assert cats.next_sibling is None
assert about.next_sibling is cats
assert about.prev_sibling is None
append_child(root, stories)
append_child(root, bruce)
session.add(root)
session.flush()
remove_child(root, cats)
# pre-trigger lazy loader on 'cats' to make the test easier
cats.children
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
CompiledSQL(
"UPDATE node SET prev_sibling_id=:prev_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: {
"prev_sibling_id": about.id,
"node_id": stories.id,
},
),
CompiledSQL(
"UPDATE node SET next_sibling_id=:next_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: {
"next_sibling_id": stories.id,
"node_id": about.id,
},
),
CompiledSQL(
"UPDATE node SET next_sibling_id=:next_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: {"next_sibling_id": None, "node_id": cats.id},
),
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: [{"id": cats.id}],
),
)
session.delete(root)
self.assert_sql_execution(
testing.db,
session.flush,
CompiledSQL(
"UPDATE node SET next_sibling_id=:next_sibling_id "
"WHERE node.id = :node_id",
lambda ctx: [
{"node_id": about.id, "next_sibling_id": None},
{"node_id": stories.id, "next_sibling_id": None},
],
),
AllOf(
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": about.id},
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": stories.id},
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": bruce.id},
),
),
CompiledSQL(
"DELETE FROM node WHERE node.id = :id",
lambda ctx: {"id": root.id},
),
)
about = Node("about")
cats = Node("cats")
about.next_sibling = cats
cats.prev_sibling = about
session.add(about)
session.flush()
session.delete(about)
cats.prev_sibling = None
session.flush()
class SelfReferentialPostUpdateTest2(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"a_table",
metadata,
Column(
"id",
Integer(),
primary_key=True,
test_needs_autoincrement=True,
),
Column("fui", String(128)),
Column("b", Integer(), ForeignKey("a_table.id")),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
def test_one(self):
"""
Test that post_update remembers to be involved in update operations as
well, since it replaces the normal dependency processing completely
[ticket:413]
"""
A, a_table = self.classes.A, self.tables.a_table
self.mapper_registry.map_imperatively(
A,
a_table,
properties={
"foo": relationship(
A, remote_side=[a_table.c.id], post_update=True
)
},
)
session = fixture_session()
f1 = A(fui="f1")
session.add(f1)
session.flush()
f2 = A(fui="f2", foo=f1)
# at this point f1 is already inserted. but we need post_update
# to fire off anyway
session.add(f2)
session.flush()
session.expunge_all()
f1 = session.query(A).get(f1.id)
f2 = session.query(A).get(f2.id)
assert f2.foo is f1
class SelfReferentialPostUpdateTest3(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column(
"child_id",
Integer,
ForeignKey("child.id", name="c1"),
nullable=True,
),
)
Table(
"child",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column("child_id", Integer, ForeignKey("child.id")),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=True
),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
def __init__(self, name=""):
self.name = name
class Child(cls.Basic):
def __init__(self, name=""):
self.name = name
def test_one(self):
Child, Parent, parent, child = (
self.classes.Child,
self.classes.Parent,
self.tables.parent,
self.tables.child,
)
self.mapper_registry.map_imperatively(
Parent,
parent,
properties={
"children": relationship(
Child, primaryjoin=parent.c.id == child.c.parent_id
),
"child": relationship(
Child,
primaryjoin=parent.c.child_id == child.c.id,
post_update=True,
),
},
)
self.mapper_registry.map_imperatively(
Child,
child,
properties={"parent": relationship(Child, remote_side=child.c.id)},
)
session = fixture_session()
p1 = Parent("p1")
c1 = Child("c1")
c2 = Child("c2")
p1.children = [c1, c2]
c2.parent = c1
p1.child = c2
session.add_all([p1, c1, c2])
session.flush()
p2 = Parent("p2")
c3 = Child("c3")
p2.children = [c3]
p2.child = c3
session.add(p2)
session.delete(c2)
p1.children.remove(c2)
p1.child = None
session.flush()
p2.child = None
session.flush()
class PostUpdateBatchingTest(fixtures.MappedTest):
"""test that lots of post update cols batch together into a single
UPDATE."""
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column(
"c1_id",
Integer,
ForeignKey("child1.id", name="c1"),
nullable=True,
),
Column(
"c2_id",
Integer,
ForeignKey("child2.id", name="c2"),
nullable=True,
),
Column(
"c3_id",
Integer,
ForeignKey("child3.id", name="c3"),
nullable=True,
),
)
Table(
"child1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
Table(
"child2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
Table(
"child3",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
def __init__(self, name=""):
self.name = name
class Child1(cls.Basic):
def __init__(self, name=""):
self.name = name
class Child2(cls.Basic):
def __init__(self, name=""):
self.name = name
class Child3(cls.Basic):
def __init__(self, name=""):
self.name = name
def test_one(self):
child1, child2, child3, Parent, parent, Child1, Child2, Child3 = (
self.tables.child1,
self.tables.child2,
self.tables.child3,
self.classes.Parent,
self.tables.parent,
self.classes.Child1,
self.classes.Child2,
self.classes.Child3,
)
self.mapper_registry.map_imperatively(
Parent,
parent,
properties={
"c1s": relationship(
Child1, primaryjoin=child1.c.parent_id == parent.c.id
),
"c2s": relationship(
Child2, primaryjoin=child2.c.parent_id == parent.c.id
),
"c3s": relationship(
Child3, primaryjoin=child3.c.parent_id == parent.c.id
),
"c1": relationship(
Child1,
primaryjoin=child1.c.id == parent.c.c1_id,
post_update=True,
),
"c2": relationship(
Child2,
primaryjoin=child2.c.id == parent.c.c2_id,
post_update=True,
),
"c3": relationship(
Child3,
primaryjoin=child3.c.id == parent.c.c3_id,
post_update=True,
),
},
)
self.mapper_registry.map_imperatively(Child1, child1)
self.mapper_registry.map_imperatively(Child2, child2)
self.mapper_registry.map_imperatively(Child3, child3)
sess = fixture_session()
p1 = Parent("p1")
c11, c12, c13 = Child1("c1"), Child1("c2"), Child1("c3")
c21, c22, c23 = Child2("c1"), Child2("c2"), Child2("c3")
c31, c32, c33 = Child3("c1"), Child3("c2"), Child3("c3")
p1.c1s = [c11, c12, c13]
p1.c2s = [c21, c22, c23]
p1.c3s = [c31, c32, c33]
sess.add(p1)
sess.flush()
p1.c1 = c12
p1.c2 = c23
p1.c3 = c31
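        # all three post_update column changes are expected to batch into the
        # single UPDATE statement asserted below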
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, c3_id=:c3_id "
"WHERE parent.id = :parent_id",
lambda ctx: {
"c2_id": c23.id,
"parent_id": p1.id,
"c1_id": c12.id,
"c3_id": c31.id,
},
),
)
p1.c1 = p1.c2 = p1.c3 = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, c3_id=:c3_id "
"WHERE parent.id = :parent_id",
lambda ctx: {
"c2_id": None,
"parent_id": p1.id,
"c1_id": None,
"c3_id": None,
},
),
)
class PostUpdateOnUpdateTest(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(Integer)
favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk"))
bs = relationship("B", primaryjoin="A.id == B.a_id")
favorite_b = relationship(
"B", primaryjoin="A.favorite_b_id == B.id", post_update=True
)
updated = Column(Integer, onupdate=lambda: next(cls.counter))
updated_db = Column(
Integer,
onupdate=bindparam(
key="foo", callable_=lambda: next(cls.db_counter)
),
)
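            # "updated" uses a plain Python-side onupdate callable, while
            # "updated_db" wraps its callable in bindparam() so the value is
            # resolved at statement execution time; the tests below expect the
            # former to be refreshed in the flush and the latter to be expired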
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id", name="a_fk"))
def setup_test(self):
PostUpdateOnUpdateTest.counter = count()
PostUpdateOnUpdateTest.db_counter = count()
def test_update_defaults(self):
A, B = self.classes("A", "B")
s = fixture_session()
a1 = A()
b1 = B()
a1.bs.append(b1)
a1.favorite_b = b1
s.add(a1)
s.flush()
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
def test_update_defaults_refresh_flush_event(self):
A, B = self.classes("A", "B")
canary = mock.Mock()
event.listen(A, "refresh_flush", canary.refresh_flush)
event.listen(A, "expire", canary.expire)
s = fixture_session()
a1 = A()
b1 = B()
a1.bs.append(b1)
a1.favorite_b = b1
s.add(a1)
s.flush()
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
eq_(
canary.mock_calls,
[
mock.call.refresh_flush(a1, mock.ANY, ["updated"]),
mock.call.expire(a1, ["updated_db"]),
],
)
def test_update_defaults_refresh_flush_event_no_postupdate(self):
# run the same test as test_update_defaults_refresh_flush_event
# but don't actually use any postupdate functionality
(A,) = self.classes("A")
canary = mock.Mock()
event.listen(A, "refresh_flush", canary.refresh_flush)
event.listen(A, "expire", canary.expire)
s = fixture_session()
a1 = A()
s.add(a1)
s.flush()
eq_(a1.updated, None)
eq_(a1.updated_db, None)
# now run a normal UPDATE
a1.data = 5
s.flush()
# now they are updated
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
eq_(
canary.mock_calls,
[
mock.call.refresh_flush(a1, mock.ANY, ["updated"]),
mock.call.expire(a1, ["updated_db"]),
],
)
def test_update_defaults_dont_expire_on_delete(self):
A, B = self.classes("A", "B")
canary = mock.Mock()
event.listen(A, "refresh_flush", canary.refresh_flush)
event.listen(A, "expire", canary.expire)
s = fixture_session()
a1 = A()
b1 = B()
a1.bs.append(b1)
a1.favorite_b = b1
s.add(a1)
s.flush()
eq_(
canary.mock_calls,
[
mock.call.refresh_flush(a1, mock.ANY, ["updated"]),
mock.call.expire(a1, ["updated_db"]),
],
)
# ensure that we load this value here, we want to see that it
# stays the same and isn't expired below.
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
s.delete(a1)
s.flush()
assert a1 not in s
# both the python-side default and the server side default
# *did* get bumped for the UPDATE, however the row was then
# deleted, show what the values were *before* the UPDATE.
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
eq_(
canary.mock_calls,
[
# previous flush
mock.call.refresh_flush(a1, mock.ANY, ["updated"]),
mock.call.expire(a1, ["updated_db"]),
# nothing happened
],
)
eq_(next(self.counter), 2)
def test_update_defaults_dont_expire_on_delete_no_postupdate(self):
# run the same test as
# test_update_defaults_dont_expire_on_delete_no_postupdate
# but don't actually use any postupdate functionality
(A,) = self.classes("A")
canary = mock.Mock()
event.listen(A, "refresh_flush", canary.refresh_flush)
event.listen(A, "expire", canary.expire)
s = fixture_session()
a1 = A()
s.add(a1)
s.flush()
eq_(a1.updated, None)
eq_(a1.updated_db, None)
a1.data = 5
s.flush()
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
eq_(
canary.mock_calls,
[
mock.call.refresh_flush(a1, mock.ANY, ["updated"]),
mock.call.expire(a1, ["updated_db"]),
],
)
# ensure that we load this value here, we want to see that it
# stays the same and isn't expired below.
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
s.delete(a1)
s.flush()
assert a1 not in s
# no UPDATE was emitted here, so they stay at zero. but the
# server-side default wasn't expired either even though the row
# was deleted.
eq_(a1.updated, 0)
eq_(a1.updated_db, 0)
eq_(
canary.mock_calls,
[
# previous flush
mock.call.refresh_flush(a1, mock.ANY, ["updated"]),
mock.call.expire(a1, ["updated_db"]),
# nothing called for this flush
],
)
def test_update_defaults_can_set_value(self):
A, B = self.classes("A", "B")
s = fixture_session()
a1 = A()
b1 = B()
a1.bs.append(b1)
a1.favorite_b = b1
a1.updated = 5
a1.updated_db = 7
s.add(a1)
s.flush()
# doesn't require DB access
s.expunge(a1)
eq_(a1.updated, 5)
eq_(a1.updated_db, 7)
| {
"content_hash": "48ced782e2c4c33ae7a6950904b220d4",
"timestamp": "",
"source": "github",
"line_count": 2006,
"max_line_length": 79,
"avg_line_length": 29.166999002991027,
"alnum_prop": 0.4655010340289528,
"repo_name": "monetate/sqlalchemy",
"id": "c75f2a549a5004d0d4ad47bb023f6f11c9132a48",
"size": "58509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/orm/test_cycles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
} |
"""geometry module"""
from __future__ import (division,absolute_import)
from geode import *
from numpy import asarray
BoxTrees = {2:BoxTree2d,3:BoxTree3d}
def BoxTree(X,leaf_size):
X = asarray(X)
return BoxTrees[X.shape[1]](X,leaf_size)
ParticleTrees = {2:ParticleTree2d,3:ParticleTree3d}
def ParticleTree(X,leaf_size):
X = asarray(X)
return ParticleTrees[X.shape[1]](X,leaf_size)
SimplexTrees = {(2,1):SegmentTree2d,(3,1):SegmentTree3d,(2,2):TriangleTree2d,(3,2):TriangleTree3d}
def SimplexTree(mesh,X,leaf_size):
X = asarray(X)
return SimplexTrees[X.shape[1],mesh.d](mesh,X,leaf_size)
Boxes = {1:Box1d,2:Box2d,3:Box3d}
def Box(min,max):
try:
d = len(min)
except TypeError:
d = len(max)
return Boxes[d](min,max)
Spheres = {2:Sphere2d,3:Sphere3d}
def Sphere(center,radius):
center = asarray(center)
return Spheres[len(center)](center,radius)
Capsules = {2:Capsule2d,3:Capsule3d}
def Capsule(x0,x1,radius):
try:
d = len(x0)
except TypeError:
d = len(x1)
return Capsules[d](x0,x1,radius)
empty_boxes = {1:empty_box_1d,2:empty_box_2d,3:empty_box_3d}
def empty_box(d):
return empty_boxes[d]()
FrameImplicits = {2:FrameImplicit2d,3:FrameImplicit3d}
def FrameImplicit(frame,object):
return FrameImplicits[object.d](frame,object)
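# Illustrative usage (a sketch only -- it assumes the compiled geode extension
# module is importable and that the wrapped classes behave as their names
# suggest; nothing below is exercised by this module itself):
#
#   import numpy
#   box = Box(numpy.zeros(3), numpy.ones(3))        # dispatches to Box3d
#   sphere = Sphere((0.0, 0.0), 1.0)                # dispatches to Sphere2d
#   tree = ParticleTree(numpy.zeros((10, 3)), 4)    # dispatches to ParticleTree3d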
| {
"content_hash": "c120695175c7faa2b87c6b9034511499",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 98,
"avg_line_length": 25.62,
"alnum_prop": 0.7096018735362998,
"repo_name": "mikest/geode",
"id": "fcd8217ca2dc147f81fa6a5c4bb80fe5b4e5709d",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geode/geometry/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "113700"
},
{
"name": "C++",
"bytes": "2421742"
},
{
"name": "Mathematica",
"bytes": "94062"
},
{
"name": "Objective-C",
"bytes": "54710"
},
{
"name": "Python",
"bytes": "266407"
},
{
"name": "Shell",
"bytes": "791"
}
],
"symlink_target": ""
} |
from django import template
from django.core import paginator
import urllib.request, urllib.parse, urllib.error
register = template.Library()
@register.simple_tag(takes_context=True)
def paginate(context, qs, count=20):
pages = paginator.Paginator(qs, int(count))
try:
ix = int(context['request'].GET.get('page', 1))
except ValueError:
ix = 1
try:
return pages.page(ix)
    except paginator.InvalidPage:
ix = 1 if ix < 1 else pages.num_pages
return pages.page(ix)
@register.simple_tag(takes_context=True)
def add_page_number_to_query(context, page, get=None):
if get is None:
get = context['request'].GET.copy()
else:
get = dict(get)
get['page'] = page
return urllib.parse.urlencode(get)
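# Illustrative template usage (a sketch only; the context variable names below
# are assumptions, not taken from this repository):
#
#   {% load assopy_tags %}
#   {% paginate object_list 20 as page %}
#   {% for item in page %} ... {% endfor %}
#   <a href="?{% add_page_number_to_query page.next_page_number %}">next</a>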
| {
"content_hash": "668fab061931a2ab4d9d0e2ec80e698d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 55,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6486842105263158,
"repo_name": "EuroPython/epcon",
"id": "98cc8bacd4af469812adb39f59374d7a1a4fd09d",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/ep2021",
"path": "assopy/templatetags/assopy_tags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6475"
},
{
"name": "Dockerfile",
"bytes": "609"
},
{
"name": "HTML",
"bytes": "412025"
},
{
"name": "JavaScript",
"bytes": "421281"
},
{
"name": "Makefile",
"bytes": "4679"
},
{
"name": "Python",
"bytes": "991334"
},
{
"name": "Shell",
"bytes": "1182"
}
],
"symlink_target": ""
} |
from tempfile import NamedTemporaryFile
import pytest
import openai
from openai import util
@pytest.fixture(scope="function")
def api_key_file():
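    """Redirect openai.api_key_path to a temporary file for one test,
    restoring the previous path afterwards."""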
saved_path = openai.api_key_path
try:
with NamedTemporaryFile(prefix="openai-api-key", mode="wt") as tmp:
openai.api_key_path = tmp.name
yield tmp
finally:
openai.api_key_path = saved_path
def test_openai_api_key_path(api_key_file) -> None:
print("sk-foo", file=api_key_file)
api_key_file.flush()
assert util.default_api_key() == "sk-foo"
def test_openai_api_key_path_with_malformed_key(api_key_file) -> None:
print("malformed-api-key", file=api_key_file)
api_key_file.flush()
with pytest.raises(ValueError, match="Malformed API key"):
util.default_api_key()
| {
"content_hash": "f4d0943a9156ca617fa4e9500143301f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 26.566666666666666,
"alnum_prop": 0.6649937264742786,
"repo_name": "openai/openai-python",
"id": "d0ce0ac5c4a35dbebdd936c875dcca81b5d8e5fc",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "openai/tests/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "189146"
}
],
"symlink_target": ""
} |
from functools import reduce
from operator import iadd
import itertools
import numpy
from PyQt4.QtGui import (
QFormLayout, QGraphicsRectItem, QGraphicsGridLayout,
QFontMetrics, QPen, QIcon, QPixmap, QLinearGradient, QPainter, QColor,
QBrush, QTransform, QGraphicsWidget
)
from PyQt4.QtCore import Qt, QRect, QRectF, QSize, QPointF
from PyQt4.QtCore import pyqtSignal as Signal
import pyqtgraph as pg
import Orange.data
import Orange.misc
from Orange.clustering import hierarchical
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels, colorbrewer
from .owhierarchicalclustering import DendrogramWidget, GraphicsSimpleTextList
def _remove_item(item):
item.setParentItem(None)
scene = item.scene()
if scene is not None:
scene.removeItem(item)
class DistanceMapItem(pg.ImageItem):
"""A distance matrix image with user selectable regions.
"""
class SelectionRect(QGraphicsRectItem):
def boundingRect(self):
return super().boundingRect().adjusted(-1, 1, 1, -1)
def paint(self, painter, option, widget=None):
t = painter.transform()
rect = t.mapRect(self.rect())
painter.save()
painter.setTransform(QTransform())
pwidth = self.pen().widthF()
painter.setPen(self.pen())
painter.drawRect(rect.adjusted(pwidth, -pwidth, -pwidth, pwidth))
painter.restore()
def setRect(self, rect):
self.prepareGeometryChange()
super().setRect(rect)
selectionChanged = Signal()
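    #: selection commands, combined as a bit mask by the handlers below:
    #: Clear drops existing state, Select updates the region, Commit finalizes
    #: the pending elastic-band selection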
Clear, Select, Commit = 1, 2, 4
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setAcceptedMouseButtons(Qt.LeftButton)
self.setAcceptHoverEvents(True)
self.__selections = []
#: (QGraphicsRectItem, QRectF) | None
self.__dragging = None
def __select(self, area, command):
if command & self.Clear:
self.__clearSelections()
if command & self.Select:
area = area.normalized()
def partition(predicate, iterable):
t1, t2 = itertools.tee(iterable)
return (itertools.filterfalse(predicate, t1),
filter(predicate, t2))
def intersects(selection):
_, selarea = selection
return selarea.intersects(area)
disjoint, intersection = partition(intersects, self.__selections)
disjoint = list(disjoint)
intersection = list(intersection)
# merge intersecting selections into a single area
area = reduce(QRect.united, (area for _, area in intersection),
area)
visualarea = self.__visualRectForSelection(area)
item = DistanceMapItem.SelectionRect(visualarea, self)
item.update()
item.show()
pen = QPen(Qt.red, 0)
item.setPen(pen)
selection = disjoint + [(item, area)]
for item, _ in intersection:
_remove_item(item)
self.__selections = selection
self.selectionChanged.emit()
def __elastic_band_select(self, area, command):
if command & self.Clear and self.__dragging:
item, area = self.__dragging
_remove_item(item)
self.__dragging = None
if command & self.Select:
if self.__dragging:
item, _ = self.__dragging
else:
item = DistanceMapItem.SelectionRect(self)
pen = QPen(Qt.red, 0)
item.setPen(pen)
self.update()
# intersection with existing regions
intersection = [(item, selarea)
for item, selarea in self.__selections
if area.intersects(selarea)]
fullarea = reduce(
QRect.united, (selarea for _, selarea in intersection),
area
)
visualarea = self.__visualRectForSelection(fullarea)
item.setRect(visualarea)
self.__dragging = item, area
if command & self.Commit and self.__dragging:
item, area = self.__dragging
self.__select(area, self.Select)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
r, c = self._cellAt(event.pos())
if r != -1 and c != -1:
# Clear existing selection
# TODO: Fix extended selection.
self.__select(QRect(), self.Clear)
selrange = QRect(c, r, 2, 2)
self.__elastic_band_select(selrange, self.Select | self.Clear)
super().mousePressEvent(event)
event.accept()
def mouseMoveEvent(self, event):
if event.buttons() & Qt.LeftButton and self.__dragging:
r1, c1 = self._cellAt(event.buttonDownPos(Qt.LeftButton))
r2, c2 = self._cellAt(event.pos())
selrange = QRect(c1, r1, 2, 2).united(QRect(c2, r2, 2, 2))
self.__elastic_band_select(selrange, self.Select)
super().mouseMoveEvent(event)
event.accept()
def mouseReleaseEvent(self, event):
if event.button() == Qt.LeftButton and self.__dragging:
r1, c1 = self._cellAt(event.buttonDownPos(Qt.LeftButton))
r2, c2 = self._cellAt(event.pos())
selrange = QRect(c1, r1, 2, 2).united(QRect(c2, r2, 2, 2))
self.__elastic_band_select(selrange, self.Select | self.Commit)
self.__elastic_band_select(QRect(), self.Clear)
super().mouseReleaseEvent(event)
event.accept()
def _cellAt(self, pos):
"""Return the i, j cell index at `pos` in local coordinates."""
if self.image is None:
return -1, -1
else:
h, w = self.image.shape
i, j = numpy.floor([h - pos.y(), pos.x()])
            if 0 <= i < h and 0 <= j < w:
                return int(i), int(j)
            else:
                return -1, -1
def __clearSelections(self):
for item, _ in self.__selections:
_remove_item(item)
self.__selections = []
def __visualRectForSelection(self, rect):
h, _ = self.image.shape
r1, r2 = rect.top(), rect.bottom()
c1, c2 = rect.left(), rect.right()
return QRectF(QPointF(c1, h - r1), QPointF(c2, h - r2))
def __selectionForArea(self, area):
r1, c1 = self._cellAt(area.topLeft())
r2, c2 = self._cellAt(area.bottomRight())
topleft = QRect(c1, r1, 1, 1)
bottomright = QRect(c2, r2, 1, 1)
return topleft.united(bottomright).normalized()
def selections(self):
selections = [self.__selectionForArea(area)
for _, area in self.__selections]
return [(range(r.top(), r.bottom()), range(r.left(), r.right()))
for r in selections]
def hoverMoveEvent(self, event):
super().hoverMoveEvent(event)
i, j = self._cellAt(event.pos())
if i != -1 and j != -1:
d = self.image[i, self.image.shape[1] - j - 1]
self.setToolTip("{}, {}: {:.3f}".format(i, j, d))
else:
self.setToolTip("")
class DendrogramWidget(DendrogramWidget):
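    # shadows the imported DendrogramWidget: falling back to the default
    # QGraphicsWidget scene event filtering presumably keeps the embedded
    # dendrograms passive (they also refuse mouse buttons and hover events)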
def sceneEventFilter(self, recv, event):
return QGraphicsWidget.sceneEventFilter(self, recv, event)
class OWDistanceMap(widget.OWWidget):
name = "Distance Map"
description = "Visualize a distance matrix"
icon = "icons/DistanceMatrix.svg"
priority = 1200
inputs = [("Distances", Orange.misc.DistMatrix, "set_distances")]
outputs = [("Data", Orange.data.Table), ("Features", widget.AttributeList)]
display_grid = settings.Setting(False)
sorting = settings.Setting(0)
colormap = settings.Setting(0)
color_gamma = settings.Setting(0.0)
color_low = settings.Setting(0.0)
color_high = settings.Setting(1.0)
annotation_idx = settings.Setting(0)
autocommit = settings.Setting(True)
def __init__(self, parent=None):
super().__init__(parent)
self.matrix = None
self._tree = None
self._ordered_tree = None
self._sorted_matrix = None
self._sort_indices = None
self._selection = None
box = gui.widgetBox(self.controlArea, "Element sorting", margin=0)
gui.comboBox(box, self, "sorting",
items=["None", "Clustering",
"Clustering with ordered leaves"
],
callback=self._invalidate_ordering)
box = gui.widgetBox(self.controlArea, "Colors")
self.colormap_cb = gui.comboBox(
box, self, "colormap", callback=self._update_color
)
self.colormap_cb.setIconSize(QSize(64, 16))
self.palettes = list(sorted(load_default_palettes()))
init_color_combo(self.colormap_cb, self.palettes, QSize(64, 16))
self.colormap_cb.setCurrentIndex(self.colormap)
form = QFormLayout(
formAlignment=Qt.AlignLeft,
labelAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow
)
# form.addRow(
# "Gamma",
# gui.hSlider(box, self, "color_gamma", minValue=0.0, maxValue=1.0,
# step=0.05, ticks=True, intOnly=False,
# createLabel=False, callback=self._update_color)
# )
form.addRow(
"Low",
gui.hSlider(box, self, "color_low", minValue=0.0, maxValue=1.0,
step=0.05, ticks=True, intOnly=False,
createLabel=False, callback=self._update_color)
)
form.addRow(
"High",
gui.hSlider(box, self, "color_high", minValue=0.0, maxValue=1.0,
step=0.05, ticks=True, intOnly=False,
createLabel=False, callback=self._update_color)
)
box.layout().addLayout(form)
box = gui.widgetBox(self.controlArea, "Annotations")
self.annot_combo = gui.comboBox(box, self, "annotation_idx",
callback=self._invalidate_annotations)
self.annot_combo.setModel(itemmodels.VariableListModel())
self.annot_combo.model()[:] = ["None", "Enumeration"]
self.controlArea.layout().addStretch()
gui.auto_commit(self.controlArea, self, "autocommit",
"Send data", "Auto send is on")
self.view = pg.GraphicsView(background="w")
self.mainArea.layout().addWidget(self.view)
self.grid_widget = pg.GraphicsWidget()
self.grid = QGraphicsGridLayout()
self.grid_widget.setLayout(self.grid)
self.viewbox = pg.ViewBox(enableMouse=False)
self.viewbox.setAcceptedMouseButtons(Qt.NoButton)
self.viewbox.setAcceptHoverEvents(False)
self.grid.addItem(self.viewbox, 1, 1)
self.left_dendrogram = DendrogramWidget(
self.grid_widget, orientation=DendrogramWidget.Left)
self.left_dendrogram.setAcceptedMouseButtons(Qt.NoButton)
self.left_dendrogram.setAcceptHoverEvents(False)
self.top_dendrogram = DendrogramWidget(
self.grid_widget, orientation=DendrogramWidget.Top)
self.top_dendrogram.setAcceptedMouseButtons(Qt.NoButton)
self.top_dendrogram.setAcceptHoverEvents(False)
self.grid.addItem(self.left_dendrogram, 1, 0)
self.grid.addItem(self.top_dendrogram, 0, 1)
self.right_labels = TextList(
alignment=Qt.AlignLeft)
self.bottom_labels = TextList(
orientation=Qt.Horizontal, alignment=Qt.AlignRight)
self.grid.addItem(self.right_labels, 1, 2)
self.grid.addItem(self.bottom_labels, 2, 1)
self.view.setCentralItem(self.grid_widget)
self.left_dendrogram.hide()
self.top_dendrogram.hide()
self.right_labels.hide()
self.bottom_labels.hide()
self.matrix_item = None
self.dendrogram = None
self.grid_widget.scene().installEventFilter(self)
def set_distances(self, matrix):
self.clear()
self.matrix = matrix
if matrix is not None:
self.set_items(matrix.row_items, matrix.axis)
else:
self.set_items(None)
def set_items(self, items, axis=1):
self.items = items
model = self.annot_combo.model()
if items is None:
model[:] = ["None", "Enumeration"]
elif not axis:
model[:] = ["None", "Enumeration", "Attribute names"]
self.annotation_idx = 2
elif isinstance(items, Orange.data.Table):
model[:] = ["None", "Enumeration"] + list(items.domain)
elif isinstance(items, list) and \
all(isinstance(item, Orange.data.Variable) for item in items):
model[:] = ["None", "Enumeration", "Name"]
else:
model[:] = ["None", "Enumeration"]
self.annotation_idx = min(self.annotation_idx, len(model) - 1)
def clear(self):
self.matrix = None
self.cluster = None
self._tree = None
self._ordered_tree = None
self._sorted_matrix = None
self._selection = []
self._clear_plot()
def handleNewSignals(self):
if self.matrix is not None:
self._update_ordering()
self._setup_scene()
self._update_labels()
self.unconditional_commit()
def _clear_plot(self):
def remove(item):
item.setParentItem(None)
item.scene().removeItem(item)
if self.matrix_item:
remove(self.matrix_item)
self.matrix_item = None
self.top_dendrogram.hide()
self.left_dendrogram.hide()
self._set_labels(None)
def _cluster_tree(self):
if self._tree is None:
self._tree = hierarchical.dist_matrix_clustering(self.matrix)
return self._tree
def _ordered_cluster_tree(self):
if self._ordered_tree is None:
tree = self._cluster_tree()
self._ordered_tree = \
hierarchical.optimal_leaf_ordering(tree, self.matrix)
return self._ordered_tree
def _setup_scene(self):
self.matrix_item = DistanceMapItem(self._sorted_matrix[:, ::-1])
self.viewbox.addItem(self.matrix_item)
self.viewbox.setRange(QRectF(0, 0, *self._sorted_matrix.shape),
padding=0)
self.matrix_item.selectionChanged.connect(self._invalidate_selection)
if self.sorting == 0:
tree = None
elif self.sorting == 1:
tree = self._cluster_tree()
else:
tree = self._ordered_cluster_tree()
self._set_displayed_dendrogram(tree)
self._update_color()
def _set_displayed_dendrogram(self, root):
self.left_dendrogram.set_root(root)
self.top_dendrogram.set_root(root)
self.left_dendrogram.setVisible(root is not None)
self.top_dendrogram.setVisible(root is not None)
constraint = 0 if root is None else -1 # 150
self.left_dendrogram.setMaximumWidth(constraint)
self.top_dendrogram.setMaximumHeight(constraint)
def _invalidate_ordering(self):
self._sorted_matrix = None
if self.matrix is not None:
self._update_ordering()
self._setup_scene()
def _update_ordering(self):
if self.sorting == 0:
self._sorted_matrix = self.matrix.X
self._sort_indices = None
else:
if self.sorting == 1:
tree = self._cluster_tree()
elif self.sorting == 2:
tree = self._ordered_cluster_tree()
leaves = hierarchical.leaves(tree)
indices = numpy.array([leaf.value.index for leaf in leaves])
X = self.matrix.X
self._sorted_matrix = X[indices[:, numpy.newaxis],
indices[numpy.newaxis, :]]
self._sort_indices = indices
def _invalidate_annotations(self):
if self.matrix is not None:
self._update_labels()
    def _update_labels(self):
if self.annotation_idx == 0:
labels = None
elif self.annotation_idx == 1:
labels = [str(i + 1) for i in range(self.matrix.dim[0])]
elif self.annot_combo.model()[self.annotation_idx] == "Attribute names":
attr = self.matrix.row_items.domain.attributes
labels = [str(attr[i]) for i in range(self.matrix.dim[0])]
elif self.annotation_idx == 2 and \
isinstance(self.items, widget.AttributeList):
labels = [v.name for v in self.items]
        elif isinstance(self.items, Orange.data.Table):
            var = self.annot_combo.model()[self.annotation_idx]
            column, _ = self.items.get_column_view(var)
            labels = [var.repr_val(value) for value in column]
        else:
            # fall back to no labels for any unhandled annotation choice
            labels = None
        self._set_labels(labels)
def _set_labels(self, labels):
self._labels = labels
if labels and self.sorting:
sortind = self._sort_indices
labels = [labels[i] for i in sortind]
for textlist in [self.right_labels, self.bottom_labels]:
textlist.set_labels(labels or [])
textlist.setVisible(bool(labels))
constraint = -1 if labels else 0
self.right_labels.setMaximumWidth(constraint)
self.bottom_labels.setMaximumHeight(constraint)
def _update_color(self):
if self.matrix_item:
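            # build a 256-entry RGB lookup table for the image by interpolating
            # the selected brewer palette between the low/high cutoff sliders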
name, colors = self.palettes[self.colormap]
n, colors = max(colors.items())
colors = numpy.array(colors, dtype=numpy.ubyte)
low, high = self.color_low * 255, self.color_high * 255
points = numpy.linspace(low, high, n)
space = numpy.linspace(0, 255, 255)
r = numpy.interp(space, points, colors[:, 0], left=255, right=0)
g = numpy.interp(space, points, colors[:, 1], left=255, right=0)
b = numpy.interp(space, points, colors[:, 2], left=255, right=0)
colortable = numpy.c_[r, g, b]
self.matrix_item.setLookupTable(colortable)
def _invalidate_selection(self):
ranges = self.matrix_item.selections()
ranges = reduce(iadd, ranges, [])
indices = reduce(iadd, ranges, [])
if self.sorting:
sortind = self._sort_indices
indices = [sortind[i] for i in indices]
self._selection = list(sorted(set(indices)))
self.commit()
def commit(self):
datasubset = None
featuresubset = None
if not self._selection:
pass
elif isinstance(self.items, Orange.data.Table):
indices = self._selection
datasubset = self.items.from_table_rows(self.items, indices)
elif isinstance(self.items, widget.AttributeList):
subset = [self.items[i] for i in self._selection]
featuresubset = widget.AttributeList(subset)
self.send("Data", datasubset)
self.send("Features", featuresubset)
class TextList(GraphicsSimpleTextList):
def resizeEvent(self, event):
super().resizeEvent(event)
self._updateFontSize()
def _updateFontSize(self):
crect = self.contentsRect()
if self.orientation == Qt.Vertical:
h = crect.height()
else:
h = crect.width()
n = len(getattr(self, "label_items", []))
if n == 0:
return
lineheight = max(1, h / n)
fontsize = self._point_size(lineheight)
font = self.font()
font.setPointSize(fontsize)
self.setFont(font)
self.layout().invalidate()
self.layout().activate()
def _point_size(self, height):
font = self.font()
font.setPointSize(height)
fix = 0
while QFontMetrics(font).lineSpacing() > height and height - fix > 1:
fix += 1
font.setPointSize(height - fix)
return height - fix
##########################
# Color palette management
##########################
def palette_gradient(colors, discrete=False):
n = len(colors)
stops = numpy.linspace(0.0, 1.0, n, endpoint=True)
gradstops = [(float(stop), color) for stop, color in zip(stops, colors)]
grad = QLinearGradient(QPointF(0, 0), QPointF(1, 0))
grad.setStops(gradstops)
return grad
def palette_pixmap(colors, size):
img = QPixmap(size)
img.fill(Qt.transparent)
painter = QPainter(img)
grad = palette_gradient(colors)
grad.setCoordinateMode(QLinearGradient.ObjectBoundingMode)
painter.setPen(Qt.NoPen)
painter.setBrush(QBrush(grad))
painter.drawRect(0, 0, size.width(), size.height())
painter.end()
return img
def init_color_combo(cb, palettes, iconsize):
cb.clear()
iconsize = cb.iconSize()
for name, palette in palettes:
n, colors = max(palette.items())
colors = [QColor(*c) for c in colors]
cb.addItem(QIcon(palette_pixmap(colors, iconsize)), name,
palette)
def load_default_palettes():
palettes = colorbrewer.colorSchemes["sequential"]
return list(palettes.items())
def test():
from PyQt4.QtGui import QApplication
import sip
import Orange.distance
app = QApplication([])
w = OWDistanceMap()
w.show()
w.raise_()
data = Orange.data.Table("iris")
# data = Orange.data.Table("housing")
dist = Orange.distance.Euclidean(data)
w.set_distances(dist)
w.handleNewSignals()
rval = app.exec_()
w.onDeleteWidget()
sip.delete(w)
del w
return rval
if __name__ == "__main__":
import sys
sys.exit(test())
| {
"content_hash": "999771090234c74c5c1d850822821f9f",
"timestamp": "",
"source": "github",
"line_count": 660,
"max_line_length": 80,
"avg_line_length": 33.557575757575755,
"alnum_prop": 0.5793751128770092,
"repo_name": "qusp/orange3",
"id": "c64229a52cd011c56eda5ff308356dbe7b22c3e1",
"size": "22148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Orange/widgets/unsupervised/owdistancemap.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "JavaScript",
"bytes": "3025"
},
{
"name": "NSIS",
"bytes": "19239"
},
{
"name": "Python",
"bytes": "3378832"
},
{
"name": "Shell",
"bytes": "37336"
}
],
"symlink_target": ""
} |
import gmsh
gmsh.initialize()
gmsh.merge('param.txt')
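# merging the file should also register any ONELAB parameters it defines,
# making them visible through the gmsh.onelab API used below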
print(gmsh.onelab.get())
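# "Conditions aux limites" is French for "boundary conditions"; print the
# numeric value of every matching ONELAB parameter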
bcs = gmsh.onelab.getNames("Conditions aux limites.*")
for bc in bcs:
print(bc, gmsh.onelab.getNumber(bc))
| {
"content_hash": "1e9e68bf291b7a3151803809776c2dba",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 23.875,
"alnum_prop": 0.7225130890052356,
"repo_name": "rboman/progs",
"id": "4c5b8f7a10ca649550bd5def934c2ee291516104",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/param.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "15571"
},
{
"name": "C",
"bytes": "166004"
},
{
"name": "C#",
"bytes": "2021"
},
{
"name": "C++",
"bytes": "1063256"
},
{
"name": "CMake",
"bytes": "211806"
},
{
"name": "Eiffel",
"bytes": "5484041"
},
{
"name": "Fortran",
"bytes": "576316"
},
{
"name": "GLSL",
"bytes": "3366"
},
{
"name": "HTML",
"bytes": "7199"
},
{
"name": "Java",
"bytes": "21330"
},
{
"name": "JavaScript",
"bytes": "28"
},
{
"name": "Julia",
"bytes": "1730"
},
{
"name": "Lua",
"bytes": "10474"
},
{
"name": "M",
"bytes": "143"
},
{
"name": "MATLAB",
"bytes": "7915698"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Objective-C++",
"bytes": "183"
},
{
"name": "PHP",
"bytes": "10089"
},
{
"name": "PostScript",
"bytes": "450068"
},
{
"name": "Processing",
"bytes": "2358"
},
{
"name": "Python",
"bytes": "1107870"
},
{
"name": "QMake",
"bytes": "3608"
},
{
"name": "SWIG",
"bytes": "14104"
},
{
"name": "Shell",
"bytes": "52373"
},
{
"name": "TeX",
"bytes": "166564"
}
],
"symlink_target": ""
} |
"""Tests for n-gram layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import ngram
from tensor2tensor.utils import test_utils
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
class NGramTest(tf.test.TestCase):
@test_utils.run_in_graph_and_eager_modes()
def testNGramLayerShape(self):
batch_size = 2
length = 8
vocab_size = 3
minval = 1
maxval = 4
inputs = tf.random_uniform(
[batch_size, length], minval=0, maxval=vocab_size, dtype=tf.int32)
layer = ngram.NGram(vocab_size, minval, maxval)
outputs = layer(inputs)
outputs_val = self.evaluate(outputs)
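    # The layer emits one count per possible n-gram of order n in
    # [minval, maxval), hence the expected feature dimension below.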
num_ngrams = sum([vocab_size**n for n in range(minval, maxval)])
self.assertEqual(outputs_val.shape, (batch_size, num_ngrams))
@test_utils.run_in_graph_and_eager_modes()
def testNGramLayerOutput(self):
inputs = tf.constant(
[[0, 0, 0, 0, 1],
[2, 1, 2, 1, 0]], dtype=tf.int32)
layer = ngram.NGram(3, minval=1, maxval=3)
outputs = layer(inputs)
expected_outputs = tf.constant(
[[4., 1., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 2., 2., 0., 0., 0., 0., 0., 0., 0., 2., 0.]], dtype=tf.float32)
outputs_val, expected_outputs_val = self.evaluate(
[outputs, expected_outputs])
self.assertAllEqual(outputs_val, expected_outputs_val)
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "8aa7a4371f21bd934fd9b62b96e73bf7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 30.604166666666668,
"alnum_prop": 0.6337644656228727,
"repo_name": "tensorflow/tensor2tensor",
"id": "a7b8e8787f92503a14fac6c6e822c35af17757f7",
"size": "2075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/layers/ngram_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "32015"
},
{
"name": "HTML",
"bytes": "34684"
},
{
"name": "JavaScript",
"bytes": "78408"
},
{
"name": "Jupyter Notebook",
"bytes": "2859453"
},
{
"name": "Python",
"bytes": "5109255"
},
{
"name": "Shell",
"bytes": "11941"
}
],
"symlink_target": ""
} |
"""Various classes representing distributed values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _get_current_replica_id_as_int():
"""Returns the current replica ID as an integer, or `None`."""
replica_context = ds_context.get_replica_context()
if replica_context:
replica_id = replica_context.replica_id_in_sync_group
if not isinstance(replica_id, int):
replica_id = tensor_util.constant_value(replica_id)
else:
replica_id = distribute_lib.get_update_replica_id()
return replica_id
@tf_export("distribute.DistributedValues", v1=[])
class DistributedValues(object):
"""Base class for representing distributed values.
A subclass instance of DistributedValues is created when creating variables
within a distribution strategy, iterating a `tf.Dataset` or through
`strategy.run`. This base class should never be instantiated
directly. DistributedValues contains a value per replica. Depending on
the subclass, the values could either be synced on update, synced on demand,
or never synced.
  DistributedValues can be reduced to obtain a single value across replicas,
  passed as input into `run`, or the per-replica values can be inspected
  using `experimental_local_results`.
Example usage:
1. Created from Dataset:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
2. Returned by `run`:
>>> strategy = tf.distribute.MirroredStrategy()
>>> @tf.function
... def run():
... ctx = tf.distribute.get_replica_context()
... return ctx.replica_id_in_sync_group
>>> distributed_values = strategy.run(run)
3. As input into `run`:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
>>> @tf.function
... def run(input):
... return input + 1.0
>>> updated_value = strategy.run(run, args=(distributed_values,))
4. Reduce value:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> distributed_values = next(dataset_iterator)
>>> reduced_value = strategy.reduce(tf.distribute.ReduceOp.SUM,
... distributed_values,
... axis = 0)
5. Inspect per replica values:
>>> strategy = tf.distribute.MirroredStrategy()
>>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2)
>>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> per_replica_values = strategy.experimental_local_results(
... distributed_values)
>>> per_replica_values
(<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([5., 6.], dtype=float32)>,)
"""
def __init__(self, values):
"""Should only be called by subclass __init__."""
self._values = tuple(values)
def _get(self):
"""Returns the value for the current device or raises a ValueError."""
replica_id = _get_current_replica_id_as_int()
if replica_id is None:
return self._get_cross_replica()
else:
return self._values[replica_id]
def _get_cross_replica(self):
raise NotImplementedError(
"This method should be overridden by sub-classes which support cross-"
"replica accesses.")
def _get_closest(self):
"""Returns value in same replica or device if possible, else the _primary."""
replica_id = _get_current_replica_id_as_int()
if replica_id is None:
# Try to find a value on the current device.
current_device = device_util.canonicalize(device_util.current())
for value in self._values:
if device_util.canonicalize(value.device) == current_device:
return value
return self._primary
else:
return self._values[replica_id]
@property
def _primary(self):
"""Returns a representative component."""
return self._values[0]
@property
def _devices(self):
return tuple(v.device for v in self._values)
def __str__(self):
debug_str = ",\n".join(
" %d: %s" % (i, v) for i, v in enumerate(self._values))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str)
def __repr__(self):
debug_repr = ",\n".join(
" %d: %r" % (i, v) for i, v in enumerate(self._values))
return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr)
# NOTE(josh11b,apassos): It would be great if we could inspect the values this was
# initialized with and use that to generate the overloaded operators here.
# Unfortunately, Python's rules for special methods don't allow this, see
# https://docs.python.org/3/reference/datamodel.html#special-method-names
# "if a class defines a method named __getitem__(), and x is an instance of
# this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)."
# In particular, these special methods don't go through __getattr__, and
# it will only use those methods if they are defined in the class, not the
# object.
class DistributedDelegate(DistributedValues):
"""A map from device to values; acts as the same type as the values."""
def __getattr__(self, name):
    # '_use_resource_variables' and the attrs that start with '_self' are used
    # for restoring the saved_model proto, and '_attribute_sentinel' is used for
# Layer tracking. At the point these attrs are queried, the variable has not
# been initialized. Thus it should not query those of the underlying
# components.
if name.startswith("_self_") or name in ("_use_resource_variables",
"_attribute_sentinel",
"_distributed_container"):
return super(DistributedDelegate, self).__getattr__(name)
# TODO(priyag): This needs to be made robust against pitfalls from mix use
# __getattr__ and @property. See b/120402273.
return getattr(self._get(), name)
@property
def values(self):
"""Returns the per replica values."""
return self._values
def _get_as_operand(self):
"""Returns the value for operations for the current device.
Some implementations, e.g. `TPUMirroredVariable`, are not able to return the
value type within a replica context. They can, however, return a value that
can be used by the operations below.
"""
return self._get()
# pylint: disable=multiple-statements
def __add__(self, o):
return self._get_as_operand() + o
def __radd__(self, o):
return o + self._get_as_operand()
def __sub__(self, o):
return self._get_as_operand() - o
def __rsub__(self, o):
return o - self._get_as_operand()
def __mul__(self, o):
return self._get_as_operand() * o
def __rmul__(self, o):
return o * self._get_as_operand()
def __truediv__(self, o):
return self._get_as_operand() / o
def __rtruediv__(self, o):
return o / self._get_as_operand()
def __floordiv__(self, o):
return self._get_as_operand() // o
def __rfloordiv__(self, o):
return o // self._get_as_operand()
def __mod__(self, o):
return self._get_as_operand() % o
def __rmod__(self, o):
return o % self._get_as_operand()
def __lt__(self, o):
return self._get_as_operand() < o
def __le__(self, o):
return self._get_as_operand() <= o
def __gt__(self, o):
return self._get_as_operand() > o
def __ge__(self, o):
return self._get_as_operand() >= o
def __and__(self, o):
return self._get_as_operand() & o
def __rand__(self, o):
return o & self._get_as_operand()
def __or__(self, o):
return self._get_as_operand() | o
def __ror__(self, o):
return o | self._get_as_operand()
def __xor__(self, o):
return self._get_as_operand() ^ o
def __rxor__(self, o):
return o ^ self._get_as_operand()
def __getitem__(self, o):
return self._get_as_operand()[o]
def __pow__(self, o, modulo=None):
return pow(self._get_as_operand(), o, modulo)
def __rpow__(self, o):
return pow(o, self._get_as_operand())
def __invert__(self):
return ~self._get_as_operand()
def __neg__(self):
return -self._get_as_operand()
def __abs__(self):
return abs(self._get_as_operand())
def __div__(self, o):
try:
return self._get_as_operand().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._get_as_operand().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._get_as_operand().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._get_as_operand().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# TODO(josh11b): Even more operator overloads.
class PerReplica(DistributedValues, composite_tensor.CompositeTensor):
"""Holds a map from replica to unsynchronized values."""
@property
def _type_spec(self):
return PerReplicaSpec(
*(type_spec.type_spec_from_value(v) for v in self._values))
@property
def values(self):
"""Returns the per replica values."""
return self._values
class PerReplicaSpec(type_spec.TypeSpec):
"""Type specification for a `PerReplica`."""
__slots__ = ["_value_specs"]
value_type = property(lambda self: PerReplica)
def __init__(self, *value_specs):
self._value_specs = tuple(value_specs)
def _serialize(self):
return self._value_specs
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
replica_context = ds_context.get_replica_context()
if replica_context is not None and replica_context.num_replicas_in_sync > 1:
raise ValueError(
"Flattening a PerReplica to components is not supported in replica "
"context.")
return value._values # pylint: disable=protected-access
def _from_components(self, tensor_list):
return PerReplica(tensor_list)
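# Example (added for illustration, using only the spec defined above): a
# PerReplica of two tensors round-trips through its type spec as
#   spec = per_replica._type_spec
#   components = spec._to_components(per_replica)  # tuple of per-replica tensors
#   rebuilt = spec._from_components(components)    # a PerReplica again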
# Note that unlike PerReplica, Mirrored values inherit from
# DistributedDelegate and so can be used directly in cross-replica mode.
# TODO(tomhennigan) Should this extend CompositeTensor?
class Mirrored(DistributedDelegate):
"""Holds a map from replica to values which are kept in sync."""
def _get_cross_replica(self):
return self._get_closest()
def _as_graph_element(self):
obj = self._get()
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return obj
def _assign_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign(tensor)
def _assign_add_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign_add(tensor)
def _assign_sub_on_device(device, variable, tensor):
with ops.device(device):
return variable.assign_sub(tensor)
class DistributedVarOp(object):
"""A class that looks like `tf.Operation`."""
def __init__(self, name, graph, traceback, typ):
self.name = name
self.graph = graph
self.traceback = traceback
self.type = typ
def __eq__(self, o):
if not isinstance(o, self.__class__):
raise NotImplementedError
return (self.name == o.name and self.graph == o.graph and
self.traceback == o.traceback and self.type == o.type)
def __hash__(self):
return hash((self.name, self.graph, self.traceback, self.type))
class DistributedVariable(DistributedDelegate, variables_lib.Variable):
"""Holds a map from replica to variables."""
  # TODO(josh11b): Support changing the set of variables, e.g. if new
# devices are joining or a device is to leave.
def __init__(self, strategy, values, aggregation):
self._distribute_strategy = strategy
self._aggregation = aggregation
super(DistributedVariable, self).__init__(values)
self._common_name = self._primary.name.split(":")[0]
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
The op that evaluates to True or False depending on if all the
component variables are initialized.
"""
result = self._primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(
result, self._values[-1].is_initialized(), name=name)
return result
@property
def initializer(self):
if self._initializer_op:
init_op = self._initializer_op
else:
# return grouped ops of all the var initializations of component values of
# the mirrored variable
init_op = control_flow_ops.group(
tuple(v.initializer for v in self._values))
return init_op
def initialized_value(self):
return self._get_closest().initialized_value()
@property
def initial_value(self):
return self._get_closest().initial_value
@property
def constraint(self):
return self._primary.constraint
@property
def graph(self):
return self._primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self._primary._unique_id # pylint: disable=protected-access
@property
def _graph_key(self):
"""Lets Optimizers know which graph this variable is from."""
return self._primary._graph_key # pylint: disable=protected-access
@property
def name(self):
return self._primary.name
@property
def dtype(self):
return self._primary.dtype
@property
def shape(self):
return self._primary.shape
@property
def synchronization(self):
return self._primary.synchronization
@property
def aggregation(self):
return self._aggregation
@property
def handle(self):
replica_id = _get_current_replica_id_as_int()
if replica_id is None:
raise ValueError("`handle` is not available outside the replica context"
" or a `tf.distribute.Strategy.update()` call.")
else:
return self._values[replica_id].handle
def eval(self, session=None):
return self._get_closest().eval(session)
@property
def _save_slice_info(self):
return self._primary._save_slice_info # pylint: disable=protected-access
def _get_save_slice_info(self):
return self._primary._get_save_slice_info() # pylint: disable=protected-access
def _set_save_slice_info(self, save_slice_info):
for v in self._values:
v._set_save_slice_info(save_slice_info) # pylint: disable=protected-access
@property
def device(self):
return self._get_closest().device
@property
def trainable(self):
return self._primary.trainable
@property
def distribute_strategy(self):
return self._distribute_strategy
def get_shape(self):
return self._primary.get_shape()
def to_proto(self, export_scope=None):
return self._primary.to_proto(export_scope=export_scope)
@property
def op(self):
# We want cross-replica code that does some var.op.X calls
# to work (even if the current device isn't in self._devices), but
# other uses of var.op in a cross-replica context to fail.
if ds_context.in_cross_replica_context():
return DistributedVarOp(self._primary.op.name, self._primary.op.graph,
self._primary.op.traceback, self._primary.op.type)
return self._get().op
@property
def _in_graph_mode(self):
return self._primary._in_graph_mode # pylint: disable=protected-access
def read_value(self):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return array_ops.identity(self._get())
def value(self):
return self._get_closest().value()
def _update_cross_replica(self, update_fn, value, **kwargs):
"""Applies updates across replicas.
Args:
update_fn: A callable to pass to `strategy.extended.update` to update the
        variable. It should have the same signature as `Variable.assign()`.
value: value to be passed to `update_fn`.
**kwargs: remaining arguments to `update_fn`.
Returns:
Updated variable or `tf.Operation`.
"""
return self.distribute_strategy.extended.update(
self, update_fn, args=(value,), kwargs=kwargs, group=True)
def _update_replica(self, update_fn, value, **kwargs):
"""Applies updates in one replica.
Args:
      update_fn: A callable to update the variable. It should have the same
signature as `Variable.assign()`.
value: value to be passed to `update_fn`.
**kwargs: remaining arguments to `update_fn`.
Returns:
Updated variable or `tf.Operation`.
"""
raise NotImplementedError("should be implemented by subclass.")
def _update(self, update_fn, value, **kwargs):
"""Applies updates depending on the context.
The method calls `_update_replica` in replica context,
`_update_cross_replica` in cross replica context, and `update_fn` in update
context.
If `read_value` is True, the method returns the updated Variable. If
`read_value` is False, the method returns the update `tf.Operation`.
Args:
update_fn: A callable to pass to `strategy.extended.update` to update the
variable. It should have the same signature as `Variable.assign()`.
value: value to be passed to `update_fn`.
**kwargs: keyword arguments to `update_fn`.
Returns:
Updated variable or `tf.Operation`.
"""
with ds_context.enter_or_assert_strategy(self.distribute_strategy):
if ds_context.in_cross_replica_context():
update_replica_id = distribute_lib.get_update_replica_id()
if update_replica_id is not None:
return update_fn(self._values[update_replica_id], value, **kwargs)
return self._update_cross_replica(update_fn, value, **kwargs)
else:
_assert_replica_context(self.distribute_strategy)
return self._update_replica(update_fn, value, **kwargs)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
ops.register_dense_tensor_like_type(DistributedVariable)
def _validate_colocate_extended(v, extended):
variable_strategy = v._distribute_strategy # pylint: disable=protected-access
if variable_strategy.extended is not extended:
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not %s created in scope: %s" %
(v, variable_strategy))
def validate_colocate_distributed_variable(v, extended):
if not isinstance(v, DistributedVariable):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def validate_colocate(v, extended):
if not hasattr(v, "_distribute_strategy"):
raise ValueError(
"`colocate_vars_with` must only be passed a variable created in this "
"tf.distribute.Strategy.scope(), not: %r" % (v,))
_validate_colocate_extended(v, extended)
def _apply_aggregation(strategy, value, aggregation, destinations):
if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return strategy.extended.broadcast_to(
strategy.experimental_local_results(value)[0],
destinations=destinations)
reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation)
return strategy.extended.reduce_to(reduce_op, value, destinations)
_aggregation_error_msg = (
"You must specify an aggregation method to update a "
"{variable_type} in Replica Context. You can do so by passing "
"an explicit value for argument `aggregation` to tf.Variable(..)."
"e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`"
"`tf.VariableAggregation` lists the possible aggregation methods."
"This is required because {variable_type} should always be "
"kept in sync. When updating them or assigning to them in a "
"replica context, we automatically try to aggregate the values "
"before updating the variable. For this aggregation, we need to "
"know the aggregation method. "
"Another alternative is to not try to update such "
"{variable_type} in replica context, but in cross replica "
"context. You can enter cross replica context by calling "
"`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`."
"Inside `merge_fn`, you can then update the {variable_type} "
"using `tf.distribute.StrategyExtended.update()`.")
class _MirroredSaveable(saveable_object_util.ResourceVariableSaveable):
"""Class for defining how to restore a MirroredVariable."""
def __init__(self, mirrored_variable, primary_variable, name):
self._mirrored_variable = mirrored_variable
super(_MirroredSaveable, self).__init__(primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
return control_flow_ops.group(
tuple(
_assign_on_device(v.device, v, tensor)
for v in self._mirrored_variable.values))
def create_mirrored_variable( # pylint: disable=missing-docstring
strategy, real_mirrored_creator, mirrored_cls, sync_on_read_cls, **kwargs):
# Figure out what collections this variable should be added to.
# We'll add the MirroredVariable to those collections instead.
var_collections = kwargs.pop("collections", None)
if var_collections is None:
var_collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
synchronization = kwargs.get("synchronization",
vs.VariableSynchronization.ON_WRITE)
if synchronization == vs.VariableSynchronization.NONE:
raise ValueError(
"`NONE` variable synchronization mode is not supported with `Mirrored` "
"distribution strategy. Please change the `synchronization` for "
"variable: " + str(kwargs["name"]))
elif synchronization == vs.VariableSynchronization.ON_READ:
is_sync_on_read = True
elif synchronization in (vs.VariableSynchronization.ON_WRITE,
vs.VariableSynchronization.AUTO):
# `AUTO` synchronization defaults to `ON_WRITE`.
is_sync_on_read = False
else:
raise ValueError(
"Invalid variable synchronization mode: %s for variable: %s" %
(synchronization, kwargs["name"]))
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA):
raise ValueError("Invalid variable aggregation mode: %s for variable: %s" %
(aggregation, kwargs["name"]))
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
# TODO(josh11b,apassos): It would be better if variable initialization
# was never recorded on the tape instead of having to do this manually
# here.
with tape.stop_recording():
value_list = real_mirrored_creator(**kwargs)
var_cls = sync_on_read_cls if is_sync_on_read else mirrored_cls
result = var_cls(strategy, value_list, aggregation)
# Install the created DistributedVariable as _distributed_container property
# of the underlying variables, to make it easy to map back to the container.
for v in result.values:
    # Hold a strong reference to keep the container from being GC-ed. After
    # v = v.assign(), the user code may no longer hold references to the
# original container, since v.assign() returns a new DistributedVariable.
v._distributed_container = result # pylint: disable=protected-access
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the member variables
# to the TRAINABLE_VARIABLES collection, so we manually remove
# them and replace with the MirroredVariable. We can't set
# "trainable" to False for next_creator() since that causes functions
# like implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
var_collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
for value in value_list:
for i, trainable_variable in enumerate(l):
if value is trainable_variable:
del l[i]
break
g.add_to_collections(var_collections, result)
elif ops.GraphKeys.GLOBAL_STEP in var_collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result)
return result
class MirroredVariable(DistributedVariable, Mirrored):
"""Holds a map from replica to variables whose values are kept in sync."""
def _update_replica(self, update_fn, value, **kwargs):
if self.aggregation == vs.VariableAggregation.NONE:
raise ValueError(
_aggregation_error_msg.format(variable_type="MirroredVariable"))
def merge_fn(strategy, value, **kwargs):
"""Aggregate values and update all variables in cross replica context."""
# Don't allow MEAN with non float dtype, since it may cause unexpected
# precision loss. Python3 and NumPy automatically upcast integers to
# float in division, but we should always preserve the type.
#
# Note that to be backward compatible we allow the case when the value
# is *always* the same on each replica. I.E. value is not a
# PerReplica. Refer to regroup() to see how values are grouped.
if self._aggregation == vs.VariableAggregation.MEAN and (
not self.dtype.is_floating) and isinstance(value, PerReplica):
raise ValueError(
"Cannot update non-float variables with "
"tf.VariableAggregation.MEAN aggregation in replica context. "
"Either change the variable dtype to float or update it in "
"cross-replica context.")
assert strategy == self.distribute_strategy
v = _apply_aggregation(strategy, value, self.aggregation, self)
return self._update_cross_replica(update_fn, v, **kwargs)
return ds_context.get_replica_context().merge_call(
merge_fn, args=(value,), kwargs=kwargs)
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._update(
update_fn=assign_sub_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._update(
update_fn=assign_add_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def assign(self, value, use_locking=False, name=None, read_value=True):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._update(
update_fn=assign_fn,
value=value,
use_locking=use_locking,
name=name,
read_value=read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
scatter_sub_fn = lambda var, *a, **kw: var.scatter_sub(*a, **kw)
return self._update(
update_fn=scatter_sub_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
scatter_add_fn = lambda var, *a, **kw: var.scatter_add(*a, **kw)
return self._update(
update_fn=scatter_add_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
scatter_mul_fn = lambda var, *a, **kw: var.scatter_mul(*a, **kw)
return self._update(
update_fn=scatter_mul_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
scatter_div_fn = lambda var, *a, **kw: var.scatter_div(*a, **kw)
return self._update(
update_fn=scatter_div_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError("scatter_min is only supported for mirrored "
"variable (variable created within certain "
"`tf.distribute.Strategy` scope) with NONE or "
"`ONLY_FIRST_REPLICA` aggregation, got: %s" %
self._aggregation)
scatter_min_fn = lambda var, *a, **kw: var.scatter_min(*a, **kw)
return self._update(
update_fn=scatter_min_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError("scatter_max is only supported for mirrored "
"variable (variable created within certain "
"`tf.distribute.Strategy` scope) with NONE or "
"`ONLY_FIRST_REPLICA` aggregation, got: %s" %
self._aggregation)
scatter_max_fn = lambda var, *a, **kw: var.scatter_max(*a, **kw)
return self._update(
update_fn=scatter_max_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError("scatter_update is only supported for mirrored "
"variable (variable created within certain "
"`tf.distribute.Strategy` scope) with NONE or "
"`ONLY_FIRST_REPLICA` aggregation, got: %s" %
self._aggregation)
scatter_update_fn = lambda var, *a, **kw: var.scatter_update(*a, **kw)
return self._update(
update_fn=scatter_update_fn,
value=sparse_delta,
use_locking=use_locking,
name=name)
def _get_cross_replica(self):
# Return identity, to avoid directly exposing the variable to the user and
# allowing it to be modified by mistake.
return array_ops.identity(Mirrored._get_cross_replica(self))
def _as_graph_element(self):
return self._get_closest()._as_graph_element() # pylint: disable=protected-access
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
MirroredVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _MirroredSaveable(self, self._primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# Try to avoid assignments to and other mutations of MirroredVariable
# state except through a DistributionStrategy.extended.update() call.
if as_ref:
# A TF 1.x case where the variable is a boolean variable and used like:
# tf.cond(v, true_fn, false_fn).
raise ValueError(
"You may be using variable created under distribute strategy in TF "
"1.x control flows. Try explicitly converting the variable to Tensor "
"using variable.read_value(), or switch to TF 2.x.")
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(MirroredVariable,
_tensor_conversion_mirrored)
def _tensor_conversion_mirrored_val(value, dtype=None, name=None, as_ref=False):
return ops.convert_to_tensor(
value._get(), dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(Mirrored,
_tensor_conversion_mirrored_val)
def is_distributed_variable(v):
"""Determine if a variable is ds variable or TPU mirrored variable."""
return isinstance(v, DistributedVariable)
class _SyncOnReadSaveable(saveable_object.SaveableObject):
"""Class for defining how to restore a SyncOnReadVariable."""
def __init__(self, sync_on_read_variable, name):
self._sync_on_read_variable = sync_on_read_variable
# We use a callable so that we don't have to evaluate this expression
# in the case where we are trying to restore instead of save.
def tensor():
strategy = sync_on_read_variable._distribute_strategy # pylint: disable=protected-access
return strategy.extended.read_var(sync_on_read_variable)
spec = saveable_object.SaveSpec(
tensor=tensor,
slice_spec="",
name=name,
dtype=sync_on_read_variable.dtype,
device=sync_on_read_variable._primary.device) # pylint: disable=protected-access
super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
# To preserve the sum across save and restore, we have to divide the
# total across all devices when restoring a variable that was summed
# when saving.
tensor, = restored_tensors
if self._sync_on_read_variable.aggregation == vs.VariableAggregation.SUM:
tensor = math_ops.cast(tensor / len(self._sync_on_read_variable._devices), # pylint: disable=protected-access
self._sync_on_read_variable.dtype)
return control_flow_ops.group(
tuple(
_assign_on_device(v.device, v, tensor)
for v in self._sync_on_read_variable.values))
def _assert_replica_context(strategy):
replica_context = ds_context.get_replica_context()
if not replica_context:
raise RuntimeError(
"Replica-local variables may only be assigned in a replica context.")
if replica_context.strategy is not strategy:
raise RuntimeError(
"Replica-local variables may only be assigned in a replica context.")
class SyncOnReadVariable(DistributedVariable):
"""Holds a map from replica to variables whose values are reduced on save."""
def assign_sub(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
if self._aggregation == vs.VariableAggregation.SUM:
raise ValueError(
"SyncOnReadVariable does not support `assign_sub` in "
"cross-replica context when aggregation is set to "
"`tf.VariableAggregation.SUM`.")
return control_flow_ops.group(
tuple(
_assign_sub_on_device(v.device, v, args[0])
for v in self._values))
else:
return self._get().assign_sub(*args, **kwargs)
def assign_add(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
if self._aggregation == vs.VariableAggregation.SUM:
raise ValueError(
"SyncOnReadVariable does not support `assign_add` in "
"cross-replica context when aggregation is set to "
"`tf.VariableAggregation.SUM`.")
return control_flow_ops.group(
tuple(
_assign_add_on_device(v.device, v, args[0])
for v in self._values))
else:
return self._get().assign_add(*args, **kwargs)
def assign(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
# To preserve the sum across save and restore, we have to divide the
# total across all devices when restoring a variable that was summed
# when saving.
tensor = args[0]
if self._aggregation == vs.VariableAggregation.SUM:
tensor = math_ops.cast(tensor / len(self._values), self.dtype)
return control_flow_ops.group(
tuple(_assign_on_device(v.device, v, tensor) for v in self._values))
else:
return self._get().assign(*args, **kwargs)
def value(self):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
return self._get_cross_replica()
else:
# _get_closest() returns a Variable.
return self._get_closest().value()
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
else:
raise NotImplementedError(
"numpy() is only available when eager execution is enabled.")
def _get_cross_replica(self):
if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:
return self._primary
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return self._distribute_strategy.reduce(
reduce_util.ReduceOp.from_variable_aggregation(self.aggregation),
self,
axis=None)
def _as_graph_element(self):
# pylint: disable=protected-access
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
if ds_context.in_cross_replica_context():
return ops.convert_to_tensor(self._get_cross_replica())
return self._get()._as_graph_element()
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
`SyncOnReadVariable`s.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _SyncOnReadSaveable(self, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
# Register a conversion function for SyncOnReadVariable which allows as_ref to
# be true.
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(SyncOnReadVariable,
_tensor_conversion_sync_on_read)
def regroup(values, wrap_class=PerReplica, always_wrap=False):
"""Makes a nest per-replica into a nest of PerReplica/Mirrored values.
Args:
    values: Values to regroup.
    wrap_class: Class that `values` will be wrapped in.
    always_wrap: Always wrap the `values` in `wrap_class` even if the values
      are the same, except for DistributedVariable.
Returns:
Wrapped `values`.
"""
v0 = values[0]
if isinstance(v0, list):
for v in values[1:]:
assert isinstance(v, list)
assert len(v) == len(v0), ("len(v) == %d, len(v0) == %d, v: %s, v0: %s" %
(len(v), len(v0), v, v0))
return [
regroup(tuple(v[i] for v in values), wrap_class)
for i in range(len(v0))
]
if isinstance(v0, tuple):
for v in values[1:]:
assert isinstance(v, tuple)
assert len(v) == len(v0)
regrouped_tuple = tuple(
regroup(tuple(v[i] for v in values), wrap_class)
for i in range(len(v0)))
if hasattr(v0, "_fields"):
# This tuple is in fact a namedtuple! Create a new namedtuple instance
# and initialize it with the regrouped values:
assert hasattr(type(v0), "_make")
return type(v0)._make(regrouped_tuple)
else:
return regrouped_tuple
if isinstance(v0, dict):
v0keys = v0.keys()
for v in values[1:]:
assert isinstance(v, dict), ("v[0]: %r v[i]: %r" % (v0, v))
assert set(v.keys()) == set(v0keys), ("v[0].keys: %s v[i].keys: %s" %
(set(v0keys), set(v.keys())))
# Use the actual type in case it is a class inherited from a dict.
return type(v0)({
key: regroup(tuple(v[key] for v in values), wrap_class)
for key in v0keys
})
# If exactly the same object across all devices, return it unwrapped.
same_id = True
for v in values[1:]:
if v is not v0:
same_id = False
break
# Consider three cases where same_id is true:
# * If v0 is a DistributedVariable (a MirroredVariable or
# SyncOnReadVariable, and same_id means it is the same across all
# devices), we want to return it. We check DistributedVariable
# specifically since it can look like it has a
# _distributed_container member since its members do.
if same_id and isinstance(v0, DistributedVariable):
return v0
# * If v0 is a member of a distributed variable, in which case
# hasattr(v0, "_distributed_container") is true, we want to
# return the DistributedVariable that contains it using the
# _distributed_container logic below. This case can trigger
# same_id when there is only one device.
# * In any other situation, same_id means we return v0 unless `always_wrap` is
# true.
if same_id and not always_wrap and not hasattr(v0, "_distributed_container"):
return v0
# Detect the case where each device has a parallel component of the
# same MirroredVariable (or SyncOnReadVariable). In this case we
# want to return the containing MirroredVariable, after a bunch of
# sanity checking. In particular, each component should have the
# same container, and the devices of the variables should match the
# keys of the per-replica dictionary.
if hasattr(v0, "_distributed_container"):
# pylint: disable=protected-access
assert not isinstance(v0, MirroredVariable), (
"ids = %s, values = %s" % ([id(v) for v in values], values))
distributed_container = v0._distributed_container
assert distributed_container is not None
for v in values[1:]:
assert distributed_container is v._distributed_container
return distributed_container
# pylint: enable=protected-access
return wrap_class(values)
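# Illustrative behaviour of regroup() (added note, not part of the original module):
#   regroup(({"a": t0}, {"a": t1}))  -> {"a": PerReplica((t0, t1))}
#   regroup(({"a": t0}, {"a": t0}))  -> {"a": t0}   # identical object, unwrapped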
def select_replica(replica_id, structured):
"""Specialize a nest of regular & per-replica values for one replica."""
def _get(x):
# `DistributedValues` would be sliced according to replica unless it is a
# `DistributedVariable` because `DistributedVariable` can be handled
# directly in the replica context.
if (isinstance(x, DistributedVariable) or
not isinstance(x, DistributedValues)):
return x
else:
return x.values[replica_id]
return nest.map_structure(_get, structured)
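# Illustrative behaviour (added note): select_replica(1, {"x": PerReplica((a, b))})
# returns {"x": b}; plain tensors and DistributedVariables pass through unchanged.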
def select_replica_mirrored(replica_id, structured):
"""Specialize a nest of regular & mirrored values for one replica."""
def _get_mirrored(x):
if isinstance(x, DistributedValues):
if not isinstance(x, Mirrored):
raise TypeError(
"Expected value to be mirrored across replicas: %s in %s." %
(x, structured))
return x.values[replica_id]
else:
return x
return nest.map_structure(_get_mirrored, structured)
def update_regroup(extended, updates, group):
"""Regroup for an update, with dependencies to ensure all updates execute."""
if not group:
regrouped = regroup(updates, Mirrored)
return nest.map_structure(extended._local_results, regrouped) # pylint: disable=protected-access
def _make_grouped_mirrored(values):
"""Convert per-replica list `values` into Mirrored type with grouping."""
if len(values) == 1:
return Mirrored(values)
# Make sure we run all updates. Without this, something like
# session.run(extended.update(...)) may only update one replica.
g = control_flow_ops.group(values)
# If values is just ops, the grouping is enough. Everything in values
# should have the same type, since we expect every replica to be performing
# the same computation.
if not all(tensor_util.is_tensor(v) for v in values):
return g
# Otherwise we need tensors with the same values as `values`, but
# that have a dependency on `g`.
with_dep = []
for v in values:
with ops.device(v.device), ops.control_dependencies([g]):
with_dep.append(array_ops.identity(v))
return Mirrored(with_dep)
return regroup(updates, _make_grouped_mirrored)
def value_container(val):
"""Returns the container that this per-replica `value` belongs to.
Args:
val: A value returned by `call_for_each_replica()` or a variable created in
`scope()`.
Returns:
A container that `value` belongs to.
If value does not belong to any container (including the case of
container having been destroyed), returns the value itself.
"""
if (hasattr(val, "_distributed_container") and
# DistributedVariable has _distributed_container defined
# but we don't want to return it.
not isinstance(val, DistributedVariable)):
container = val._distributed_container # pylint: disable=protected-access
if container is not None:
return container
return val
class AggregatingVariable(variables_lib.Variable):
"""A wrapper around a variable that aggregates updates across replicas."""
def __init__(self, strategy, v, aggregation):
self._distribute_strategy = strategy
self._v = v
# NOTE: We don't use "_distributed_container" here because we don't want
# to trigger that code path in regroup().
v._aggregating_container = weakref.ref(self) # pylint: disable=protected-access
self._aggregation = aggregation
def get(self):
return self._v
@property
def distribute_strategy(self):
return self._distribute_strategy
def __getattr__(self, name):
return getattr(self._v, name)
def _assign_func(self, *args, **kwargs):
with ds_context.enter_or_assert_strategy(self._distribute_strategy):
f = kwargs.pop("f")
if ds_context.in_cross_replica_context():
if distribute_lib.get_update_replica_id() is not None:
# We are calling an assign function in an update context.
return f(self._v, *args, **kwargs)
# We are calling an assign function in cross replica context, wrap it in
# an update call.
return self._distribute_strategy.extended.update(
self, f, args=args, kwargs=kwargs)
else:
replica_context = ds_context.get_replica_context()
assert replica_context
# We are calling an assign function in replica context.
# We reduce the value we want to assign/add/sub. More details about how
# we handle the different use cases can be found in the _reduce method.
# We call the function with the reduced value.
if self._aggregation == vs.VariableAggregation.NONE:
raise ValueError(
_aggregation_error_msg.format(
variable_type="AggregatingVariable"))
def merge_fn(strategy, value, *other_args, **other_kwargs):
v = _apply_aggregation(strategy, value, self._aggregation, self)
return strategy.extended.update(
self, f, args=(v,) + other_args, kwargs=other_kwargs)
return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)
def assign_sub(self, *args, **kwargs):
assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def initializer(self):
return self._v.initializer
def initialized_value(self):
return self._v.initialized_value()
@property
def initial_value(self):
return self._v.initial_value
@property
def op(self):
return self._v.op
def read_value(self):
return self._v.read_value()
def eval(self, session=None):
return self._v.eval(session)
@property
def graph(self):
return self._v.graph
@property
def device(self):
return self._v.device
@property
def shape(self):
return self._v.shape
@property
def aggregation(self):
return self._aggregation
@property
def synchronization(self):
return self._v.synchronization
@property
def name(self):
return self._v.name
@property
def trainable(self):
return self._v.trainable
@property
def dtype(self):
return self._v.dtype
# TODO(josh11b): Test saving & restoring.
def _gather_saveables_for_checkpoint(self):
return {trackable.VARIABLE_VALUE_KEY: self._v}
# pylint: disable=multiple-statements
def __add__(self, o):
return self._v + o
def __radd__(self, o):
return o + self._v
def __sub__(self, o):
return self._v - o
def __rsub__(self, o):
return o - self._v
def __mul__(self, o):
return self._v * o
def __rmul__(self, o):
return o * self._v
def __truediv__(self, o):
return self._v / o
def __rtruediv__(self, o):
return o / self._v
def __floordiv__(self, o):
return self._v // o
def __rfloordiv__(self, o):
return o // self._v
def __mod__(self, o):
return self._v % o
def __rmod__(self, o):
return o % self._v
def __lt__(self, o):
return self._v < o
def __le__(self, o):
return self._v <= o
def __gt__(self, o):
return self._v > o
def __ge__(self, o):
return self._v >= o
def __and__(self, o):
return self._v & o
def __rand__(self, o):
return o & self._v
def __or__(self, o):
return self._v | o
def __ror__(self, o):
return o | self._v
def __xor__(self, o):
return self._v ^ o
def __rxor__(self, o):
return o ^ self._v
def __getitem__(self, o):
return self._v[o]
def __pow__(self, o, modulo=None):
return pow(self._v, o, modulo)
def __rpow__(self, o):
return pow(o, self._v)
def __invert__(self):
return ~self._v
def __neg__(self):
return -self._v
def __abs__(self):
return abs(self._v)
def __div__(self, o):
try:
return self._v.__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._v.__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._v.__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._v.__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __str__(self):
return str(self._v)
def __repr__(self):
return repr(self._v)
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
return ops.convert_to_tensor(self.get(), dtype=dtype, name=name,
as_ref=as_ref)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype, name, as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(AggregatingVariable,
_tensor_conversion_aggregate)
ops.register_dense_tensor_like_type(AggregatingVariable)
| {
"content_hash": "38c4999d7d0b47be566d30dbbdada1b6",
"timestamp": "",
"source": "github",
"line_count": 1586,
"max_line_length": 116,
"avg_line_length": 35.32534678436318,
"alnum_prop": 0.6642451718844822,
"repo_name": "gunan/tensorflow",
"id": "fda258578aa4ac6458cb3187f09e048dcd877e24",
"size": "56715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from settings.base import rel
STATIC_ROOT = rel('public', 'static')
MEDIA_ROOT = rel('public', 'media')
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = (rel('static'),)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder'
)
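# Note (added): the Bower finder comes from the django-bower package; it
# assumes 'djangobower' is in INSTALLED_APPS and that BOWER_COMPONENTS_ROOT is
# configured elsewhere in the settings.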
| {
"content_hash": "81f3963e73b2fa9052e6b2322ad3ac13",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 25.066666666666666,
"alnum_prop": 0.7180851063829787,
"repo_name": "pinkevich/django-project-template",
"id": "6b07512250a660d41b3d82bd94a57921dc97941d",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/static.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10381"
},
{
"name": "Makefile",
"bytes": "1568"
},
{
"name": "Nginx",
"bytes": "1069"
},
{
"name": "Python",
"bytes": "18805"
}
],
"symlink_target": ""
} |
def can_build(env, platform):
return True
def configure(env):
env.use_ptrcall = True
def get_doc_classes():
return [
"ARVRInterfaceGDNative",
"GDNative",
"GDNativeLibrary",
"MultiplayerPeerGDNative",
"NativeScript",
"PacketPeerGDNative",
"PluginScript",
"StreamPeerGDNative",
"VideoStreamGDNative",
"WebRTCPeerConnectionGDNative",
"WebRTCDataChannelGDNative",
]
def get_doc_path():
return "doc_classes"
| {
"content_hash": "09d254fe348ab22a985632aca0c86202",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 39,
"avg_line_length": 19.884615384615383,
"alnum_prop": 0.6073500967117988,
"repo_name": "ex/godot",
"id": "bf371ed95b124ab55ed3f99d52809305aeaeb202",
"size": "517",
"binary": false,
"copies": "4",
"ref": "refs/heads/3.5",
"path": "modules/gdnative/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "1633"
},
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "C",
"bytes": "1045182"
},
{
"name": "C#",
"bytes": "1061492"
},
{
"name": "C++",
"bytes": "39315087"
},
{
"name": "CMake",
"bytes": "606"
},
{
"name": "GAP",
"bytes": "62"
},
{
"name": "GDScript",
"bytes": "323212"
},
{
"name": "GLSL",
"bytes": "836846"
},
{
"name": "Java",
"bytes": "595274"
},
{
"name": "JavaScript",
"bytes": "194742"
},
{
"name": "Kotlin",
"bytes": "84098"
},
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Objective-C",
"bytes": "20550"
},
{
"name": "Objective-C++",
"bytes": "365306"
},
{
"name": "PowerShell",
"bytes": "2713"
},
{
"name": "Python",
"bytes": "475722"
},
{
"name": "Shell",
"bytes": "30899"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
# Pip package update 12/10/2018 (davve.ath)
# import watson
from watson import search as watson
class VerceRegConfig(AppConfig):
name = 'vercereg'
verbose_name = "VERCE dispel4py Registry"
def ready(self):
workspace = self.get_model('Workspace')
pe = self.get_model('PESig')
fun = self.get_model('FunctionSig')
watson.register(workspace)
watson.register(pe)
watson.register(fun)
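# Note (added): registering the models with django-watson builds a full-text
# search index over them; ready() runs after the app registry is loaded, so the
# get_model() lookups above are safe at this point.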
| {
"content_hash": "30012816f2e77c3dc888c6f24653e8df",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 47,
"avg_line_length": 25.263157894736842,
"alnum_prop": 0.6645833333333333,
"repo_name": "iaklampanos/dj-vercereg",
"id": "b1dd08c40077dfdb8387f72f551b4ca254a872f0",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj_vercereg/vercereg/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "401"
},
{
"name": "Makefile",
"bytes": "2006"
},
{
"name": "Python",
"bytes": "134477"
},
{
"name": "Shell",
"bytes": "1652"
}
],
"symlink_target": ""
} |
import re
import subprocess
from datetime import datetime as DT, date
CONFIG_FILE="~/.todo.cfg"
_tagTest = re.compile(r'.+:.+')
_prioTest = re.compile(r'\([A-Z]\)$')
_validPrio = re.compile(r'[A-Z]')
def _makeDate(word):
if word is None: return None
if isinstance(word, date): return word
return DT.strptime(word, "%Y-%m-%d").date()
def _isDate(word):
# print "date testing:", word
try:
_makeDate(word)
except Exception, e:
# print "Failed date parse on: %s" % (word,)
# print "exeption", e
return False
return True
def _isPriority(word):
return bool(_prioTest.match(word))
def _isProject(word):
return word.startswith("+")
def _isContext(word):
return word.startswith("@")
def _isTag(word):
return bool(_tagTest.search(word))
def get_todo_env(key):
cmd = ". %s; echo $%s"
cmd %= (CONFIG_FILE, key)
var = subprocess.check_output([cmd], shell=True)
return var.strip()
class Task(object):
def __init__(self, task="", projects=None, contexts=None, tags=None, autodate=False):
self.priority = ''
self._create = None
self._finish = None
self.task = task
self.done = False
self.projects = projects if projects else list()
self.contexts = contexts if contexts else list()
self.tags = tags if tags else dict()
if autodate:
self.create = date.today()
# can "undo" - pass false
def do(self, value=True):
if bool(value):
self.done = True
self.finish = DT.now().date()
else:
self.done = False
self.finish = None
@property
def priority(self):
return self._priority
@priority.setter
def priority(self, value):
if not value:
self._priority = ""
return
value = value.upper()
if _isPriority(value):
self._priority = value
elif len(value) == 1 and _validPrio.match(value):
self._priority = "(%s)" % value
else:
raise Exception('Bad prio')
@property
def create(self):
return self._create
@create.setter
def create(self, val):
self._create = _makeDate(val)
@property
def finish(self):
return self._finish
@finish.setter
def finish(self, val):
self._finish = _makeDate(val)
def __str__(self):
# Question - strip prio as option?
tok = []
finish = str(self.finish) if self.finish else ""
create = str(self.create) if self.create else ""
if self.done:
tok.append("x")
# strip prio because:
            # todo.sh do [TASK]
# does it
tok.extend([finish, create, self.task])
else:
tok.extend([self.priority, create, self.task])
tok.extend(self.projects)
tok.extend(self.contexts)
tok.extend("%s:%s" % (k,v) for k,v in self.tags.iteritems())
return " ".join(v for v in tok if v)
@staticmethod
def parse(todoline):
leading_space=False
bare_words = []
task = Task()
if todoline.strip(' \t\n') == "":
return None
if todoline.startswith(' '):
leading_space = True
tokens = todoline.split(" ")
if not leading_space:
# get rid of internal "" tokens
tokens = [tok for tok in tokens if tok]
else:
# preserve leading ws
leader = []
while tokens[0] == '':
leader.append(tokens.pop(0))
tokens.insert(0, " ".join(leader))
        # Deal with leading space weirdness
if not leading_space:
if tokens[0] == 'x':
task.done = True
tokens.pop(0)
if _isDate(tokens[0]):
task.finish = tokens.pop(0)
if _isPriority(tokens[0]):
task.priority = tokens.pop(0)
else:
bare_words.append(tokens.pop(0))
# creation date still valid for leading space... TODO: verify
if _isDate(tokens[0]):
task.create = tokens.pop(0)
# Now the meat
for word in tokens:
if _isProject(word):
task.projects.append(word)
elif _isContext(word):
task.contexts.append(word)
elif _isTag(word):
k, v = word.partition(":")[::2]
task.tags[k] = v
else:
bare_words.append(word)
task.task = " ".join(bare_words)
return task
class TodoFile(object):
def __init__(self, filename=""):
self.filename = filename
def __str__(self):
return "\n".join(str(task) for task in self.tasks) + "\n"
def open(self):
try:
with open(self.filename, 'r') as fd:
self.tasks = [Task.parse(x.strip()) for x in fd.readlines()]
self.tasks = [x for x in self.tasks if x is not None]
except:
self.tasks = []
def save(self):
with open(self.filename, 'w') as fd:
fd.write(str(self))
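# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Round-trips a made-up todo.txt line through Task.parse()/str() using only the
# classes defined above.  Like the rest of this module it assumes Python 2.
if __name__ == "__main__":
    line = "(A) 2016-03-01 call the plumber +home @phone due:2016-03-05"
    task = Task.parse(line)
    print("priority: %s" % task.priority)   # (A)
    print("projects: %s" % task.projects)   # ['+home']
    print("contexts: %s" % task.contexts)   # ['@phone']
    print("tags:     %s" % task.tags)       # {'due': '2016-03-05'}
    task.do()                                # mark done, stamp today's date
    print(str(task))                         # x <today> 2016-03-01 call the plumber ...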
| {
"content_hash": "2a0ef4d6368ddedc3b38bddf8ea22cb8",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 89,
"avg_line_length": 27.6968085106383,
"alnum_prop": 0.5285193009410409,
"repo_name": "sophacles/todo-scripts",
"id": "7874fdcf67b61362c523fdbe685d82a66c06da28",
"size": "5207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46431"
}
],
"symlink_target": ""
} |
from AppKit import NSPathControl, NSPathStyleStandard, NSColor, NSFocusRingTypeNone, NSURL, NSPathStylePopUp, NSRoundedBezelStyle
import os
from vanilla.vanillaBase import VanillaBaseControl
_pathStylesMap = {
"standard": NSPathStyleStandard,
"popUp": NSPathStylePopUp,
}
class PathControl(VanillaBaseControl):
"""
A path control.
**posSize** Tuple of form *(left, top, width, height)* representing the position
    and size of the control. The size of the control should match the appropriate value
for the given *sizeStyle*.
+-------------------------+
| **Standard Dimensions** |
+=========+===+===========+
| Regular | H | 22 |
+---------+---+-----------+
| Small | H | 20 |
+---------+---+-----------+
| Mini | H | 18 |
+---------+---+-----------+
**url** The url to be displayed in the control.
**callback** The method to be called when the user presses the control.
**pathStyle** A string representing the path style. The options are:
+------------+
| "standard" |
+------------+
| "popUp" |
+------------+
    **sizeStyle** A string representing the desired size style of the control. The options are:
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
| "mini" |
+-----------+
"""
nsPathControlClass = NSPathControl
def __init__(self, posSize, url, callback=None, pathStyle="standard", sizeStyle="regular"):
self._setupView(self.nsPathControlClass, posSize, callback=callback)
self._nsObject.setPathStyle_(_pathStylesMap[pathStyle])
self._setSizeStyle(sizeStyle)
self._nsObject.setBackgroundColor_(NSColor.clearColor())
self._nsObject.setFocusRingType_(NSFocusRingTypeNone)
self._nsObject.cell().setBordered_(True)
self._nsObject.cell().setBezelStyle_(NSRoundedBezelStyle)
self.set(url)
def set(self, url):
if url is not None:
url = NSURL.URLWithString_(url)
self._nsObject.setURL_(url)
def get(self):
url = self._nsObject.URL()
if url is not None:
return url.path()
return None
def getSelected(self):
path = []
for item in self._nsObject.pathItems():
cell = item.pathComponentCell()
path.append(item.title())
if cell == self._nsObject.clickedPathComponentCell():
break
return os.sep.join(path)
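# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal window hosting a PathControl, following vanilla's usual demo
# pattern.  The Window/executeVanillaTest names and the file:// URL are
# assumptions, not taken from this repository.
if __name__ == "__main__":
    from vanilla import Window
    from vanilla.test.testTools import executeVanillaTest  # assumed helper

    class PathControlDemo(object):

        def __init__(self):
            self.w = Window((420, 44), "PathControl Demo")
            self.w.path = PathControl((10, 10, -10, 22),
                                      "file:///Users/demo/Documents",
                                      callback=self.pathCallback)
            self.w.open()

        def pathCallback(self, sender):
            print(sender.getSelected())

    executeVanillaTest(PathControlDemo)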
| {
"content_hash": "0a44e9b83dbf452a90e9fb944a6cf33e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 129,
"avg_line_length": 29.988095238095237,
"alnum_prop": 0.5613338626439063,
"repo_name": "moyogo/vanilla",
"id": "0cd78824c93903e56ac9c4394275f92a7d68ef52",
"size": "2519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/vanilla/vanillaPathControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "331750"
}
],
"symlink_target": ""
} |
from jsk_network_tools.msg import FC2OCS, OCS2FC
from jsk_network_tools.silverhammer_util import *
from threading import Lock, Thread
from socket import *
from struct import Struct
import diagnostic_updater
from diagnostic_msgs.msg import DiagnosticStatus
import os
import rospy
import signal
import sys
import roslib
from roslib.message import get_message_class
from std_msgs.msg import Time
from sensor_msgs.msg import *
from jsk_network_tools.srv import SetSendRate, SetSendRateResponse
class SilverHammerLowspeedStreamer():
def __init__(self):
message_class_str = rospy.get_param("~message",
"sensor_msgs/Imu")
try:
self.send_message = get_message_class(message_class_str)
except:
raise Exception("invalid topic type: %s"%message_class_str)
print(self.send_message())
self.lock = Lock()
self.launched_time = rospy.Time.now()
self.diagnostic_updater = diagnostic_updater.Updater()
self.diagnostic_updater.setHardwareID("none")
self.diagnostic_updater.add("LowspeedStreamer", self.diagnosticCallback)
self.send_num = 0
self.last_send_time = rospy.Time(0)
self.last_input_received_time = rospy.Time(0)
self.last_send_time_pub = rospy.Publisher("~last_send_time", Time)
self.last_input_received_time_pub = rospy.Publisher(
"~last_input_received_time", Time)
self.to_port = rospy.get_param("~to_port", 1024)
self.to_ip = rospy.get_param("~to_ip", "127.0.0.1")
self.send_rate = rospy.get_param("~send_rate", 1.0)
self.event_driven = rospy.get_param("~event_driven", False)
self.latest_message = None
self.socket_client = socket(AF_INET, SOCK_DGRAM)
self.send_format = msgToStructFormat(self.send_message())
self.sub = rospy.Subscriber("~input",
self.send_message, self.messageCallback)
if not self.event_driven:
self.send_timer = rospy.Timer(rospy.Duration(1.0 / self.send_rate),
self.sendTimerCallback)
self.diagnostic_timer = rospy.Timer(rospy.Duration(1.0 / 10),
self.diagnosticTimerCallback)
self.send_rate_service = rospy.Service('~set_send_rate', SetSendRate, self.setSendRate)
def setSendRate(self, req):
try:
if self.event_driven:
rospy.logerr("failed to change send_rate. event_driven is enabled.")
return SetSendRateResponse(ok=False)
if self.send_timer.is_alive():
self.send_timer.shutdown()
self.send_rate = req.rate
rospy.set_param("~send_rate", self.send_rate)
# self.send_rate = rospy.get_param("~send_rate", 1.0)
rospy.loginfo("send_rate is set to %f" % self.send_rate)
self.send_timer = rospy.Timer(rospy.Duration(1.0 / self.send_rate),
self.sendTimerCallback)
return SetSendRateResponse(ok=True)
except Exception as e:
rospy.logerr("failed to set send_rate: %s" % e)
return SetSendRateResponse(ok=False)
def diagnosticTimerCallback(self, event):
self.diagnostic_updater.update()
def diagnosticCallback(self, stat):
# always OK
stat.summary(DiagnosticStatus.OK, "OK")
with self.lock:
now = rospy.Time.now()
stat.add("Uptime [sec]",
(now - self.launched_time).to_sec())
stat.add("Time from the last sending [sec]",
(now - self.last_send_time).to_sec())
stat.add("Number of transmission", self.send_num)
stat.add("Time from the last input [sec]",
(now - self.last_input_received_time).to_sec())
# properties
stat.add("UDP address", self.to_ip)
stat.add("UDP port", self.to_port)
stat.add("EventDrivenMode", self.event_driven)
self.last_send_time_pub.publish(self.last_send_time)
self.last_input_received_time_pub.publish(self.last_input_received_time)
return stat
def messageCallback(self, msg):
with self.lock:
self.latest_message = msg
self.last_input_received_time = rospy.Time.now()
if self.event_driven:
self.sendMessage(msg)
def sendMessage(self, msg):
packed_data = packMessage(msg, self.send_format)
self.socket_client.sendto(packed_data, (self.to_ip, self.to_port))
self.last_send_time = rospy.Time.now()
self.send_num = self.send_num + 1
def sendTimerCallback(self, event):
with self.lock:
if self.latest_message:
rospy.logdebug("sending message")
self.sendMessage(self.latest_message)
self.latest_message = None
else:
rospy.loginfo("no message is available")
if __name__ == "__main__":
rospy.init_node("silverhammer_lowspeed_streamer")
st = SilverHammerLowspeedStreamer()
rospy.spin()
| {
"content_hash": "7f478b338b8b0aa1a3fe51a7178c02f2",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 95,
"avg_line_length": 43.50833333333333,
"alnum_prop": 0.5998850794866883,
"repo_name": "cretaceous-creature/jsk_mbzirc_task3",
"id": "d0c36431a63ddb9b8004fcbce4bcabc828cccff9",
"size": "5244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsk_network_tools/scripts/silverhammer_lowspeed_streamer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "145501"
},
{
"name": "CMake",
"bytes": "11823"
},
{
"name": "Cuda",
"bytes": "7448"
},
{
"name": "Lua",
"bytes": "932"
},
{
"name": "Python",
"bytes": "68628"
},
{
"name": "Shell",
"bytes": "2667"
}
],
"symlink_target": ""
} |
from random import sample
from shenfun import *
from ChannelFlow import KMM
np.warnings.filterwarnings('ignore')
class MicroPolar(KMM):
"""Micropolar channel flow solver
Parameters
----------
N : 3-tuple of ints
The global shape in physical space (quadrature points)
domain : 3-tuple of 2-tuples
The size of the three domains
Re : Reynolds number
J : number
model parameter
m : number
model parameter
NP : number
model parameter
dt : Timestep
conv : Choose velocity convection method
- 0 - Standard convection
- 1 - Vortex type
filename : str, optional
Filenames are started with this name
family : str, optional
Chebyshev is normal, but Legendre works as well
padding_factor : 3-tuple of numbers, optional
For dealiasing, backward transforms to real space are
padded with zeros in spectral space using these many points
modplot : int, optional
Plot some results every modplot timestep. If negative, no plotting
modsave : int, optional
Save results to hdf5 every modsave timestep
moderror : int, optional
Print diagnostics every moderror timestep
checkpoint : int, optional
Save required data for restart to hdf5 every checkpoint timestep
sample_stats : int, optional
Sample statistics every sample_stats timestep
timestepper : str, optional
Choose timestepper
- 'IMEXRK222'
- 'IMEXRK3'
- 'IMEXRK443'
Note
----
Simulations may be killed gracefully by placing a file named 'killshenfun'
    in the folder the solver is run from. The solver will then first store
the results by checkpointing, before exiting.
"""
def __init__(self,
N=(32, 32, 32),
domain=((-1, 1), (0, 2*np.pi), (0, np.pi)),
Re=100,
J=1e-5,
m=0.1,
NP=8.3e4,
dt=0.001,
conv=0,
filename='MicroPolar',
family='C',
padding_factor=(1, 1.5, 1.5),
modplot=100,
modsave=1e8,
moderror=100,
checkpoint=1000,
timestepper='IMEXRK3'):
utau = self.utau = 1
KMM.__init__(self, N=N, domain=domain, nu=1/Re, dt=dt, conv=conv,
filename=filename, family=family, padding_factor=padding_factor,
modplot=modplot, modsave=modsave, moderror=moderror, dpdy=-utau**2,
checkpoint=checkpoint, timestepper=timestepper)
self.Re = Re
self.J = J
self.m = m
self.NP = NP
# New spaces and Functions used by micropolar model
self.WC = VectorSpace(self.TC) # Curl curl vector space
self.w_ = Function(self.CD) # Angular velocity solution
self.HW_ = Function(self.CD) # convection angular velocity
self.wz = Function(self.D00)
self.wy = Function(self.D00)
self.ub = Array(self.BD)
self.wb = Array(self.CD)
self.cb = Array(self.BD)
# Classes for fast projections used by convection
self.dw0dx = Project(Dx(self.w_[0], 0, 1), self.TC)
self.dw0dy = Project(Dx(self.w_[0], 1, 1), self.TD)
self.dw0dz = Project(Dx(self.w_[0], 2, 1), self.TD)
self.dw1dx = Project(Dx(self.w_[1], 0, 1), self.TC)
self.dw1dy = Project(Dx(self.w_[1], 1, 1), self.TD)
self.dw1dz = Project(Dx(self.w_[1], 2, 1), self.TD)
self.dw2dx = Project(Dx(self.w_[2], 0, 1), self.TC)
self.dw2dy = Project(Dx(self.w_[2], 1, 1), self.TD)
self.dw2dz = Project(Dx(self.w_[2], 2, 1), self.TD)
self.curlwx = Project(curl(self.w_)[0], self.TD)
self.curlcurlwx = Project(curl(curl(self.w_))[0], self.TC)
# File for storing the results
self.file_w = ShenfunFile('_'.join((filename, 'W')), self.CD, backend='hdf5', mode='w', mesh='uniform')
# Create a checkpoint file used to restart simulations
self.checkpoint.data['0']['W'] = [self.w_]
h = TestFunction(self.TD)
# Chebyshev matrices are not sparse, so need a tailored solver. Legendre has simply 5 nonzero diagonals and can use generic solvers.
sol2 = chebyshev.la.Helmholtz if self.B0.family() == 'chebyshev' else la.SolverGeneric1ND
# Modify u equation
nu = self.nu
cwx_ = self.curlwx.output_array
self.pdes['u'].N = [self.pdes['u'].N, m*nu*div(grad(cwx_))]
self.pdes['u'].latex += r'+m \nu \nabla^2 (\nabla \times \vec{w})_x'
# Modify g equation
ccw_ = self.curlcurlwx.output_array
self.pdes['g'].N = [self.pdes['g'].N, m*nu*Expr(ccw_)]
self.pdes['g'].latex += r'+m \nu (\nabla \times \nabla \times \vec{w})_x'
if comm.Get_rank() == 0:
# Modify v0 and w0 equations
self.pdes1d['v0'].N.append(-m*nu*Dx(self.wz, 0, 1))
self.pdes1d['w0'].N = [self.pdes1d['w0'].N, m*nu*Dx(self.wy, 0, 1)]
self.pdes1d['v0'].latex += r'-m \nu \frac{\partial w_z}{\partial x}'
self.pdes1d['w0'].latex += r'+m \nu \frac{\partial w_y}{\partial x}'
# Angular momentum equations
self.kappa = kappa = m/J/NP/Re
self.pdes['w0'] = self.PDE(h,
self.w_[0],
lambda f: kappa*div(grad(f))-2*NP*kappa*f,
[-Expr(self.HW_[0]), kappa*NP*Expr(self.curl[0])],
dt=self.dt,
solver=sol2,
latex=r"\frac{\partial w_x}{\partial t} +\vec{u} \cdot \nabla w_x = \kappa \nabla^2 w_x + \kappa N (\nabla \times \vec{u})_x")
self.pdes['w1'] = self.PDE(h,
self.w_[1],
lambda f: kappa*div(grad(f))-2*NP*kappa*f,
[-Expr(self.HW_[1]), kappa*NP*Expr(self.curl[1])],
dt=self.dt,
solver=sol2,
latex=r"\frac{\partial w_y}{\partial t} +\vec{u} \cdot \nabla w_y = \kappa \nabla^2 w_y + \kappa N (\nabla \times \vec{u})_y")
self.pdes['w2'] = self.PDE(h,
self.w_[2],
lambda f: kappa*div(grad(f))-2*NP*kappa*f,
[-Expr(self.HW_[2]), kappa*NP*Expr(self.curl[2])],
dt=self.dt,
solver=sol2,
latex=r"\frac{\partial w_z}{\partial t} +\vec{u} \cdot \nabla w_z = \kappa \nabla^2 w_z + \kappa N (\nabla \times \vec{u})_z")
def init_from_checkpoint(self):
self.checkpoint.read(self.u_, 'U', step=0)
self.checkpoint.read(self.w_, 'W', step=0)
self.g_[:] = 1j*self.K[1]*self.u_[2] - 1j*self.K[2]*self.u_[1]
self.checkpoint.open()
tstep = self.checkpoint.f.attrs['tstep']
t = self.checkpoint.f.attrs['t']
self.checkpoint.close()
return t, tstep
def convection(self):
KMM.convection(self)
HW = self.HW_
up = self.up
dw0dxp = self.dw0dx().backward(padding_factor=self.padding_factor)
dw0dyp = self.dw0dy().backward(padding_factor=self.padding_factor)
dw0dzp = self.dw0dz().backward(padding_factor=self.padding_factor)
dw1dxp = self.dw1dx().backward(padding_factor=self.padding_factor)
dw1dyp = self.dw1dy().backward(padding_factor=self.padding_factor)
dw1dzp = self.dw1dz().backward(padding_factor=self.padding_factor)
dw2dxp = self.dw2dx().backward(padding_factor=self.padding_factor)
dw2dyp = self.dw2dy().backward(padding_factor=self.padding_factor)
dw2dzp = self.dw2dz().backward(padding_factor=self.padding_factor)
HW[0] = self.TDp.forward(up[0]*dw0dxp+up[1]*dw0dyp+up[2]*dw0dzp, HW[0])
HW[1] = self.TDp.forward(up[0]*dw1dxp+up[1]*dw1dyp+up[2]*dw1dzp, HW[1])
HW[2] = self.TDp.forward(up[0]*dw2dxp+up[1]*dw2dyp+up[2]*dw2dzp, HW[2])
HW.mask_nyquist(self.mask)
def tofile(self, tstep):
self.file_u.write(tstep, {'u': [self.u_.backward(mesh='uniform')]}, as_scalar=True)
self.file_w.write(tstep, {'w': [self.w_.backward(mesh='uniform')]}, as_scalar=True)
def compute_vw(self, rk):
if comm.Get_rank() == 0:
self.wy[:] = self.w_[1, :, 0, 0].real
self.wz[:] = self.w_[2, :, 0, 0].real
KMM.compute_vw(self, rk)
| {
"content_hash": "667b11b3b2cf1c53c9223f7610bdd594",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 161,
"avg_line_length": 44.35532994923858,
"alnum_prop": 0.5426871137560082,
"repo_name": "spectralDNS/shenfun",
"id": "498f018f090cb2d05e4a72f4769c9b134a60c867",
"size": "8738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/MicroPolar.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "175354"
},
{
"name": "Cython",
"bytes": "216604"
},
{
"name": "Jupyter Notebook",
"bytes": "214827"
},
{
"name": "Makefile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "1176389"
},
{
"name": "Shell",
"bytes": "1013"
}
],
"symlink_target": ""
} |
"""vtdiscourse
NAME
    vtd - create content for categories and topics on talk.vTaiwan
SYNOPSIS
vtd [OPTION]
DESCRIPTION
-n, --name
user name for Discourse api_user.
-p, --password
        password for Discourse api_key.
    -g, --github
github's repo name
-s, --service
GET: get .md content
NAME: get description
DEPLOY: deploy the category and sub-category in talk.vTaiwan
-h, --help
show usage
-v, --verbose
EXAMPLES
1. vtd -n "api_user" -p "api_key" -g "directors-election-gitbook" -s GET
2. vtd -n "api_user" -p "api_key" -g "directors-election-gitbook" -s DEPLOY
COPYRIGHT
MIT Licence
SOURCE
https://github.com/chairco/vtdiscourse
"""
import sys
import getopt
from vtdiscourse.vtdiscourse import Discourse, Parser
from vtdiscourse.run import supervisors
def main(argv=None):
if not argv:
argv = sys.argv[1:]
if len(argv) == 0:
print(__doc__)
sys.exit(0)
try:
        opts, args = getopt.getopt(argv, "g:s:n:p:hv", ["github=", "service=", "name=", "password=", "help"])
except getopt.GetoptError as e:
print(__doc__)
sys.exit("invalid option: " + str(e))
name = None
password = None
    service_type = None
    github = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
sys.exit(0)
elif o in ('-n', '--name'):
name = a
elif o in ('-p', '--password'):
password = a
elif o in ('-g', '--github'):
github = a
elif o in ('-s', '--service'):
service_type = a
if not name and not password:
try:
name = ""
password = ""
except KeyError:
sys.exit("invalid type")
discourse = Discourse(
host = 'https://talk.vtaiwan.tw',
api_username=name,
api_key=password)
service_type = str(service_type).upper()
if service_type == 'GET':
parm = Parser(name=github, githubfile='SUMMARY.md')
print(parm.get_topics_content)
elif service_type == 'NAME':
parm = Parser(name=github, githubfile='package.json')
        description = parm.get_name
        print(description)
elif service_type == 'DEPLOY':
if not github: sys.exit("invalid type")
result = supervisors(api_key=password, api_username=name, name=github)
print('Result', result)
else:
print(discourse)
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "c193056e3f1b3fac93aa2c37a0191448",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 104,
"avg_line_length": 23.39252336448598,
"alnum_prop": 0.5585297642828606,
"repo_name": "chairco/vtdiscourse",
"id": "e68acfc5ad11d20c23cc932e0578f53f6d9b11e2",
"size": "2503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vtdiscourse/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22602"
}
],
"symlink_target": ""
} |
from stagecraft.apps.dashboards.models.module import(
ModuleType,
Module)
import os
import json
import pprint
import jsonschema
# For this script to work correctly, you need to run
# run it like this:
# workon/source the virtualenv
# run export DJANGO_SETTINGS_MODULE=stagecraft.settings.production
# venv/bin/python import.py
def get_schema_for_module_type(name):
path = os.path.join(
os.path.dirname(__file__),
'tools/import_schemas/schema/modules_json/{}_schema.json'.format(name))
try:
with open(path, "r") as file:
schema = file.read()
except IOError as e:
print "NO SCHEMA FOUND - USING DEFAULT"
print name
print "^NO SCHEMA FOUND - USING DEFAULT"
path = os.path.join(
os.path.dirname(__file__),
'tools/import_schemas/schema/module_schema.json'.format(name))
with open(path, "r") as file:
schema = file.read()
schema_dict = json.loads(schema)
return schema_dict
def check_module_type_schemas_correct():
for module_type, new_schema in module_types_with_proper_schemas():
try:
module_type.validate_schema()
except jsonschema.exceptions.SchemaError as e:
print "==============="
print module_type.name
print "==============="
raise e
def clear_module_type_schemas():
for module_type, new_schema in module_types_with_proper_schemas():
update_module_type_schema(module_type, schema={})
def update_module_type_with_correct_schemas():
for module_type, new_schema in module_types_with_proper_schemas():
update_module_type_schema(module_type, schema=new_schema)
def update_module_type_schema(module_type, schema={}):
module_type.schema = schema
module_type.save()
def module_types_with_proper_schemas():
module_types_with_proper_schemas = [
(module_type, get_schema_for_module_type(module_type.name))
for module_type in ModuleType.objects.all()
]
return module_types_with_proper_schemas
def validate_all_modules():
for module in Module.objects.all():
module.validate_options()
print "======"
print "{} valid in {} dashboard".format(
module.slug, module.dashboard.slug)
print "^====="
return True
def validate_all_modules_against_files():
for module in Module.objects.all():
schema = get_schema_for_module_type(module.type.name)
try:
jsonschema.validate(module.options, schema)
print "======"
print "{} valid in {} dashboard".format(
module.slug, module.dashboard.slug)
print "^====="
except jsonschema.exceptions.ValidationError as e:
print 'failure validating {} in {} dashboard'.format(
module.slug, module.dashboard.slug)
raise e
return True
if __name__ == '__main__':
print "Clearing schemas"
clear_module_type_schemas()
print "Checking schemas valid"
check_module_type_schemas_correct()
print "Checking current modules valid"
validate_all_modules_against_files()
print "Setting module type schemas"
update_module_type_with_correct_schemas()
print "Checking current modules valid using real method"
validate_all_modules()
| {
"content_hash": "ec6e5e0fa88356b1baf917eb6d6b9071",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 31.60377358490566,
"alnum_prop": 0.6328358208955224,
"repo_name": "alphagov/stagecraft",
"id": "568531c1325cd3e83d1bf72be4034c09e802625f",
"size": "3391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "import_schemas.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "855"
},
{
"name": "JavaScript",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "622720"
},
{
"name": "Shell",
"bytes": "14467"
}
],
"symlink_target": ""
} |
"""
Code to support Off-line Microscope and transfer of coordinates to Sample Stage
main functions:
uscope2sample(suffix=''):
transfer all named positions saved for the IDE Microscope (off-line)
to IDE SampleStage (on-line), applying the rotation matrix saved by
the function `make_uscope_rotation()`
make_uscope_rotation()
calculate and store best rotation matrix to transform positions from
the IDE Microscope (off-line) to IDE SampleStage (on-line).
this uses position names that are the same in both instruments, and
requires at least 6 such positions. That is, save positions with the
same names in both the IDE_Microscope and IDE_SampleStage.
Positions names not found in both instruments are ignored.
"""
import json
from collections import OrderedDict
# if not hasattr(_scan, '_instdb'):
# connect_scandb(dbname='epics_scan',
# server='postgresql',
# host='ion.cars.aps.anl.gov',
# user='epics',
# password='microXAFS@13IDE')
# #endif
def read_uscope_xyz(name='IDE_Microscope'):
"""
read XYZ Positions from IDE Microscope Instrument
returns dictionary of PositionName: (x, y, z)
"""
out = OrderedDict()
instdb = _scan._instdb
scandb = _scan._scandb
for pname in instdb.get_positionlist(name):
v = instdb.get_position_vals(name, pname)
out[pname] = [v['13IDE:m1.VAL'],
v['13IDE:m2.VAL'],
v['13IDE:m3.VAL']]
#endfor
return out
#enddef
def read_sample_xyz(name='IDE_SampleStage'):
"""
read XYZ Positions from IDE SampleStage Instrument
returns dictionary of PositionName: (x, y, z)
Note: FineX, FineY and Theta stages are not included
"""
out = OrderedDict()
instdb = _scan._instdb
for pname in instdb.get_positionlist(name):
v = instdb.get_position_vals(name, pname)
out[pname] = [v['13XRM:m4.VAL'],
v['13XRM:m6.VAL'],
v['13XRM:m5.VAL']]
#endfor
return out
#enddef
def params2rotmatrix(params, mat):
"""--private-- turn fitting parameters
into rotation matrix
"""
mat[0][1] = params.c01
mat[1][0] = params.c10
mat[0][2] = params.c02
mat[2][0] = params.c20
mat[1][2] = params.c12
mat[2][1] = params.c21
return mat
#enddef
def resid_rotmatrix(params, mat, v1, v2):
"--private-- resdiual function for fit"
mat = params2rotmatrix(params, mat)
return (v2 - dot(mat, v1)).flatten()
#enddef
def calc_rotmatrix(d1, d2):
"""get best-fit rotation matrix to transform coordinates
from 1st position dict into the 2nd position dict
"""
labels = []
d2keys = d2.keys()
for x in d1.keys():
if x in d2keys:
labels.append(x)
#endif
#endfor
labels.sort()
if len(labels) < 6:
print """Error: need at least 6 saved positions
in common to calculate rotation matrix"""
return None, None, None
#endif
print("Calculating Rotation Matrix using Labels:")
print(labels)
v1 = ones((4, len(labels)))
v2 = ones((4, len(labels)))
for i, label in enumerate(labels):
v1[0, i] = d1[label][0]
v1[1, i] = d1[label][1]
v1[2, i] = d1[label][2]
v2[0, i] = d2[label][0]
v2[1, i] = d2[label][1]
v2[2, i] = d2[label][2]
#endfor
# get initial rotation matrix, assuming that
# there are orthogonal coordinate systems.
mat = transforms.superimposition_matrix(v1, v2, scale=True)
params = group(c10 = param(mat[1][0], vary=True),
c01 = param(mat[0][1], vary=True),
c20 = param(mat[2][0], vary=True),
c02 = param(mat[0][2], vary=True),
c12 = param(mat[1][2], vary=True),
c21 = param(mat[2][1], vary=True) )
fit_result = minimize(resid_rotmatrix, params, args=(mat, v1, v2))
mat = params2rotmatrix(params, mat)
print(" Calculated Rotation Matrix:")
print(mat)
return mat, v1, v2
#enddef
##
## Main Interface
##
def make_uscope_rotation():
"""
Calculate and store the rotation maxtrix needed to convert
positions from the GSECARS IDE offline microscope (OSCAR)
to the IDE SampleStage in the microprobe station.
This calculates the rotation matrix based on all position
names that occur in the Position List for both instruments.
Note:
The result is saved as a json dictionary of the
IDE_Microscope instrument
Warning:
Please consult with Matt or Tony before running this!
"""
d1 = read_uscope_xyz()
d2 = read_sample_xyz()
# calculate the rotation matrix
mat, v1, v2 = calc_rotmatrix(d1, d2)
if mat is None:
return
#endif
# now save to 'notes' for the Microscope instrument
uscope = _scan._instdb.get_instrument('IDE_Microscope')
notes = uscope.notes
if notes is None:
notes = {}
else:
notes = json.loads(notes)
#endif
notes['rotmat2SampleStage'] = mat.tolist()
uscope.notes = json.dumps(notes)
_scan._scandb.commit()
return mat
#enddef
def uscope2sample(suffix='', xoffset=0, yoffset=0, zoffset=0):
"""
transfer *all* named positions saved for the GSECARS IDE offline
microscope (OSCAR) to the IDE SampleStage in the microprobe station.
Applies the rotation matrix saved by the function `make_uscope_rotation()`
Parameters:
suffix (string): suffix to apply when transferring names,
so as to avoid name clashes.
xoffset (float, default=0): offset in X, after coordinate transform
yoffset (float, default=0): offset in Y, after coordinate transform
zoffset (float, default=0): offset in Z, after coordinate transform
Example:
uscope2sample(suffix='_mount1')
Note:
Saved position names may be overwritten.
        Non-zero values for xoffset, yoffset, zoffset can accommodate
        offsets for the IDE SampleStage due to changes in mirror pitch.
"""
uscope = _scan._instdb.get_instrument('IDE_Microscope')
sample = _scan._instdb.get_instrument('IDE_SampleStage')
try:
notes = json.loads(uscope.notes)
rotmat = array(notes['rotmat2SampleStage'])
except:
print("Error: could not get rotation matrix!")
return
#endtry
upos = read_uscope_xyz()
labels = upos.keys()
v = ones((4, len(labels)))
for i, key in enumerate(labels):
v[0, i] = upos[key][0]
v[1, i] = upos[key][1]
v[2, i] = upos[key][2]
#endfor
# Predict coordinates in SampleStage coordination system
pred = dot(rotmat, v)
# make SampleStage coordinates
poslist = _scan._instdb.get_positionlist('IDE_SampleStage')
pos0 = _scan._instdb.get_position_vals('IDE_SampleStage', poslist[0])
pvs = pos0.keys()
pvs.sort()
spos = OrderedDict()
for pvname in pvs:
spos[pvname] = 0.000
#endfor
xpv, ypv, zpv = '13XRM:m4.VAL', '13XRM:m6.VAL', '13XRM:m5.VAL'
for i, label in enumerate(labels):
spos[xpv] = pred[0, i] + xoffset
spos[ypv] = pred[1, i] + yoffset
spos[zpv] = pred[2, i] + zoffset
nlabel = '%s%s' % (label, suffix)
_scan._instdb.save_position('IDE_SampleStage', nlabel, spos)
#endfor
#enddef
| {
"content_hash": "09f82ffb73216f7aaa97fdc64c63f5b4",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 29.983870967741936,
"alnum_prop": 0.6167294244217321,
"repo_name": "newville/microprobe_docs",
"id": "d72bbd5366164ed233180e487a153c820a5da925",
"size": "7436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/macros/uscope.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "749"
}
],
"symlink_target": ""
} |
"""IBEA selector"""
from __future__ import division
# from past.builtins import xrange # pylint: disable=W0622
"""
Copyright (c) 2016, EPFL/Blue Brain Project
This file is part of BluePyOpt <https://github.com/BlueBrain/BluePyOpt>
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License version 3.0 as published
by the Free Software Foundation.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
The code in this file was original written in 2015 at the
BlueBrain Project, EPFL, Lausanne
The authors were Werner Van Geit, Michael Gevaert and Jean-Denis Courcol
It is based on a C implementation of the IBEA algorithm in the PISA
optimization framework developed at the ETH, Zurich
http://www.tik.ee.ethz.ch/pisa/selectors/ibea/?page=ibea.php
"""
import numpy
import random
def selIBEA(population, mu, alpha=None, kappa=.05, tournament_n=4):
"""IBEA Selector"""
if alpha is None:
alpha = len(population)
# Calculate a matrix with the fitness components of every individual
components = _calc_fitness_components(population, kappa=kappa)
# Calculate the fitness values
_calc_fitnesses(population, components)
# Do the environmental selection
population[:] = _environmental_selection(population, alpha)
# Select the parents in a tournament
parents = _mating_selection(population, mu, tournament_n)
return parents
def _calc_fitness_components(population, kappa):
"""returns an N * N numpy array of doubles, which is their IBEA fitness """
    # DEAP selectors are supposed to maximise the objective values
# We take the negative objectives because this algorithm will minimise
population_matrix = numpy.fromiter(
iter(-x for individual in population
for x in individual.fitness.wvalues),
dtype=numpy.float)
pop_len = len(population)
feat_len = len(population[0].fitness.wvalues)
population_matrix = population_matrix.reshape((pop_len, feat_len))
# Calculate minimal square bounding box of the objectives
box_ranges = (numpy.max(population_matrix, axis=0) -
numpy.min(population_matrix, axis=0))
# Replace all possible zeros to avoid division by zero
# Basically 0/0 is replaced by 0/1
box_ranges[box_ranges == 0] = 1.0
components_matrix = numpy.zeros((pop_len, pop_len))
for i in xrange(0, pop_len):
diff = population_matrix - population_matrix[i, :]
components_matrix[i, :] = numpy.max(
numpy.divide(diff, box_ranges),
axis=1)
# Calculate max of absolute value of all elements in matrix
max_absolute_indicator = numpy.max(numpy.abs(components_matrix))
# Normalisation
if max_absolute_indicator != 0:
components_matrix = numpy.exp(
(-1.0 / (kappa * max_absolute_indicator)) * components_matrix.T)
return components_matrix
def _calc_fitnesses(population, components):
"""Calculate the IBEA fitness of every individual"""
# Calculate sum of every column in the matrix, ignore diagonal elements
column_sums = numpy.sum(components, axis=0) - numpy.diagonal(components)
# Fill the 'ibea_fitness' field on the individuals with the fitness value
for individual, ibea_fitness in zip(population, column_sums):
individual.ibea_fitness = ibea_fitness
def _choice(seq):
"""Python 2 implementation of choice"""
return seq[int(random.random() * len(seq))]
def _mating_selection(population, mu, tournament_n):
"""Returns the n_of_parents individuals with the best fitness"""
parents = []
for _ in xrange(mu):
winner = _choice(population)
for _ in xrange(tournament_n - 1):
individual = _choice(population)
            # The winner is the element with the smallest fitness
if individual.ibea_fitness < winner.ibea_fitness:
winner = individual
parents.append(winner)
return parents
def _environmental_selection(population, selection_size):
"""Returns the selection_size individuals with the best fitness"""
# Sort the individuals based on their fitness
population.sort(key=lambda ind: ind.ibea_fitness)
# Return the first 'selection_size' elements
return population[:selection_size]
__all__ = ['selIBEA']
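# --- Illustrative usage (editor's sketch, not part of the original module) ---
# selIBEA only needs individuals exposing ``fitness.wvalues``; it adds an
# ``ibea_fitness`` attribute itself.  Real callers would build the population
# with DEAP's creator/toolbox -- the tiny stand-in classes below are
# hypothetical and exist only to show the call.  Python 2, like this module.
if __name__ == "__main__":
    class _Fitness(object):
        def __init__(self, wvalues):
            self.wvalues = wvalues

    class _Individual(object):
        def __init__(self, wvalues):
            self.fitness = _Fitness(wvalues)

    population = [_Individual((random.random(), random.random()))
                  for _ in xrange(8)]
    parents = selIBEA(population, mu=4, kappa=0.05, tournament_n=2)
    print("selected %d parents" % len(parents))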
| {
"content_hash": "d3bb1bf96e42bfa98531f9f48e2bf93d",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 34.31654676258993,
"alnum_prop": 0.7039832285115304,
"repo_name": "Ginfung/FSSE",
"id": "b05c40f5ea2463d90e87b7cd1b66a64696ab1f55",
"size": "4770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithms/selIBEA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159840"
},
{
"name": "Shell",
"bytes": "237"
}
],
"symlink_target": ""
} |
from eda.components.ST import *
| {
"content_hash": "53c28f44b12693c346d2ee6e13904e93",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.78125,
"repo_name": "32bitmicro/EDA",
"id": "89551b72d08b7daa3b86e6d52500c2b1c4b43371",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/eda/eda/components/ST/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "526791"
},
{
"name": "Shell",
"bytes": "1139"
}
],
"symlink_target": ""
} |
from collections import defaultdict
def _single_trie(string_to_value_pairs, index):
"""Build a single trie from a dict of input string to output values.
This function assumes that all of the strings in
string_to_value_pairs have the same length.
The input
{'abcd': 'ABCD', 'adef': 'ADEF', 'adeg': 'ADEG'}
creates a trie like this:
{
'a' : {
'b': {'cd' : "ABCD"},
'd' : {
'e' : {
'f' : {'': "ADEF"},
'g' : {'': "ADEG"},
},
},
},
}
"""
dicts_by_indexed_letter = defaultdict(list)
for string, value in string_to_value_pairs:
dicts_by_indexed_letter[string[index]].append((string, value))
output = {}
for char, d in dicts_by_indexed_letter.items():
if len(d) == 1:
string = d[0][0]
value = d[0][1]
output[char] = {string[index + 1:]: value}
else:
output[char] = _single_trie(d, index + 1)
return output
def trie_list_by_str_length(str_to_return_value_dict):
"""Make a list of tries from a dict of input string to output value.
All strings should be all lower case.
"""
dicts_by_length = defaultdict(list)
for string, value in str_to_return_value_dict.items():
dicts_by_length[len(string)].append((string, value))
output = []
for length, pairs in dicts_by_length.items():
output.append((length, _single_trie(sorted(pairs), 0)))
return output
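# --- Illustrative usage (editor's sketch, not part of the original file) ---
# The mapping below is made up; it just shows that inputs are grouped by
# string length and each group becomes one trie, matching the example in the
# _single_trie docstring.
if __name__ == '__main__':
    import pprint
    tries = trie_list_by_str_length({
        'abcd': 'ABCD',
        'adef': 'ADEF',
        'adeg': 'ADEG',
        'xy': 'XY',
    })
    # Expected shape (list order may vary):
    #   [(2, {'x': {'y': 'XY'}}),
    #    (4, {'a': {'b': {'cd': 'ABCD'},
    #               'd': {'e': {'f': {'': 'ADEF'}, 'g': {'': 'ADEG'}}}}})]
    pprint.pprint(tries)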
| {
"content_hash": "4ce499aebd9fd610ddca51e4bfd7f42e",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 27.943396226415093,
"alnum_prop": 0.5685347738014855,
"repo_name": "chromium/chromium",
"id": "2a159e261a44270dd1443a9cbe0098d424dff987",
"size": "1622",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/blink/renderer/build/scripts/trie_builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import signal
from rq import Connection, Queue, Worker
from tornado.options import parse_command_line
from .debug import debug_on_signal
if __name__ == '__main__':
parse_command_line()
debug_on_signal(signal.SIGUSR1)
# Tell rq what Redis connection to use
with Connection():
q = Queue()
Worker(q).work()
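# --- Producer side (editor's note, not part of the original file) ---
# A job would be enqueued from a separate process roughly like this, using
# the standard rq API (the ``my_module.my_task`` target is made up):
#
#     from redis import Redis
#     from rq import Queue
#
#     q = Queue(connection=Redis())
#     q.enqueue('my_module.my_task', 42)
#
# Sending SIGUSR1 to this worker process then triggers the debug_on_signal()
# handler registered above (presumably to inspect a hung worker).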
| {
"content_hash": "1ef602ffc0abcf8a758683f6912ef71f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 22.6,
"alnum_prop": 0.6460176991150443,
"repo_name": "mikegreen7892003/PythonToy",
"id": "3001ca6892468dd85dcaff8f101e19947a5a7689",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toys/debug_hang_python_rq/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12724"
}
],
"symlink_target": ""
} |
import base64
import hashlib
import re
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from mozilla_django_oidc.auth import OIDCAuthenticationBackend
from mozillians.users.models import ExternalAccount
def calculate_username(email):
"""Calculate username from email address."""
email = email.split('@')[0]
username = re.sub(r'[^\w.@+-]', '-', email)
username = username[:settings.USERNAME_MAX_LENGTH]
suggested_username = username
count = 0
while User.objects.filter(username=suggested_username).exists():
count += 1
suggested_username = '%s%d' % (username, count)
if len(suggested_username) > settings.USERNAME_MAX_LENGTH:
# We failed to calculate a name for you, default to a
# email digest.
return base64.urlsafe_b64encode(hashlib.sha1(email).digest()).rstrip('=')
return suggested_username
class MozilliansAuthBackend(OIDCAuthenticationBackend):
"""Override OIDCAuthenticationBackend to provide custom functionality."""
def filter_users_by_claims(self, claims):
"""Override default method to add multiple emails in an account."""
email = claims.get('email')
request_user = self.request.user
if not email:
return self.UserModel.objects.none()
account_type = ExternalAccount.TYPE_EMAIL
alternate_emails = ExternalAccount.objects.filter(type=account_type, identifier=email)
primary_email_qs = Q(email=email)
alternate_email_qs = Q(userprofile__externalaccount=alternate_emails)
user_q = self.UserModel.objects.filter(primary_email_qs | alternate_email_qs).distinct()
# In this case we have a registered user who is adding a secondary email
if request_user.is_authenticated():
if not user_q:
ExternalAccount.objects.create(type=account_type,
user=request_user.userprofile,
identifier=email)
else:
if not user_q.filter(pk=request_user.id).exists():
msg = u'Email {0} already exists in the database.'.format(email)
messages.error(self.request, msg)
return [request_user]
return user_q
| {
"content_hash": "dbd805b2b0939a83a2d9ca4fa1686aab",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 96,
"avg_line_length": 37,
"alnum_prop": 0.6453222453222454,
"repo_name": "fxa90id/mozillians",
"id": "e9e89a04cfc9cd354a73617a0ca363057fa0f179",
"size": "2405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozillians/common/authbackend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1986"
},
{
"name": "CSS",
"bytes": "181742"
},
{
"name": "HTML",
"bytes": "165063"
},
{
"name": "JavaScript",
"bytes": "141584"
},
{
"name": "Makefile",
"bytes": "478"
},
{
"name": "Python",
"bytes": "887164"
},
{
"name": "Shell",
"bytes": "1332"
}
],
"symlink_target": ""
} |
"""Mycroft util library.
A collections of utils and tools for making skill development easier.
"""
from __future__ import absolute_import
import os
import mycroft.audio
from mycroft.util.format import nice_number
from .string_utils import camel_case_split
from .audio_utils import (play_audio_file, play_wav, play_ogg, play_mp3,
record, find_input_device)
from .file_utils import (
resolve_resource_file,
read_stripped_lines,
read_dict,
create_file,
get_temp_path,
ensure_directory_exists,
curate_cache,
get_cache_directory)
from .network_utils import connected
from .process_utils import (reset_sigint_handler, create_daemon,
wait_for_exit_signal, create_echo_function,
start_message_bus_client)
from .log import LOG
from .parse import extract_datetime, extract_number, normalize
from .signal import check_for_signal, create_signal, get_ipc_directory
from .platform import get_arch
| {
"content_hash": "a7abbd424c4f8ce6cfcc0e4be7dfc6c5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.701,
"repo_name": "forslund/mycroft-core",
"id": "8cb5ab71951cb4ba76095021830903a0f9f2b1d7",
"size": "1580",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "mycroft/util/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3791"
},
{
"name": "Python",
"bytes": "1371642"
},
{
"name": "QML",
"bytes": "18805"
},
{
"name": "Shell",
"bytes": "83796"
}
],
"symlink_target": ""
} |
def kernelcache_process_external_methods(ea=None, struct_type=None, count=None):
import idc
import ida_kernelcache as kc
import ida_kernelcache.ida_utilities as idau
kIOUCVariableStructureSize = 0xffffffff
kIOUCTypeMask = 0xf
kIOUCScalarIScalarO = 0
kIOUCScalarIStructO = 2
kIOUCStructIStructO = 3
kIOUCScalarIStructI = 4
kIOUCFlags = 0xff
IOExternalMethod_types = (kIOUCScalarIScalarO, kIOUCScalarIStructO, kIOUCStructIStructO,
kIOUCScalarIStructI)
IOExternalMethod_count0_scalar = (kIOUCScalarIScalarO, kIOUCScalarIStructO,
kIOUCScalarIStructI)
IOExternalMethod_count1_scalar = (kIOUCScalarIScalarO,)
def check_scalar(scalar_count):
return (0 <= scalar_count <= 400)
def check_structure(structure_size):
return (0 <= structure_size <= 0x100000 or structure_size == kIOUCVariableStructureSize)
def is_IOExternalMethodDispatch(obj):
return (idau.is_mapped(obj.function)
and check_scalar(obj.checkScalarInputCount)
and check_structure(obj.checkStructureInputSize)
and check_scalar(obj.checkScalarOutputCount)
and check_structure(obj.checkStructureOutputSize))
def process_IOExternalMethodDispatch(obj):
return (obj.checkScalarInputCount, obj.checkStructureInputSize,
obj.checkScalarOutputCount, obj.checkStructureOutputSize)
def is_IOExternalMethod(obj):
method_type = obj.flags & kIOUCTypeMask
check_count0 = check_scalar if method_type in IOExternalMethod_count0_scalar else check_structure
check_count1 = check_scalar if method_type in IOExternalMethod_count1_scalar else check_structure
return ((obj.object == 0 or idau.is_mapped(obj.object))
and (obj.flags & kIOUCFlags == obj.flags)
and idau.is_mapped(obj.func)
and method_type in IOExternalMethod_types
and check_count0(obj.count0)
and check_count1(obj.count1))
def process_IOExternalMethod(obj):
isc, iss, osc, oss = 0, 0, 0, 0
method_type = obj.flags & kIOUCTypeMask
if method_type == kIOUCScalarIScalarO:
isc, osc = obj.count0, obj.count1
elif method_type == kIOUCScalarIStructO:
isc, oss = obj.count0, obj.count1
elif method_type == kIOUCStructIStructO:
iss, oss = obj.count0, obj.count1
elif method_type == kIOUCScalarIStructI:
isc, iss = obj.count0, obj.count1
else:
assert False
return (isc, iss, osc, oss)
TYPE_MAP = {
'IOExternalMethodDispatch':
(is_IOExternalMethodDispatch, process_IOExternalMethodDispatch),
'IOExternalMethod': (is_IOExternalMethod, process_IOExternalMethod),
}
# Get the EA.
if ea is None:
ea = idc.ScreenEA()
# Get the struct_type and the check and process functions.
if struct_type is None:
for stype in TYPE_MAP:
struct_type = stype
check, process = TYPE_MAP[struct_type]
obj = idau.read_struct(ea, struct=struct_type, asobject=True)
if check(obj):
break
else:
print 'Address {:#x} does not look like any known external method struct'.format(ea)
return False
else:
if struct_type not in TYPE_MAP:
print 'Unknown external method struct type {}'.format(struct_type)
return False
check, process = TYPE_MAP[struct_type]
obj = idau.read_struct(ea, struct=struct_type, asobject=True)
if not check(obj):
print 'Address {:#x} does not look like {}'.format(ea, struct_type)
# Process the external methods.
    selector = 0
while (count is None and check(obj)) or (selector < count):
isc, iss, osc, oss = process(obj)
print '{{ {:3}, {:5}, {:#10x}, {:5}, {:#10x} }}'.format(selector, isc, iss, osc, oss)
selector += 1
ea += len(obj)
obj = idau.read_struct(ea, struct=struct_type, asobject=True)
return True
kernelcache_process_external_methods()
| {
"content_hash": "a27643df8dd09f0342a0f8c680ce83d4",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 105,
"avg_line_length": 38.46788990825688,
"alnum_prop": 0.6355831147150012,
"repo_name": "bazad/ida_kernelcache",
"id": "4ce91466d479f5523882830e6f8d16c56259b784",
"size": "4492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/process_external_methods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157185"
}
],
"symlink_target": ""
} |
import geopandas as gpd
import pandas as pd
import shared
import osmnx
# this script is used to assign an ESRI jobs dataset which comes with lat-lng
# locations. We're not using this right now - instead we're using assign_jobs.py
# which uses maz-level control totals to place jobs. This is subject to change
# in the future, and is mostly to keep private data out of the UrbanSim micro
# data in order to be able to release all of our data as a public download.
print "Reading data"
buildings = gpd.read_geocsv(
"cache/buildings_match_controls.csv", index_col="building_id")
parcels = gpd.read_geocsv("cache/moved_attribute_parcels.csv", index_col="apn")
establishments = gpd.read_geocsv(
"cache/establishments.csv", index_col="duns_number")
mazs = gpd.read_geocsv("mazs.csv", index_col="maz_id")
berkeley = osmnx.gdf_from_place("Berkeley, California")
berkeley_mazs = gpd.sjoin(mazs, berkeley).drop("index_right", axis=1)
print "Intersecting with buildings"
# goal here is to create a dictionary where keys are establishments ids and
# values are possible building_ids - this lets us write a function to assign
# jobs to buildings. when we have a match to a parcel, we list the buildings
# on that parcel; when we have a match to a maz, we list the buildings in
# that maz.
establishments_intersect_buildings = gpd.sjoin(establishments, buildings)
establishments_possible_buildings = {
k: [v]
for k, v
in establishments_intersect_buildings.index_right.to_dict().iteritems()
}
print "Intersecting with parcels"
# intersect establishments and parcels, and drop intersections from buildings
parcels["num_buildings"] = \
buildings.apn.value_counts().reindex(parcels.index).fillna(0)
# don't bother intersect with parcels which don't have buildings
# we'll cover those with mazs
establishments_intersect_parcels = gpd.sjoin(
establishments, parcels[parcels.num_buildings > 0])
establishments_intersect_parcels.drop(establishments_possible_buildings.keys(),
inplace=True, errors='ignore')
del parcels["num_buildings"]
establishments_possible_buildings.update({
establishment_id: buildings[buildings.apn == apn].index
for establishment_id, apn
in establishments_intersect_parcels.index_right.iteritems()
})
print "Intersecting with mazs"
# intersect establishments from mazs, and drop intersections from buildings
# and parcels
berkeley_mazs["num_buildings"] = buildings.maz_id.value_counts().\
reindex(berkeley_mazs.index).fillna(0)
establishments_intersect_mazs = gpd.sjoin(
establishments, berkeley_mazs[berkeley_mazs.num_buildings > 0])
establishments_intersect_mazs.drop(establishments_possible_buildings.keys(),
inplace=True, errors='ignore')
del berkeley_mazs["num_buildings"]
establishments_possible_buildings.update({
establishment_id: buildings[buildings.maz_id == maz_id].index
for establishment_id, maz_id
in establishments_intersect_mazs.index_right.iteritems()
})
def assign_establishments_to_buildings(establishments_possible_buildings):
def assign_establishment_to_buildings(eid, building_ids):
        if len(building_ids) == 1:
return building_ids[0]
possible_buildings = buildings.loc[building_ids]
if possible_buildings.non_residential_sqft.sum() == 0:
# there's no non-res buildings - assign to random building
return possible_buildings.sample().index[0]
return possible_buildings.sample(
weights="non_residential_sqft").index[0]
return pd.Series({
eid: assign_establishment_to_buildings(eid, buildings)
for eid, buildings in establishments_possible_buildings.iteritems()
})
print "Picking buildings from among options"
establishments["building_id"] = \
assign_establishments_to_buildings(establishments_possible_buildings)
berkeley_establishments = establishments[establishments.building_id.notnull()]
outdf = berkeley_establishments.loc[
berkeley_establishments.index.repeat(
berkeley_establishments.local_employment)
][["PBA_category", "building_id"]]
print "Writing data"
outdf.index.name = "establishment_id"
outdf.reset_index(inplace=True)
outdf.index.name = "job_id"
outdf.index = outdf.index + 1 # starts at zero
outdf.to_csv("cache/jobs.csv")
| {
"content_hash": "3513c158457c7792860dc005f0c89979",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 81,
"avg_line_length": 39.445454545454545,
"alnum_prop": 0.7354229085042636,
"repo_name": "oaklandanalytics/cutting_board",
"id": "c2e922bee7bf2eaf497448db18120f6abbcbfad2",
"size": "4339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "not_used/assign_jobs_lat_lng.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "55507"
}
],
"symlink_target": ""
} |
""" An action manager item that represents an actual action. """
# Enthought library imports.
from traits.api import Any, Instance, List, Property, Str, on_trait_change
# Local imports.
from pyface.action.action import Action
from pyface.action.action_manager_item import ActionManagerItem
# Import the toolkit specific versions of the internal classes.
from pyface.toolkit import toolkit_object
_MenuItem = toolkit_object('action.action_item:_MenuItem')
_Tool = toolkit_object('action.action_item:_Tool')
_PaletteTool = toolkit_object('action.action_item:_PaletteTool')
class ActionItem(ActionManagerItem):
""" An action manager item that represents an actual action. """
#### 'ActionManagerItem' interface ########################################
#: The item's unique identifier ('unique' in this case means unique within
#: its group).
id = Property(Str)
#### 'ActionItem' interface ###############################################
#: The action!
action = Instance(Action)
#: The toolkit specific control created for this item.
control = Any
#: The toolkit specific Id of the control created for this item.
#
#: We have to keep the Id as well as the control because wx tool bar tools
#: are created as 'wxObjectPtr's which do not have Ids, and the Id is
#: required to manipulate the state of a tool via the tool bar 8^(
# FIXME v3: Why is this part of the public interface?
control_id = Any
#### Private interface ####################################################
#: All of the internal instances that wrap this item.
_wrappers = List(Any)
###########################################################################
# 'ActionManagerItem' interface.
###########################################################################
#### Trait properties #####################################################
def _get_id(self):
return self.action.id
#### Trait change handlers ################################################
def _enabled_changed(self, trait_name, old, new):
self.action.enabled = new
def _visible_changed(self, trait_name, old, new):
self.action.visible = new
@on_trait_change('_wrappers.control')
def _on_destroy(self, object, name, old, new):
""" Handle the destruction of the wrapper. """
if name == 'control' and new is None:
self._wrappers.remove(object)
###########################################################################
# 'ActionItem' interface.
###########################################################################
def add_to_menu(self, parent, menu, controller):
""" Add the item to a menu.
Parameters
----------
parent : toolkit control
The parent of the new menu item control.
menu : toolkit menu
The menu to add the action item to.
controller : ActionController instance or None
The controller to use.
"""
if (controller is None) or controller.can_add_to_menu(self.action):
wrapper = _MenuItem(parent, menu, self, controller)
# fixme: Martin, who uses this information?
if controller is None:
self.control = wrapper.control
self.control_id = wrapper.control_id
self._wrappers.append(wrapper)
def add_to_toolbar(self, parent, tool_bar, image_cache, controller,
show_labels=True):
""" Adds the item to a tool bar.
Parameters
----------
parent : toolkit control
The parent of the new menu item control.
tool_bar : toolkit toolbar
The toolbar to add the action item to.
image_cache : ImageCache instance
The image cache for resized images.
controller : ActionController instance or None
The controller to use.
show_labels : bool
Should the toolbar item show a label.
"""
if (controller is None) or controller.can_add_to_toolbar(self.action):
wrapper = _Tool(
parent, tool_bar, image_cache, self, controller, show_labels
)
# fixme: Martin, who uses this information?
if controller is None:
self.control = wrapper.control
self.control_id = wrapper.control_id
self._wrappers.append(wrapper)
def add_to_palette(self, tool_palette, image_cache, show_labels=True):
""" Adds the item to a tool palette.
Parameters
----------
        tool_palette : toolkit tool palette
            The tool palette to add the action item to.
        image_cache : ImageCache instance
            The image cache for resized images.
        show_labels : bool
            Whether the tool palette item should show a label.
"""
wrapper = _PaletteTool(tool_palette, image_cache, self, show_labels)
self._wrappers.append(wrapper)
def destroy(self):
""" Called when the action is no longer required.
By default this method calls 'destroy' on the action itself.
"""
self.action.destroy()
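# Illustrative sketch (not part of pyface itself): an ActionItem wraps a
# concrete Action, and a menu or tool bar manager later calls add_to_menu() /
# add_to_toolbar() on it with toolkit-specific arguments. Assuming a supported
# GUI toolkit backend (Qt or wx) is installed:
#
#     from pyface.action.api import Action
#
#     action = Action(name="&Open...", tooltip="Open a file")
#     item = ActionItem(action=action)
#     item.id                # the id property delegates to the wrapped action
#     item.enabled = False   # propagated to the action via the change handler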
| {
"content_hash": "630d0123ed59b466245390a17fcf0039",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 36.0472972972973,
"alnum_prop": 0.5555763823805061,
"repo_name": "geggo/pyface",
"id": "b0b4000c58fe2d0235759b52db0805a44f7e226b",
"size": "5978",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyface/action/action_item.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2246684"
},
{
"name": "Shell",
"bytes": "940"
}
],
"symlink_target": ""
} |
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
endpoint_name, # type: str
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
order_by = kwargs.pop('order_by', None) # type: Optional[str]
top = kwargs.pop('top', None) # type: Optional[int]
skip = kwargs.pop('skip', None) # type: Optional[str]
api_version = "2021-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments')
path_format_arguments = {
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if order_by is not None:
query_parameters['$orderBy'] = _SERIALIZER.query("order_by", order_by, 'str')
if top is not None:
query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if skip is not None:
query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
endpoint_name, # type: str
deployment_name, # type: str
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}')
path_format_arguments = {
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, 'str'),
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
endpoint_name, # type: str
deployment_name, # type: str
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = "2021-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}')
path_format_arguments = {
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, 'str'),
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request_initial(
endpoint_name, # type: str
deployment_name, # type: str
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}')
path_format_arguments = {
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
endpoint_name, # type: str
deployment_name, # type: str
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}')
path_format_arguments = {
"endpointName": _SERIALIZER.url("endpoint_name", endpoint_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
"deploymentName": _SERIALIZER.url("deployment_name", deployment_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class BatchDeploymentsOperations(object):
"""BatchDeploymentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
endpoint_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
order_by=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.BatchDeploymentTrackedResourceArmPaginatedResult"]
"""Lists Batch inference deployments in the workspace.
Lists Batch inference deployments in the workspace.
:param endpoint_name: Endpoint name.
:type endpoint_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param order_by: Ordering of list.
:type order_by: str
        :param top: Maximum number of results to return.
:type top: int
:param skip: Continuation token for pagination.
:type skip: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BatchDeploymentTrackedResourceArmPaginatedResult
or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.BatchDeploymentTrackedResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchDeploymentTrackedResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
order_by=order_by,
top=top,
skip=skip,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
order_by=order_by,
top=top,
skip=skip,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BatchDeploymentTrackedResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments'} # type: ignore
def _delete_initial(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete Batch Inference deployment (asynchronous).
Delete Batch Inference deployment (asynchronous).
:param endpoint_name: Endpoint name.
:type endpoint_name: str
:param deployment_name: Inference deployment identifier.
:type deployment_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
@distributed_trace
def get(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.BatchDeploymentData"
"""Gets a batch inference deployment by id.
Gets a batch inference deployment by id.
:param endpoint_name: Endpoint name.
:type endpoint_name: str
        :param deployment_name: The identifier for the Batch deployment.
:type deployment_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BatchDeploymentData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.BatchDeploymentData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchDeploymentData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BatchDeploymentData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
def _update_initial(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body, # type: "_models.PartialBatchDeploymentPartialTrackedResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.BatchDeploymentData"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.BatchDeploymentData"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'PartialBatchDeploymentPartialTrackedResource')
request = build_update_request_initial(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('BatchDeploymentData', pipeline_response)
if response.status_code == 202:
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
@distributed_trace
def begin_update(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body, # type: "_models.PartialBatchDeploymentPartialTrackedResource"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.BatchDeploymentData"]
"""Update a batch inference deployment (asynchronous).
Update a batch inference deployment (asynchronous).
:param endpoint_name: Inference endpoint name.
:type endpoint_name: str
:param deployment_name: The identifier for the Batch inference deployment.
:type deployment_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param body: Batch inference deployment definition object.
:type body:
~azure.mgmt.machinelearningservices.models.PartialBatchDeploymentPartialTrackedResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BatchDeploymentData or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchDeploymentData]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchDeploymentData"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BatchDeploymentData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
def _create_or_update_initial(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body, # type: "_models.BatchDeploymentData"
**kwargs # type: Any
):
# type: (...) -> "_models.BatchDeploymentData"
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchDeploymentData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'BatchDeploymentData')
request = build_create_or_update_request_initial(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('BatchDeploymentData', pipeline_response)
if response.status_code == 201:
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('BatchDeploymentData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
endpoint_name, # type: str
deployment_name, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body, # type: "_models.BatchDeploymentData"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.BatchDeploymentData"]
"""Creates/updates a batch inference deployment (asynchronous).
Creates/updates a batch inference deployment (asynchronous).
:param endpoint_name: Inference endpoint name.
:type endpoint_name: str
:param deployment_name: The identifier for the Batch inference deployment.
:type deployment_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace.
:type workspace_name: str
:param body: Batch inference deployment definition object.
:type body: ~azure.mgmt.machinelearningservices.models.BatchDeploymentData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BatchDeploymentData or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.BatchDeploymentData]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BatchDeploymentData"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BatchDeploymentData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/deployments/{deploymentName}'} # type: ignore
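# Illustrative usage sketch (not part of the generated code): this operations
# group is normally reached through an authenticated service client that
# exposes it as an attribute (here assumed to be ``client.batch_deployments``);
# the endpoint/resource names below are placeholders.
#
#     for deployment in client.batch_deployments.list(
#             endpoint_name="my-endpoint",
#             resource_group_name="my-resource-group",
#             workspace_name="my-workspace",
#             top=10):
#         print(deployment.name)
#
#     poller = client.batch_deployments.begin_delete(
#         endpoint_name="my-endpoint",
#         deployment_name="my-deployment",
#         resource_group_name="my-resource-group",
#         workspace_name="my-workspace")
#     poller.result()  # block until the long-running delete completes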
| {
"content_hash": "115d38641a7a1a7de7aa6dc4ff814fd3",
"timestamp": "",
"source": "github",
"line_count": 830,
"max_line_length": 264,
"avg_line_length": 45.51566265060241,
"alnum_prop": 0.648816771666049,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d93b3da5be6f3eb9c9e187fc3675ce54bf2495af",
"size": "38245",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2021_10_01/operations/_batch_deployments_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Bridges between the `asyncio` module and Tornado IOLoop.
.. versionadded:: 3.2
This module integrates Tornado with the ``asyncio`` module introduced
in Python 3.4 (and available `as a separate download
<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
it possible to combine the two libraries on the same event loop.
Most applications should use `AsyncIOMainLoop` to run Tornado on the
default ``asyncio`` event loop. Applications that need to run event
loops on multiple threads may use `AsyncIOLoop` to create multiple
loops.
.. note::
Tornado requires the `~asyncio.BaseEventLoop.add_reader` family of methods,
so it is not compatible with the `~asyncio.ProactorEventLoop` on Windows.
Use the `~asyncio.SelectorEventLoop` instead.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import tornado.concurrent
from tornado.gen import convert_yielded
from tornado.ioloop import IOLoop
from tornado import stack_context
try:
# Import the real asyncio module for py33+ first. Older versions of the
# trollius backport also use this name.
import asyncio # type: ignore
except ImportError as e:
# Asyncio itself isn't available; see if trollius is (backport to py26+).
try:
import trollius as asyncio # type: ignore
except ImportError:
# Re-raise the original asyncio error, not the trollius one.
raise e
class BaseAsyncIOLoop(IOLoop):
def initialize(self, asyncio_loop, close_loop=False, **kwargs):
super(BaseAsyncIOLoop, self).initialize(**kwargs)
self.asyncio_loop = asyncio_loop
self.close_loop = close_loop
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
self.handlers = {}
# Set of fds listening for reads/writes
self.readers = set()
self.writers = set()
self.closing = False
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
if self.close_loop:
self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
if fd in self.handlers:
raise ValueError("fd %s added twice" % fd)
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
if events & IOLoop.READ:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
if events & IOLoop.WRITE:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & IOLoop.READ:
if fd not in self.readers:
self.asyncio_loop.add_reader(
fd, self._handle_events, fd, IOLoop.READ)
self.readers.add(fd)
else:
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if events & IOLoop.WRITE:
if fd not in self.writers:
self.asyncio_loop.add_writer(
fd, self._handle_events, fd, IOLoop.WRITE)
self.writers.add(fd)
else:
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.handlers:
return
if fd in self.readers:
self.asyncio_loop.remove_reader(fd)
self.readers.remove(fd)
if fd in self.writers:
self.asyncio_loop.remove_writer(fd)
self.writers.remove(fd)
del self.handlers[fd]
def _handle_events(self, fd, events):
fileobj, handler_func = self.handlers[fd]
handler_func(fileobj, events)
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.asyncio_loop.run_forever()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.asyncio_loop.stop()
def call_at(self, when, callback, *args, **kwargs):
# asyncio.call_at supports *args but not **kwargs, so bind them here.
# We do not synchronize self.time and asyncio_loop.time, so
# convert from absolute to relative.
return self.asyncio_loop.call_later(
max(0, when - self.time()), self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
if self.closing:
# TODO: this is racy; we need a lock to ensure that the
# loop isn't closed during call_soon_threadsafe.
raise RuntimeError("IOLoop is closing")
self.asyncio_loop.call_soon_threadsafe(
self._run_callback,
functools.partial(stack_context.wrap(callback), *args, **kwargs))
add_callback_from_signal = add_callback
class AsyncIOMainLoop(BaseAsyncIOLoop):
"""``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
current ``asyncio`` event loop (i.e. the one returned by
``asyncio.get_event_loop()``). Recommended usage::
from tornado.platform.asyncio import AsyncIOMainLoop
import asyncio
AsyncIOMainLoop().install()
asyncio.get_event_loop().run_forever()
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
installing alternative IOLoops.
"""
def initialize(self, **kwargs):
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
close_loop=False, **kwargs)
class AsyncIOLoop(BaseAsyncIOLoop):
"""``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
This class follows the usual Tornado semantics for creating new
``IOLoops``; these loops are not necessarily related to the
``asyncio`` default event loop. Recommended usage::
from tornado.ioloop import IOLoop
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
IOLoop.current().start()
Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
can be accessed with the ``asyncio_loop`` attribute.
"""
def initialize(self, **kwargs):
loop = asyncio.new_event_loop()
try:
super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
except Exception:
# If initialize() does not succeed (taking ownership of the loop),
# we have to close it.
loop.close()
raise
def to_tornado_future(asyncio_future):
"""Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
.. versionadded:: 4.1
"""
tf = tornado.concurrent.Future()
tornado.concurrent.chain_future(asyncio_future, tf)
return tf
def to_asyncio_future(tornado_future):
"""Convert a Tornado yieldable object to an `asyncio.Future`.
.. versionadded:: 4.1
.. versionchanged:: 4.3
Now accepts any yieldable object, not just
`tornado.concurrent.Future`.
"""
tornado_future = convert_yielded(tornado_future)
af = asyncio.Future()
tornado.concurrent.chain_future(tornado_future, af)
return af
if hasattr(convert_yielded, 'register'):
convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
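if __name__ == "__main__":
    # Minimal usage sketch (not part of Tornado itself), assuming Tornado 4.x
    # with the real asyncio module available: install AsyncIOMainLoop so that
    # Tornado callbacks run on the default asyncio event loop, then drive a
    # Tornado coroutine from asyncio via to_asyncio_future().
    from tornado import gen

    AsyncIOMainLoop().install()
    loop = asyncio.get_event_loop()

    @gen.coroutine
    def greet():
        yield gen.sleep(0.1)
        raise gen.Return("hello from tornado on asyncio")

    # run_until_complete() drives the shared event loop, so the Tornado
    # timeout scheduled by gen.sleep() fires as expected.
    print(loop.run_until_complete(to_asyncio_future(greet())))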
| {
"content_hash": "f2ef33e461bef608e348f93236f1a356",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 80,
"avg_line_length": 35.89592760180995,
"alnum_prop": 0.6272532459347031,
"repo_name": "cyrusin/tornado",
"id": "3fd67dbd89cf3713e3b771b4312e616de85f0ef0",
"size": "7933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado/platform/asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1078"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "12417"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Python",
"bytes": "1511611"
},
{
"name": "Ruby",
"bytes": "1733"
},
{
"name": "Shell",
"bytes": "4881"
}
],
"symlink_target": ""
} |
def pytest_configure(config):
config.addinivalue_line(
'markers', 'flake8: marks tests checking for flake8 compliance')
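# Illustrative sketch (not part of this module): once pytest_configure() has
# registered the marker, test functions can opt in to it, e.g.
#
#     import pytest
#
#     @pytest.mark.flake8
#     def test_my_package_is_flake8_clean():
#         ...
#
# and such tests can then be selected with ``pytest -m flake8``.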
| {
"content_hash": "dac01819bbc265530c553bd8ba8b317f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 72,
"avg_line_length": 44,
"alnum_prop": 0.7196969696969697,
"repo_name": "ament/ament_lint",
"id": "92b36911c7ed2047408908c4e56ea47254ea9463",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "ament_flake8/ament_flake8/pytest_marker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "79076"
},
{
"name": "C++",
"bytes": "5218"
},
{
"name": "CMake",
"bytes": "72215"
},
{
"name": "Python",
"bytes": "286752"
}
],
"symlink_target": ""
} |
"""
DNRM Supervisor resource and resource factory exceptions.
"""
from dnrm.exceptions import base
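# NOTE: the `_` used in the messages below is the translation (gettext)
# function; it is assumed to be installed globally elsewhere in the project
# (a common OpenStack pattern), which is why it is not imported here.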
class InvalidResource(base.SupervisorException):
message = _("Resource validation failed.")
class InvalidResourceType(base.SupervisorException):
message = _('Invalid resource type name: %(type_name)s')
class ResourceAllocated(base.SupervisorException):
message = _("Resource %(resource_id)s was been allocated.")
class ResourceProcessing(base.SupervisorException):
message = _("Resource %(resource_id)s is processed.")
| {
"content_hash": "d82cde7d2dbb34396ff9bbdd22f2ca83",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.7513812154696132,
"repo_name": "Brocade-OpenSource/OpenStack-DNRM",
"id": "7323b3161fe3d01212f78ddf63ea987f0495b879",
"size": "1225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dnrm/exceptions/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "603854"
},
{
"name": "Shell",
"bytes": "6704"
}
],
"symlink_target": ""
} |
import pymysql
DEBUG = True
if DEBUG:
from .dev import *
else:
from .prod import *
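# install_as_MySQLdb() makes PyMySQL masquerade as the MySQLdb module, so
# Django's MySQL backend can use it without a compiled MySQLdb install.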
pymysql.install_as_MySQLdb() | {
"content_hash": "f72466bf13675fe1b3a5eba6cf6d1d99",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 28,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.6434108527131783,
"repo_name": "zhengze/zblog",
"id": "ca82ebb2e2f10d501ca2cf73a3bae99aabe2255c",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zblogsite/settings/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26277"
},
{
"name": "Dockerfile",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "23952"
},
{
"name": "JavaScript",
"bytes": "294183"
},
{
"name": "Python",
"bytes": "26757"
},
{
"name": "Shell",
"bytes": "104"
}
],
"symlink_target": ""
} |
'****************************************************'
'Created by C. Nichols #B0)~ '
'E-mail: [email protected] '
'Created: 11/16/02 '
'Updated! 11/19/02 '
'Version: Python 2+ '
'Desc: IPy Notify for Micro$oft Windoze '
'Use: To notify whomever that your IP address has '
'changed if you have a non-static IP address and run '
'a web server, game server, etc. '
'****************************************************'
' '
' '
' # '
' 0 0 '
'~~~~~~~~~~~~~~~~~uuu~~~~U~~~~uuu~~~~~~~~~~~~~~~~~~~~'
"!!!!!!!!!!!!HERE'S LOOKING AT YOU KID!!!!!!!!!!!!!!!"
'****************************************************'
import os, os.path, string, time
import smtplib, socket
import win32api
# GLOBALS --------------------------------------------
(head,tail) = os.path.split(win32api.GetSystemDirectory()) # Get the win path.
(ldrive,os_root) = os.path.split(head) # Now get the local drive.
# The local drive will generally, if not always, be c:\
# Path_log = ldrive+'\yourdir\IPy_Notify.log'
# would place the log in a directory of your choice - the directory must already exist.
Path_dat = ldrive+'IPy_Notify.dat' # Program requires this file to run properly.
Path_log = ldrive+'IPy_Notify.log'
Name = win32api.GetComputerName() # Get actual machine name.
#Add your server name, mail server, and email addresses receiving notification.
MailServer = 'smtp.yourprovider.com'
Address = ['[email protected]']
#Address = ['[email protected]','[email protected]'] # Multiple Addresses - uncomment will override above.
Frm_add = '[email protected]' # From must be a valid e-mail address or the mail function will fail.
# Fill in these credentials only if your ISP requires SMTP authentication; leave blank if unsure and test.
User = ''
Pass = ''
# Functions ------------------------------------------
def mail(to='',frm='',subj='',body='',server=''):
try:
message='From: %s\r\nTo: %s\r\nSubject: %s\r\n%s'%(frm,to,subj,body)
mail=smtplib.SMTP(server)
mail.sendmail(frm,to,message)
mail.close()
except:
try:
# Logon to the server... If needed
message='From: %s\r\nTo: %s\r\nSubject: %s\r\n%s'%(frm,to,subj,body)
mail=smtplib.SMTP(server)
mail.login(User,Pass)
mail.sendmail(frm,to,message)
mail.close()
except:
print 'ERROR: Unable to send notification! - '+time.ctime()
open(Path_log,'a').write(time.ctime()+' \nERROR: Unable to send notification!')
def start():
def getIP(name, path):
print 'IPy Notify by C. Nichols, 2002.\n'
ip = socket.gethostbyname(name)
print 'Current ip: '+str(ip)
open(path,'w').write(ip) #Save the current IP address.
out(name,Path_dat)
def out(name, path, stat=1):
while stat:
cur_ip = open(path,'r').readline()
new_ip = str(socket.gethostbyname(name))
if cur_ip==new_ip:
print 'Sleeping...'
time.sleep(15) # Sleep in seconds - adjust polling interval to your taste.
print 'Polling: '+new_ip+', '+time.ctime()
else:
print 'IP address has changed: '+new_ip
open(Path_log,'a').write(time.ctime()+'\nINFO: IP address has changed: '+new_ip)
print 'sending notification...'
for add in Address:
mail(to=add,frm=Frm_add,subj='Message from '+name,body='New IP address: '+new_ip+' assigned to '+name, server=MailServer)
getIP(name,Path_dat)
stat=0
getIP(Name,Path_dat)
# Run ------------------------------------------------
# Make sure this is started via the command line or
# by a .cmd file in startup - The command window can
# be hidden from a cmd file if you hate it like I do.
# Download Python @ www.python.org or PythonWin
# (active python) from www.activestate.com.
try:
open(Path_log,'a').write(time.ctime()+' START: IP Polling\n------------------------------------------\n')
start()
except:
open(Path_log,'a').write(time.ctime()+' \nERROR: IPy Notify failed!')
| {
"content_hash": "82065844562ce8abbc50e405f67fc4a7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 141,
"avg_line_length": 44.23529411764706,
"alnum_prop": 0.5088652482269503,
"repo_name": "ActiveState/code",
"id": "11de294000e8d36c6a090c93a5a6469585c40310",
"size": "4512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/162994_IPy_Notify/recipe-162994.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
class Trie(object):
def __init__(self):
self.root = {}
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: void
"""
node = self.root
for c in word:
if c not in node:
node[c] = {}
node = node[c]
node['#'] = '#'
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
node = self.root
for c in word:
if c not in node:
return False
node = node[c]
return '#' in node
def startsWith(self, prefix):
"""
Returns if there is any word in the trie
that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
node = self.root
for c in prefix:
if c not in node:
return False
node = node[c]
return True
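if __name__ == "__main__":
    # Small usage demo (not part of the original solution) exercising the
    # three operations.
    trie = Trie()
    trie.insert("apple")
    print(trie.search("apple"))     # True
    print(trie.search("app"))       # False - "app" was never inserted
    print(trie.startsWith("app"))   # True - "apple" starts with "app"
    trie.insert("app")
    print(trie.search("app"))       # True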
| {
"content_hash": "442cada7752986d3c0f0f72c4b2cdf05",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 48,
"avg_line_length": 23.674418604651162,
"alnum_prop": 0.43909626719056977,
"repo_name": "ChuanleiGuo/AlgorithmsPlayground",
"id": "d18d94ec8b81408567a9f69f7dcdba028e53a3b8",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LeetCodeSolutions/python/208_Implement_Trie_(Prefix_Tree).py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8884"
},
{
"name": "C++",
"bytes": "58994"
},
{
"name": "Java",
"bytes": "441895"
},
{
"name": "Python",
"bytes": "335460"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
('home', '0004_auto_20150907_1916'),
]
operations = [
migrations.CreateModel(
name='CafesPage',
fields=[
('page_ptr', models.OneToOneField(serialize=False, parent_link=True, primary_key=True, auto_created=True, to='wagtailcore.Page')),
('header', models.CharField(max_length=255, default='Find your Gentle Coffee Cafes')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
| {
"content_hash": "f0839924299e9c8f7ce05b766996b080",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 146,
"avg_line_length": 31.04,
"alnum_prop": 0.5721649484536082,
"repo_name": "taedori81/gentlecoffee",
"id": "1f2f7746ebf49860efc30d724d5bca2b161a28ad",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/migrations/0005_cafespage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "29329"
},
{
"name": "HTML",
"bytes": "202119"
},
{
"name": "JavaScript",
"bytes": "18234"
},
{
"name": "Python",
"bytes": "306540"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
} |
import sys
import argparse
from flask import Flask, g, session
from flask.ext.github import GitHub
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
github = GitHub()
db = SQLAlchemy()
@app.before_request
def before_request():
from fish_bundles_web.models import User
g.user = None
if 'user' in session:
g.user = User.query.filter_by(username=session['user']).first()
def parse_arguments(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument('--port', '-p', type=int, default="5000", help="Port to start the server with.")
parser.add_argument('--bind', '-b', default="0.0.0.0", help="IP to bind the server to.")
parser.add_argument('--conf', '-c', default='fish_bundles_web/config/local.conf', help="Path to configuration file.")
parser.add_argument('--debug', '-d', action='store_true', default=False, help='Indicates whether to run in debug mode.')
options = parser.parse_args(args)
return options
def init_app(conf):
import fish_bundles_web.handlers # NOQA
import fish_bundles_web.login # NOQA
from fish_bundles_web.bundles import init_bundles # NOQA
from fish_bundles_web import config # NOQA
config.init_app(app, path=conf)
github.init_app(app)
db.init_app(app)
init_bundles()
def main():
args = parse_arguments()
init_app(args.conf)
app.run(debug=args.debug, host=args.bind, port=args.port)
if __name__ == "__main__":
main()
| {
"content_hash": "a61cbc2cab791bdc350a42924ccd35cb",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 124,
"avg_line_length": 26.344827586206897,
"alnum_prop": 0.6662303664921466,
"repo_name": "fish-bundles/fish-bundles-web",
"id": "6aea87f764d3cb6f5a07f0fe206df9663751d9d0",
"size": "1571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fish_bundles_web/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40088"
},
{
"name": "CoffeeScript",
"bytes": "2523"
},
{
"name": "JavaScript",
"bytes": "30380"
},
{
"name": "Python",
"bytes": "39297"
},
{
"name": "Ruby",
"bytes": "952"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from bcc import BPF, USDT
from unittest import main, TestCase
from subprocess import Popen, PIPE
import ctypes as ct
import inspect, os, tempfile
class TestUDST(TestCase):
def setUp(self):
common_h = b"""
#include "folly/tracing/StaticTracepoint.h"
static inline void record_val(int val)
{
FOLLY_SDT(test, probe, val);
FOLLY_SDT(test_dup_name, probe, val);
}
extern void record_a(int val);
extern void record_b(int val);
"""
a_c = b"""
#include <stdio.h>
#include "common.h"
void record_a(int val)
{
record_val(val);
}
"""
b_c = b"""
#include <stdio.h>
#include "common.h"
void record_b(int val)
{
record_val(val);
}
"""
m_c = b"""
#include <stdio.h>
#include <unistd.h>
#include "common.h"
int main() {
while (1) {
record_a(1);
record_b(2);
record_val(3);
sleep(1);
}
return 0;
}
"""
# BPF program
self.bpf_text = b"""
BPF_PERF_OUTPUT(event);
int do_trace(struct pt_regs *ctx) {
int result = 0;
bpf_usdt_readarg(1, ctx, &result);
event.perf_submit(ctx, &result, sizeof(result));
return 0;
};
"""
def _create_file(name, text):
text_file = open(name, "wb")
text_file.write(text)
text_file.close()
# Create source files
self.tmp_dir = tempfile.mkdtemp()
print("temp directory: " + self.tmp_dir)
_create_file(self.tmp_dir + "/common.h", common_h)
_create_file(self.tmp_dir + "/a.cpp", a_c)
_create_file(self.tmp_dir + "/b.cpp", b_c)
_create_file(self.tmp_dir + "/m.cpp", m_c)
# Compilation
# the usdt test:probe exists in liba.so, libb.so and a.out
include_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + "/include"
a_src = self.tmp_dir + "/a.cpp"
a_obj = self.tmp_dir + "/a.o"
a_lib = self.tmp_dir + "/liba.so"
b_src = self.tmp_dir + "/b.cpp"
b_obj = self.tmp_dir + "/b.o"
b_lib = self.tmp_dir + "/libb.so"
m_src = self.tmp_dir + "/m.cpp"
m_bin = self.tmp_dir + "/a.out"
m_linker_opt = " -L" + self.tmp_dir + " -la -lb"
self.assertEqual(os.system("gcc -I" + include_path + " -fpic -c -o " + a_obj + " " + a_src), 0)
self.assertEqual(os.system("gcc -I" + include_path + " -fpic -c -o " + b_obj + " " + b_src), 0)
self.assertEqual(os.system("gcc -shared -o " + a_lib + " " + a_obj), 0)
self.assertEqual(os.system("gcc -shared -o " + b_lib + " " + b_obj), 0)
self.assertEqual(os.system("gcc -I" + include_path + " " + m_src + " -o " + m_bin + m_linker_opt), 0)
# Run the application
self.app = Popen([m_bin], env=dict(os.environ, LD_LIBRARY_PATH=self.tmp_dir))
os.system("../../tools/tplist.py -vvv -p " + str(self.app.pid))
def test_attach1(self):
# enable USDT probe from given PID and verifier generated BPF programs
u = USDT(pid=int(self.app.pid))
u.enable_probe(probe="test:probe", fn_name="do_trace")
b = BPF(text=self.bpf_text, usdt_contexts=[u])
# processing events
self.probe_value_1 = 0
self.probe_value_2 = 0
self.probe_value_3 = 0
self.probe_value_other = 0
def print_event(cpu, data, size):
result = ct.cast(data, ct.POINTER(ct.c_int)).contents
if result.value == 1:
self.probe_value_1 = 1
elif result.value == 2:
self.probe_value_2 = 1
elif result.value == 3:
self.probe_value_3 = 1
else:
self.probe_value_other = 1
b[b"event"].open_perf_buffer(print_event)
for i in range(100):
if (self.probe_value_1 == 0 or
self.probe_value_2 == 0 or
self.probe_value_3 == 0 or
self.probe_value_other != 0):
b.perf_buffer_poll()
else:
break;
self.assertTrue(self.probe_value_1 != 0)
self.assertTrue(self.probe_value_2 != 0)
self.assertTrue(self.probe_value_3 != 0)
self.assertTrue(self.probe_value_other == 0)
def tearDown(self):
# kill the subprocess, clean the environment
self.app.kill()
self.app.wait()
os.system("rm -rf " + self.tmp_dir)
if __name__ == "__main__":
main()
| {
"content_hash": "b0e806329286bdb11d469f3d8daa62a8",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 109,
"avg_line_length": 29.758389261744966,
"alnum_prop": 0.5466847090663058,
"repo_name": "iovisor/bcc",
"id": "70fa5a028df9fcdd8ff40ad06c15599463637aae",
"size": "4581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/test_usdt3.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11636356"
},
{
"name": "C++",
"bytes": "916663"
},
{
"name": "CMake",
"bytes": "58262"
},
{
"name": "HTML",
"bytes": "2997"
},
{
"name": "Lua",
"bytes": "299473"
},
{
"name": "Makefile",
"bytes": "5763"
},
{
"name": "Python",
"bytes": "1449659"
},
{
"name": "Shell",
"bytes": "21840"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class PagedApplicationTypeInfoList(Model):
"""The list of application types that are provisioned or being provisioned in
the cluster. The list is paged when all of the results cannot fit in a
single message. The next set of results can be obtained by executing the
same query with the continuation token provided in this list.
:param continuation_token:
:type continuation_token: str
:param items:
:type items: list of :class:`ApplicationTypeInfo
<azure.servicefabric.models.ApplicationTypeInfo>`
"""
_attribute_map = {
'continuation_token': {'key': 'ContinuationToken', 'type': 'str'},
'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'},
}
def __init__(self, continuation_token=None, items=None):
self.continuation_token = continuation_token
self.items = items
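# Illustrative sketch (not part of the generated SDK): how a caller might walk
# all pages using the continuation token described in the docstring above. The
# client operation name used below is hypothetical -- substitute the operation
# that actually returns this model.
def iter_application_types(client):
    continuation_token = None
    while True:
        page = client.get_application_type_info_list(  # hypothetical operation
            continuation_token=continuation_token)
        for item in page.items or []:
            yield item
        continuation_token = page.continuation_token
        if not continuation_token:
            return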
| {
"content_hash": "d0896219821a97b2e80494876d7b573e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 37.458333333333336,
"alnum_prop": 0.692992213570634,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "62997960a61de7837bb9f5a3368a16a33e56d38e",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""Configuration schemas for Django."""
| {
"content_hash": "b6ca96bc1d285235c84aeece6c0a92d6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.725,
"repo_name": "benoitbryon/django-confit",
"id": "79bb84e6170ac252e7b1b55ca6de5dc7c1c774ad",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_confit/schemas/django/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "792"
},
{
"name": "Makefile",
"bytes": "1593"
},
{
"name": "Python",
"bytes": "252838"
}
],
"symlink_target": ""
} |
"""
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
import tempfile
from django.db import models
from django.core.files.storage import FileSystemStorage
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __unicode__(self):
return self.name
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self):
import datetime
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save()
def __unicode__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __unicode__(self):
return "%s is %s" % (self.writer, self.age)
from django.contrib.localflavor.us.models import PhoneNumberField
class PhoneNumber(models.Model):
phone = PhoneNumberField()
description = models.CharField(max_length=20)
def __unicode__(self):
return self.phone
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __unicode__(self):
return self.description
try:
# If PIL is available, try testing ImageFields. Checking for the existence
# of Image is enough for CPython, but for PyPy, you need to check for the
# underlying modules If PIL is not available, ImageField tests are omitted.
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image, _imaging
except ImportError:
import Image, _imaging
test_images = True
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __unicode__(self):
return self.description
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __unicode__(self):
return self.description
except ImportError:
test_images = False
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __unicode__(self):
return self.field
class Product(models.Model):
slug = models.SlugField(unique=True)
def __unicode__(self):
return self.slug
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __unicode__(self):
return u"%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __unicode__(self):
return self.key
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __unicode__(self):
        return self.title
class DerivedPost(Post):
pass
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __unicode__(self):
return unicode(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
        # don't allow this field to be used in a form (a real use-case might be
        # that you know the markup will always be X, but the field lives in an
        # app that allows the user to say it could be something else)
# regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
| {
"content_hash": "53dfa77626325b637d78e9ea4dd2cda7",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 95,
"avg_line_length": 32.520161290322584,
"alnum_prop": 0.6669559826410415,
"repo_name": "disqus/django-old",
"id": "5dcc0391ce3759e49e30c4a55890e8670d50af13",
"size": "8065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modeltests/model_forms/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85749"
},
{
"name": "Python",
"bytes": "7413553"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from core.tests.mommy_utils import make_recipe
from cla_eventlog import event_registry
from cla_eventlog.tests.base import EventTestCaseMixin
from cla_eventlog.constants import LOG_TYPES, LOG_LEVELS
class AssignToProviderEventTestCase(EventTestCaseMixin, TestCase):
EVENT_KEY = "assign_to_provider"
def test_assign_to_provider_manually(self):
eligible_case = make_recipe("legalaid.eligible_case")
self._test_process_with_implicit_code("MANALC", process_kwargs={"is_manual": True}, dummy_case=eligible_case)
def test_assign_to_provider_MANREF(self):
eligible_case = make_recipe("legalaid.eligible_case")
self._test_process_with_implicit_code(
"MANREF", process_kwargs={"is_manual_ref": True}, dummy_case=eligible_case
)
def test_assign_to_provider_automatically(self):
eligible_case = make_recipe("legalaid.eligible_case")
self.assertEqual(eligible_case.eligibility_check.state, "yes")
self._test_process_with_implicit_code("REFSP", process_kwargs={"is_manual": False}, dummy_case=eligible_case)
    def test_assign_to_provider_automatically_ineligible_case(self):
        ineligible_case = make_recipe("legalaid.case")
        self.assertNotEqual(ineligible_case.eligibility_check.state, "yes")
        self._test_process_with_implicit_code("SPOR", process_kwargs={"is_manual": False, "is_spor": True})
    def test_assign_to_provider_manually_ineligible_case(self):
        ineligible_case = make_recipe("legalaid.case")
        self.assertNotEqual(ineligible_case.eligibility_check.state, "yes")
        self._test_process_with_implicit_code("SPOR", process_kwargs={"is_manual": True, "is_spor": True})
class DeferAssignmentEventTestCase(EventTestCaseMixin, TestCase):
EVENT_KEY = "defer_assignment"
def test_defer_assignment(self):
self._test_process_with_implicit_code("CBSP")
class DeclineHelpEventTestCase(EventTestCaseMixin, TestCase):
EVENT_KEY = "decline_help"
CODES = ["DESP", "DECL", "NRES"]
def test_DESP(self):
self._test_process_with_expicit_code_and_requires_action_None_if_op_or_op_manager(self.CODES, code="DESP")
def test_DECL(self):
self._test_process_with_expicit_code_and_requires_action_None_if_op_or_op_manager(self.CODES, code="DECL")
def test_NRES(self):
self._test_process_with_expicit_code_and_requires_action_None_if_op_or_op_manager(self.CODES, code="NRES")
class CallMeBackEventTestCase(EventTestCaseMixin, TestCase):
EVENT_KEY = "call_me_back"
def test_CB1(self):
self._test_process_with_implicit_code("CB1")
def test_CB2(self):
case = make_recipe("legalaid.case", callback_attempt=1)
self._test_process_with_implicit_code("CB2", dummy_case=case)
def test_CB3(self):
case = make_recipe("legalaid.case", callback_attempt=2)
self._test_process_with_implicit_code("CB3", dummy_case=case)
def test_CB4_errors(self):
dummy_case = make_recipe("legalaid.case", callback_attempt=3)
event = event_registry.get_event(self.EVENT_KEY)()
self.assertRaises(
ValueError, event.process, **{"case": dummy_case, "notes": "this is a note", "created_by": self.dummy_user}
)
self.assertEqual(dummy_case.log_set.count(), 0)
class StopCallMeBackEventTestCase(EventTestCaseMixin, TestCase):
EVENT_KEY = "stop_call_me_back"
def test_CBC(self):
# with callback_attempt == 1
case = make_recipe("legalaid.case", callback_attempt=1)
self._test_process_with_implicit_code("CBC", dummy_case=case, process_kwargs={"cancel": True})
# with callback_attempt == 2
case = make_recipe("legalaid.case", callback_attempt=2)
self._test_process_with_implicit_code("CBC", dummy_case=case, process_kwargs={"cancel": True})
# with callback_attempt == 3
case = make_recipe("legalaid.case", callback_attempt=3)
self._test_process_with_implicit_code("CBC", dummy_case=case, process_kwargs={"cancel": True})
def test_CBC_errors_without_prev_CBx(self):
dummy_case = make_recipe("legalaid.case", callback_attempt=0)
event = event_registry.get_event(self.EVENT_KEY)()
self.assertRaises(
ValueError,
event.process,
**{"case": dummy_case, "notes": "this is a note", "created_by": self.dummy_user, "cancel": True}
)
self.assertEqual(dummy_case.log_set.count(), 0)
def test_CALLBACK_COMPLETE(self):
# with callback_attempt == 1
case = make_recipe("legalaid.case", callback_attempt=1)
self._test_process_with_implicit_code(
"CALLBACK_COMPLETE",
dummy_case=case,
expected_type=LOG_TYPES.SYSTEM,
expected_level=LOG_LEVELS.HIGH,
process_kwargs={"complete": True},
)
# with callback_attempt == 2
case = make_recipe("legalaid.case", callback_attempt=2)
self._test_process_with_implicit_code(
"CALLBACK_COMPLETE",
dummy_case=case,
expected_type=LOG_TYPES.SYSTEM,
expected_level=LOG_LEVELS.HIGH,
process_kwargs={"complete": True},
)
# with callback_attempt == 3
case = make_recipe("legalaid.case", callback_attempt=3)
self._test_process_with_implicit_code(
"CALLBACK_COMPLETE",
dummy_case=case,
expected_type=LOG_TYPES.SYSTEM,
expected_level=LOG_LEVELS.HIGH,
process_kwargs={"complete": True},
)
def test_CALLBACK_COMPLETE_errors_without_prev_CBx(self):
dummy_case = make_recipe("legalaid.case", callback_attempt=0)
event = event_registry.get_event(self.EVENT_KEY)()
self.assertRaises(
ValueError,
event.process,
**{"case": dummy_case, "notes": "this is a note", "created_by": self.dummy_user, "complete": True}
)
self.assertEqual(dummy_case.log_set.count(), 0)
| {
"content_hash": "d87ecfa4bd2c59d6ea699d8f20720e36",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 119,
"avg_line_length": 38.967948717948715,
"alnum_prop": 0.6510939299226847,
"repo_name": "ministryofjustice/cla_backend",
"id": "78ec488b77a14f56ec0863c3758a44d03155cf60",
"size": "6079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_backend/apps/call_centre/tests/test_events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45941"
},
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "HTML",
"bytes": "14794"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Mustache",
"bytes": "3607"
},
{
"name": "Python",
"bytes": "1577558"
},
{
"name": "Shell",
"bytes": "11204"
},
{
"name": "Smarty",
"bytes": "283906"
}
],
"symlink_target": ""
} |
'''
Compare samples' similarity
Input: a square matrix (rows=samples, cols=samples, cells=similarity)
clonesize info (sample --> number of clones)
Output:
normalized matrix
'''
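# Illustrative input layout, inferred from the readers below (not a normative spec):
#
#   clone-size file (tab-separated, one header line; first column is the sample
#   name, second column the number of clones):
#       Sample  Clones
#       S1      1200
#       S2      980
#
#   similarity matrix file (tab-separated; the header row and the first column
#   are sample names, cells hold pairwise similarity values):
#       Samples S1      S2
#       S1      10.0    0.3
#       S2      0.3     9.0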
import os
import sys
import re
import math
import numbers
import cPickle as pickle
import gzip
import numpy as np
from jobTree.scriptTree.target import Target
from sonLib.bioio import system
import aimseqtk.lib.common as lcommon
def read_clonesize(file):
samples = []
s2clones = {}
f = open(file, 'r')
f.readline()
for line in f:
items = line.strip().split('\t')
sample = items[0]
clones = int(items[1])
if not re.search("Avr", sample):
samples.append(sample)
s2clones[sample] = clones
f.close()
return samples, s2clones
def read_matrix(file):
s2s2c = {}
f = open(file, 'r')
colnames = f.readline().strip().split('\t')
for line in f:
items = line.strip().split('\t')
rowname = items[0]
s2s2c[rowname] = {}
for i in xrange(1, len(items)):
s2s2c[rowname][colnames[i]] = float(items[i])
f.close()
return s2s2c
def normalize_matrix(r2c2count, names, name2total):
p_mean = 3.4 * 10**(-10) # from Murugan 2012
rows = []
minval = float('inf')
for rowname in names:
row = []
c2count = r2c2count[rowname]
total1 = name2total[rowname]
assert total1 > 0
for colname in names:
total2 = name2total[colname]
assert total2 > 0
newcount = c2count[colname]
#newcount = c2count[colname] / (total1 * total2 * p_mean)
#newcount = math.log10(c2count[colname] / (total1 * total2 * p_mean))
minval = min(minval, newcount)
row.append(newcount)
rows.append(row)
    # adjust artificial values of self-overlap (so heatmap is visible)
#minval = minval * 0.9
for i, row in enumerate(rows):
row[i] = minval
return rows
def print_matrix(names, rows, file):
f = open(file, 'w')
f.write("Samples\t%s\n" % ("\t".join(names)))
for i, name in enumerate(names):
row = ["%f" % c for c in rows[i]]
f.write("%s\t%s\n" % (name, "\t".join(row)))
f.close()
def main():
matrixfile = sys.argv[1]
clonefile = sys.argv[2]
outfile = sys.argv[3]
names, name2total = read_clonesize(clonefile)
r2c2count = read_matrix(matrixfile)
matrix = normalize_matrix(r2c2count, names, name2total)
print_matrix(names, matrix, outfile)
if __name__ == '__main__':
main()
| {
"content_hash": "7ce32bb85f761571217b45de65bed4a4",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 81,
"avg_line_length": 26.979166666666668,
"alnum_prop": 0.5945945945945946,
"repo_name": "ngannguyen/aimseqtk",
"id": "9455232e6d306c93f3dc8a454e5885b5ac17348c",
"size": "2677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/properties/similarity_normmatrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "239"
},
{
"name": "Python",
"bytes": "473996"
}
],
"symlink_target": ""
} |
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
import numpy as np
import pandas
import statsmodels.formula.api as smf
import statsmodels.api as sm
from scipy.stats import spearmanr
from scipy.stats.stats import skew
__author__ = 'panzer'
def make_formula(model, col_names):
labels = []
for index, col_name in enumerate(model.indep):
if col_name not in col_names: continue
if model.is_continuous[index]:
labels.append(col_name)
else:
labels.append("C(%s, levels=[0,1,2,3,4,5,6])"%col_name)
formula = "%s ~ %s"%(model.less[0], " + ".join(labels))
return formula
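# Illustrative example (column names are made up): with model.less == ["effort"]
# and independent columns ["kloc", "mode"] where only "kloc" is continuous,
# make_formula() returns
#   "effort ~ kloc + C(mode, levels=[0,1,2,3,4,5,6])"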
def lin_reg_pruned(model, test_row, rows):
headers = model.indep + model.less
train_data = [row.cells[:] for row in rows]
continuous_variables = [decision for decision in model.decisions if model.is_continuous[decision]]
transforms = get_transform_funcs(train_data, continuous_variables)
for row in train_data:
transform_row(row, continuous_variables, transforms)
columnar_data = make_columnar(train_data, model.decisions)
column_size = len(columnar_data)
row_size = len(rows)
if row_size < column_size * 10:
pruned_column_size = int(row_size/10)
efforts = [effort(model, row) for row in rows]
ranks = []
for index, column in enumerate(columnar_data):
is_continuous = model.is_continuous[index]
ranks.append((covariance(column, efforts, is_continuous), model.indep[index]))
pruned_headers = [rank[1] for rank in sorted(ranks)[:pruned_column_size]]
else:
pruned_headers = headers
df = pandas.DataFrame(train_data, columns=headers)
lin_model = smf.ols(formula=make_formula(model, pruned_headers), data=df).fit()
test_data = transform_row(test_row.cells[:], continuous_variables, transforms)
df_test = pandas.DataFrame([test_data], columns=headers)
return lin_model.predict(df_test)[0]
def get_transform_funcs(train, cols):
transform_funcs = []
for col in cols:
vector = [row[col] for row in train]
transforms = [
(skew(vector, bias=False), "none"),
(skew(log_transform(vector), bias=False), "log"),
(skew(sqrt_transform(vector), bias=False), "sqrt")
]
best_transform = sorted(transforms)[0][1]
transform_funcs.append(best_transform)
return transform_funcs
def transform_row(row, cols, transforms):
for col, transform in zip(cols, transforms):
if transform == "log":
row[col] = math.log(row[col])
elif transform == "sqrt":
row[col] = math.sqrt(row[col])
elif transform == "none":
continue
else:
raise RuntimeError("Unknown transformation type : %s"%transform)
return row
def log_transform(vector):
transforms = []
for one in vector:
if one == 0:
transforms.append(-float("inf"))
else:
transforms.append(math.log(one))
return transforms
def sqrt_transform(vector):
return [math.sqrt(one) for one in vector]
def make_columnar(rows, columns):
column_data = []
for column in columns:
column_data.append([row[column] for row in rows])
return column_data
def get_column(rows, column_index):
return [row[column_index] for row in rows]
def covariance(x_vector, y_vector, is_continuous):
if is_continuous:
return abs(spearmanr(x_vector, y_vector, nan_policy="omit")[0])
else:
x_vector_str = map(str, x_vector)
labels = list(set(x_vector_str))
y_vector_str = list(pandas.cut(y_vector, len(labels), labels=labels).get_values())
return abs(spearmanr(x_vector_str, y_vector_str, nan_policy="omit")[0])
| {
"content_hash": "2eb07583f23a08a9a557e9c2b427dca1",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 100,
"avg_line_length": 34.01904761904762,
"alnum_prop": 0.6856103023516238,
"repo_name": "ai-se/george",
"id": "b36a435f2393fb038cc9f1bb522e840e45e98358",
"size": "3572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Technix/atlm_pruned.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "240914"
},
{
"name": "R",
"bytes": "2712"
}
],
"symlink_target": ""
} |
"""
Defines LineSplitter and helper functions.
-----
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License. See http://scipy.org.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Author: Pearu Peterson <[email protected]>
Created: May 2006
-----
"""
__all__ = ['String','string_replace_map','splitquote','splitparen']
import re
class String(str): pass
class ParenString(str): pass
def split2(line, lower=False):
"""
Split line into non-string part and into a start of a string part.
Returns 2-tuple. The second item either is empty string or start
of a string part.
"""
return LineSplitter(line,lower=lower).split2()
_f2py_str_findall = re.compile(r"_F2PY_STRING_CONSTANT_\d+_").findall
_is_name = re.compile(r'\w*\Z',re.I).match
_is_simple_str = re.compile(r'\w*\Z',re.I).match
_f2py_findall = re.compile(r'(_F2PY_STRING_CONSTANT_\d+_|F2PY_EXPR_TUPLE_\d+)').findall
class string_replace_dict(dict):
"""
Dictionary object that is callable for applying map returned
by string_replace_map() function.
"""
def __call__(self, line):
for k in _f2py_findall(line):
line = line.replace(k, self[k])
return line
def string_replace_map(line, lower=False,
_cache={'index':0,'pindex':0}):
"""
1) Replaces string constants with symbol `'_F2PY_STRING_CONSTANT_<index>_'`
2) Replaces (expression) with symbol `(F2PY_EXPR_TUPLE_<index>)`
Returns a new line and the replacement map.
"""
items = []
string_map = string_replace_dict()
rev_string_map = {}
for item in splitquote(line, lower=lower)[0]:
if isinstance(item, String) and not _is_simple_str(item[1:-1]):
key = rev_string_map.get(item)
if key is None:
_cache['index'] += 1
index = _cache['index']
key = "_F2PY_STRING_CONSTANT_%s_" % (index)
it = item[1:-1]
string_map[key] = it
rev_string_map[it] = key
items.append(item[0]+key+item[-1])
else:
items.append(item)
newline = ''.join(items)
items = []
expr_keys = []
for item in splitparen(newline):
if isinstance(item, ParenString) and not _is_name(item[1:-1]):
key = rev_string_map.get(item)
if key is None:
_cache['pindex'] += 1
index = _cache['pindex']
key = 'F2PY_EXPR_TUPLE_%s' % (index)
it = item[1:-1].strip()
string_map[key] = it
rev_string_map[it] = key
expr_keys.append(key)
items.append(item[0]+key+item[-1])
else:
items.append(item)
found_keys = set()
for k in expr_keys:
v = string_map[k]
l = _f2py_str_findall(v)
if l:
found_keys = found_keys.union(l)
for k1 in l:
v = v.replace(k1, string_map[k1])
string_map[k] = v
for k in found_keys:
del string_map[k]
return ''.join(items), string_map
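def _string_replace_map_example():
    # Illustrative only (not used by fparser): placeholders round-trip through
    # the returned map. Exact placeholder indices depend on the module-level cache.
    line, smap = string_replace_map('call foo("a b", x+1)')
    # ``line`` now reads like 'call foo(F2PY_EXPR_TUPLE_<n>)'; applying the map
    # restores the original text:
    assert smap(line) == 'call foo("a b", x+1)'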
def splitquote(line, stopchar=None, lower=False, quotechars = '"\''):
"""
Fast LineSplitter
"""
items = []
i = 0
while 1:
try:
char = line[i]; i += 1
except IndexError:
break
l = []
l_append = l.append
nofslashes = 0
if stopchar is None:
# search for string start
while 1:
if char in quotechars and not nofslashes % 2:
stopchar = char
i -= 1
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
if not l: continue
item = ''.join(l)
if lower: item = item.lower()
items.append(item)
continue
if char==stopchar:
# string starts with quotechar
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
if l:
item = String(''.join(l))
items.append(item)
break
# else continued string
while 1:
if char==stopchar and not nofslashes % 2:
l_append(char)
stopchar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
if l:
item = String(''.join(l))
items.append(item)
return items, stopchar
class LineSplitterBase(object):
def __iter__(self):
return self
def __next__(self):
item = ''
while not item:
item = self.get_item() # get_item raises StopIteration
return item
class LineSplitter(LineSplitterBase):
""" Splits a line into non strings and strings. E.g.
abc=\"123\" -> ['abc=','\"123\"']
Handles splitting lines with incomplete string blocks.
"""
def __init__(self, line,
quotechar = None,
lower=False,
):
self.fifo_line = [c for c in line]
self.fifo_line.reverse()
self.quotechar = quotechar
self.lower = lower
def split2(self):
"""
Split line until the first start of a string.
"""
try:
item1 = self.get_item()
except StopIteration:
return '',''
i = len(item1)
l = self.fifo_line[:]
l.reverse()
item2 = ''.join(l)
return item1,item2
def get_item(self):
fifo_pop = self.fifo_line.pop
try:
char = fifo_pop()
except IndexError:
raise StopIteration
fifo_append = self.fifo_line.append
quotechar = self.quotechar
l = []
l_append = l.append
nofslashes = 0
if quotechar is None:
# search for string start
while 1:
if char in '"\'' and not nofslashes % 2:
self.quotechar = char
fifo_append(char)
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
item = ''.join(l)
if self.lower: item = item.lower()
return item
if char==quotechar:
# string starts with quotechar
l_append(char)
try:
char = fifo_pop()
except IndexError:
return String(''.join(l))
# else continued string
while 1:
if char==quotechar and not nofslashes % 2:
l_append(char)
self.quotechar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
return String(''.join(l))
def splitparen(line,paren='()'):
"""
Fast LineSplitterParen.
"""
stopchar = None
startchar, endchar = paren[0],paren[1]
items = []
i = 0
while 1:
try:
char = line[i]; i += 1
except IndexError:
break
nofslashes = 0
l = []
l_append = l.append
if stopchar is None:
# search for parenthesis start
while 1:
if char==startchar and not nofslashes % 2:
stopchar = endchar
i -= 1
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
item = ''.join(l)
else:
nofstarts = 0
while 1:
if char==stopchar and not nofslashes % 2 and nofstarts==1:
l_append(char)
stopchar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
if char==startchar:
nofstarts += 1
elif char==endchar:
nofstarts -= 1
l_append(char)
try:
char = line[i]; i += 1
except IndexError:
break
item = ParenString(''.join(l))
items.append(item)
return items
class LineSplitterParen(LineSplitterBase):
""" Splits a line into strings and strings with parenthesis. E.g.
a(x) = b(c,d) -> ['a','(x)',' = b','(c,d)']
"""
def __init__(self, line, paren = '()'):
self.fifo_line = [c for c in line]
self.fifo_line.reverse()
self.startchar = paren[0]
self.endchar = paren[1]
self.stopchar = None
def get_item(self):
fifo_pop = self.fifo_line.pop
try:
char = fifo_pop()
except IndexError:
raise StopIteration
fifo_append = self.fifo_line.append
startchar = self.startchar
endchar = self.endchar
stopchar = self.stopchar
l = []
l_append = l.append
nofslashes = 0
if stopchar is None:
# search for parenthesis start
while 1:
if char==startchar and not nofslashes % 2:
self.stopchar = endchar
fifo_append(char)
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
item = ''.join(l)
return item
nofstarts = 0
while 1:
if char==stopchar and not nofslashes % 2 and nofstarts==1:
l_append(char)
self.stopchar = None
break
if char=='\\':
nofslashes += 1
else:
nofslashes = 0
if char==startchar:
nofstarts += 1
elif char==endchar:
nofstarts -= 1
l_append(char)
try:
char = fifo_pop()
except IndexError:
break
return ParenString(''.join(l))
def test():
splitter = LineSplitter('abc\\\' def"12\\"3""56"dfad\'a d\'')
l = [item for item in splitter]
assert l==['abc\\\' def','"12\\"3"','"56"','dfad','\'a d\''],repr(l)
assert splitter.quotechar is None
l,stopchar=splitquote('abc\\\' def"12\\"3""56"dfad\'a d\'')
assert l==['abc\\\' def','"12\\"3"','"56"','dfad','\'a d\''],repr(l)
assert stopchar is None
splitter = LineSplitter('"abc123&')
l = [item for item in splitter]
assert l==['"abc123&'],repr(l)
assert splitter.quotechar=='"'
l,stopchar = splitquote('"abc123&')
assert l==['"abc123&'],repr(l)
assert stopchar=='"'
splitter = LineSplitter(' &abc"123','"')
l = [item for item in splitter]
assert l==[' &abc"','123']
assert splitter.quotechar is None
l,stopchar = splitquote(' &abc"123','"')
assert l==[' &abc"','123']
assert stopchar is None
l = split2('')
assert l==('',''),repr(l)
l = split2('12')
assert l==('12',''),repr(l)
l = split2('1"a"//"b"')
assert l==('1','"a"//"b"'),repr(l)
l = split2('"ab"')
assert l==('','"ab"'),repr(l)
splitter = LineSplitterParen('a(b) = b(x,y(1)) b\((a)\)')
l = [item for item in splitter]
assert l==['a', '(b)', ' = b', '(x,y(1))', ' b\\(', '(a)', '\\)'],repr(l)
l = splitparen('a(b) = b(x,y(1)) b\((a)\)')
assert l==['a', '(b)', ' = b', '(x,y(1))', ' b\\(', '(a)', '\\)'],repr(l)
l = string_replace_map('a()')
print(l)
print('ok')
if __name__ == '__main__':
test()
| {
"content_hash": "d5299054fa67842d5f6aac8a89706f68",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 87,
"avg_line_length": 29.978823529411766,
"alnum_prop": 0.4635428930225257,
"repo_name": "elezar/fortran-beautifier",
"id": "6024d61c4a7d23c59df54635f0e500b5b824d553",
"size": "12763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fparser/splitline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "202"
},
{
"name": "Python",
"bytes": "669488"
}
],
"symlink_target": ""
} |
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.util import convert_path
from distutils import log
import distutils.errors
import os
import re
import sys
try:
import packaging.version
except ImportError:
# fallback to vendored version
import setuptools._vendor.packaging.version
packaging = setuptools._vendor.packaging
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.compat import basestring, PY3, StringIO
from setuptools import svn_utils
from setuptools.command.sdist import walk_revctrl
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
class egg_info(Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-svn-revision', 'r',
"Add subversion revision ID to version number"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-svn-revision', 'R',
"Don't add subversion revision ID [default]"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date', 'tag-svn-revision']
negative_opt = {'no-svn-revision': 'tag-svn-revision',
'no-date': 'tag-date'}
def initialize_options(self):
self.egg_name = None
self.egg_version = None
self.egg_base = None
self.egg_info = None
self.tag_build = None
self.tag_svn_revision = 0
self.tag_date = 0
self.broken_egg_info = False
self.vtags = None
def save_version_info(self, filename):
from setuptools.command.setopt import edit_config
values = dict(
egg_info=dict(
tag_svn_revision=0,
tag_date=0,
tag_build=self.tags(),
)
)
edit_config(filename, values)
def finalize_options(self):
self.egg_name = safe_name(self.distribution.get_name())
self.vtags = self.tags()
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
)
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
    ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if PY3:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
writer = ep.load(installer=installer)
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_svn_revision:
rev = self.get_svn_revision()
if rev: # is 0 if it's not an svn working copy
version += '-r%s' % rev
if self.tag_date:
import time
version += time.strftime("-%Y%m%d")
return version
@staticmethod
def get_svn_revision():
return str(svn_utils.SvnInfo.load(os.curdir).get_revision())
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
"""File list that accepts only existing, platform-independent paths"""
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
        # To avoid accidental transcoding errors, decode to unicode first
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
            # accept if either form of the path checks out on the filesystem
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
# this will catch any encode errors decoding u_path
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.filelist.findall()
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
        # _repair() ensured encodability; now decode and normalize to posix separators
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg): # suppress missing-file warnings from sdist
if not msg.startswith("standard file not found:"):
sdist.warn(self, msg)
def add_defaults(self):
sdist.add_defaults(self)
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.include_pattern("*", prefix=ei_cmd.egg_info)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
from setuptools.command import bdist_egg
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
append_cr = lambda line: line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, basestring) or ep is None:
data = ep
elif ep is not None:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, basestring):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
def get_pkg_info_revision():
# See if we can get a -r### off of PKG-INFO, in case this is an sdist of
# a subversion revision
#
if os.path.exists('PKG-INFO'):
f = open('PKG-INFO', 'rU')
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
f.close()
return 0
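# Note (illustrative): the metadata writers above (write_pkg_info,
# write_requirements, write_toplevel_names, ...) are not called directly;
# egg_info.run() discovers them through the 'egg_info.writers' entry-point
# group and invokes each as writer(cmd, basename, filename). Setuptools
# registers them in its own setup.py roughly like:
#
#   [egg_info.writers]
#   PKG-INFO = setuptools.command.egg_info:write_pkg_info
#   requires.txt = setuptools.command.egg_info:write_requirements
#   top_level.txt = setuptools.command.egg_info:write_toplevel_names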
| {
"content_hash": "78de1412bed3498048ac7f193b700dd4",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 79,
"avg_line_length": 34.83221476510067,
"alnum_prop": 0.5958895311496467,
"repo_name": "boooka/GeoPowerOff",
"id": "43df87dcb547a4902aa323ef596cc8c029d74415",
"size": "15570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/setuptools/command/egg_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "38253"
},
{
"name": "CSS",
"bytes": "102019"
},
{
"name": "JavaScript",
"bytes": "121188"
},
{
"name": "Python",
"bytes": "7232605"
},
{
"name": "Shell",
"bytes": "3777"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class RestAPIConfig(AppConfig):
name = 'osmaxx.rest_api'
verbose_name = _("Rest API")
| {
"content_hash": "398d2aa754a9800f125fab751ed03868",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.7351351351351352,
"repo_name": "geometalab/osmaxx-frontend",
"id": "478e5dd77c6d14e67e0f06782540703b4647bb36",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osmaxx/rest_api/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26077"
},
{
"name": "HTML",
"bytes": "22722"
},
{
"name": "JavaScript",
"bytes": "271988"
},
{
"name": "Python",
"bytes": "194135"
},
{
"name": "Shell",
"bytes": "823"
}
],
"symlink_target": ""
} |
"""Tests for conversions from markdown to other formats"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from copy import copy
from IPython.utils.py3compat import string_types
from IPython.testing import decorators as dec
from ...tests.base import TestsBase
from ..markdown import markdown2latex, markdown2html, markdown2rst
from jinja2 import Environment
class TestMarkdown(TestsBase):
tests = [
'*test',
'**test',
'*test*',
'_test_',
'__test__',
'__*test*__',
'**test**',
'#test',
'##test',
'test\n----',
'test [link](https://google.com/)',
]
tokens = [
'*test',
'**test',
'test',
'test',
'test',
'test',
'test',
'test',
'test',
'test',
('test', 'https://google.com/'),
]
@dec.onlyif_cmds_exist('pandoc')
def test_markdown2latex(self):
"""markdown2latex test"""
for index, test in enumerate(self.tests):
self._try_markdown(markdown2latex, test, self.tokens[index])
@dec.onlyif_cmds_exist('pandoc')
def test_markdown2latex_markup(self):
"""markdown2latex with markup kwarg test"""
# This string should be passed through unaltered with pandoc's
# markdown_strict reader
s = '1) arabic number with parenthesis'
self.assertEqual(markdown2latex(s, markup='markdown_strict'), s)
# This string should be passed through unaltered with pandoc's
# markdown_strict+tex_math_dollars reader
s = r'$\alpha$ latex math'
# sometimes pandoc uses $math$, sometimes it uses \(math\)
expected = re.compile(r'(\$|\\\()\\alpha(\$|\\\)) latex math')
try:
# py3
assertRegex = self.assertRegex
except AttributeError:
# py2
assertRegex = self.assertRegexpMatches
assertRegex(
markdown2latex(s, markup='markdown_strict+tex_math_dollars'),
expected)
@dec.onlyif_cmds_exist('pandoc')
def test_pandoc_extra_args(self):
# pass --no-wrap
s = '\n'.join([
"#latex {{long_line | md2l('markdown', ['--no-wrap'])}}",
"#rst {{long_line | md2r(['--columns', '5'])}}",
])
long_line = ' '.join(['long'] * 30)
env = Environment()
env.filters.update({
'md2l': markdown2latex,
'md2r': markdown2rst,
})
tpl = env.from_string(s)
rendered = tpl.render(long_line=long_line)
_, latex, rst = rendered.split('#')
self.assertEqual(latex.strip(), 'latex %s' % long_line)
self.assertEqual(rst.strip(), 'rst %s' % long_line.replace(' ', '\n'))
def test_markdown2html(self):
"""markdown2html test"""
for index, test in enumerate(self.tests):
self._try_markdown(markdown2html, test, self.tokens[index])
def test_markdown2html_heading_anchors(self):
for md, tokens in [
('# test',
('<h1', '>test', 'id="test"', u'¶</a>', "anchor-link")
),
('###test head space',
('<h3', '>test head space', 'id="test-head-space"',
u'¶</a>', "anchor-link")
)
]:
self._try_markdown(markdown2html, md, tokens)
def test_markdown2html_math(self):
# Mathematical expressions should be passed through unaltered
cases = [("\\begin{equation*}\n"
"\\left( \\sum_{k=1}^n a_k b_k \\right)^2 \\leq \\left( \\sum_{k=1}^n a_k^2 \\right) \\left( \\sum_{k=1}^n b_k^2 \\right)\n"
"\\end{equation*}"),
("$$\n"
"a = 1 *3* 5\n"
"$$"),
"$ a = 1 *3* 5 $",
]
for case in cases:
self.assertIn(case, markdown2html(case))
def test_markdown2html_math_paragraph(self):
# https://github.com/ipython/ipython/issues/6724
a = """Water that is stored in $t$, $s_t$, must equal the storage content of the previous stage,
$s_{t-1}$, plus a stochastic inflow, $I_t$, minus what is being released in $t$, $r_t$.
With $s_0$ defined as the initial storage content in $t=1$, we have"""
self.assertIn(a, markdown2html(a))
@dec.onlyif_cmds_exist('pandoc')
def test_markdown2rst(self):
"""markdown2rst test"""
# Modify token array for rst, escape asterik
tokens = copy(self.tokens)
tokens[0] = r'\*test'
tokens[1] = r'\*\*test'
for index, test in enumerate(self.tests):
self._try_markdown(markdown2rst, test, tokens[index])
def _try_markdown(self, method, test, tokens):
results = method(test)
if isinstance(tokens, string_types):
assert tokens in results
else:
for token in tokens:
assert token in results
| {
"content_hash": "6084f762837254e11b75ab41f9462af3",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 142,
"avg_line_length": 33.37748344370861,
"alnum_prop": 0.5412698412698412,
"repo_name": "mattvonrocketstein/smash",
"id": "9ffcb8a5eb06d626d86cac97cc601f943222c063",
"size": "5056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/nbconvert/filters/tests/test_markdown.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
} |
'test pep8'
import os
from integration.python_integration_check_base import run_command, \
AbstractPythonCheckerTest
# pylint: disable=R0904,C0111
class Pep8Test(AbstractPythonCheckerTest):
def test_all(self):
cmd = ["unilint", "-spep8", "-l0"]
output = run_command(self.stackfolder, cmd)
self.assertEqual(2, output.count('indentation'), output)
self.assertEqual(2, output.count('missing whitespace after'), output)
self.assertEqual(2, output.count('expected 2 blank lines'), output)
self.assertEquals(3, output.count('pack/bin/foo :'), output)
self.assertEquals(5, output.count('pack/src/foo.py :'), output)
self.assertEquals(0, output.count('/pack/bin/foo'), output)
self.assertEquals(0, output.count('/pack/src/foo'), output)
def test_all_pack(self):
cmd = ["unilint", "pack", "-spep8", "-l0"]
output = run_command(self.stackfolder, cmd)
self.assertEqual(2, output.count('indentation'), output)
self.assertEqual(2, output.count('missing whitespace after'), output)
self.assertEqual(2, output.count('expected 2 blank lines'), output)
self.assertEquals(3, output.count('bin/foo :'), output)
self.assertEquals(5, output.count('src/foo.py :'), output)
self.assertEquals(0, output.count('/bin/foo'), output)
self.assertEquals(0, output.count('/src/foo'), output)
def test_all_pack_l10(self):
cmd = ["unilint", "pack", "-spep8", "-l10"]
output = run_command(self.stackfolder, cmd)
self.assertEqual(2, output.count('indentation'), output)
self.assertEqual(2, output.count('missing whitespace after'), output)
self.assertEqual(2, output.count('expected 2 blank lines'), output)
def test_all_bin(self):
cmd = ["unilint", "pack/bin", "-spep8", "-l0"]
output = run_command(self.stackfolder, cmd)
self.assertEqual(1, output.count('indentation'), output)
self.assertEqual(1, output.count('missing whitespace after'), output)
self.assertEqual(1, output.count('expected 2 blank lines'), output)
self.assertEquals(3, output.count('foo :'), output)
self.assertEquals(0, output.count('/foo'), output)
def test_src_file(self):
cmd = ["unilint", os.path.join("src", "foo.py"), "-spep8", "-l0"]
output = run_command(self.package_path, cmd)
self.assertEqual(1, output.count('indentation'), output)
self.assertEqual(1, output.count('missing whitespace after'), output)
self.assertEqual(1, output.count('expected 2 blank lines'), output)
self.assertEquals(5, output.count('foo.py :'), output)
self.assertEquals(0, output.count('/foo.py'), output)
| {
"content_hash": "76cb9b35eaa0b6e2899927cf82a95878",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 49.17857142857143,
"alnum_prop": 0.6474219317356572,
"repo_name": "tkruse/unilint",
"id": "0c9ff4b721d754002726de007d6bd52a4df3b129",
"size": "2754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration/pep8_test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "125362"
}
],
"symlink_target": ""
} |
"""Test/benchmark deep_dream.py."""
from pathlib import Path
import logging
import time
import click
from PIL import Image
import deep_dream as dd
from deep_dream import tile_worker
import utils
utils.setup_traceback()
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(dd.stream)
handler.setFormatter(utils.ColorFormatter())
logging.basicConfig(level=logging.INFO, handlers=[handler])
tile_worker.logger.setLevel(logging.DEBUG)
@click.command()
@click.option('--cpu-workers', default=0, help='The number of CPU workers to start.')
@click.option('--gpus', type=utils.List(int, 'integer'), default='',
help='The CUDA device IDs to use.')
@click.option('--max-tile-size', default=512, help='The maximum dimension of a tile.')
def main(cpu_workers=None, gpus=None, max_tile_size=None):
"""Test/benchmark deep_dream.py."""
pwd = Path(__file__).parent
cnn = dd.CNN(dd.GOOGLENET_BVLC, cpu_workers=cpu_workers, gpus=gpus)
input_img = Image.open(str(pwd/'kodim/img0022.jpg')).resize((1536, 1024), Image.LANCZOS)
print('Input image classes:')
for category in cnn.classify(input_img, 5):
print('%.3f %s' % category)
cnn.dream(input_img, 'inception_3a/3x3', min_size=129, n=1, max_tile_size=max_tile_size)
time_0 = time.perf_counter()
img = cnn.dream(input_img, 'inception_3a/3x3', min_size=129, n=10, step_size=1,
max_tile_size=max_tile_size)
time_1 = time.perf_counter()
print('Input image classes:')
for category in cnn.classify(img, 5):
print('%.3f %s' % category)
print('Time taken: %.3f s' % (time_1-time_0))
dd.to_image(img).save('test_output.png')
print('Saved to test_output.png.')
if __name__ == '__main__':
main()
| {
"content_hash": "1f12dbd7a825a1b00c11c0be047289cb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 92,
"avg_line_length": 36.520833333333336,
"alnum_prop": 0.6714204221334854,
"repo_name": "crowsonkb/deep_dream",
"id": "2246bd6a95c87368143d260f562bb0912381b9bb",
"size": "1777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_dream_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1273225"
},
{
"name": "Python",
"bytes": "30276"
},
{
"name": "Shell",
"bytes": "547"
}
],
"symlink_target": ""
} |
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from .base import * # noqa
from .base import DATABASES, MIDDLEWARE, env
SECRET_KEY = env("SECRET_KEY")
# CELERY_BROKER_URL = env("CELERY_BROKER_URL")
USE_SCHEDULER = False
# if FRONTEND_URL == "http://testfrontend.it/":
# raise ImproperlyConfigured("Please configure FRONTEND_URL for production")
SENTRY_DSN = env("SENTRY_DSN", default="")
if SENTRY_DSN:
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()])
SLACK_INCOMING_WEBHOOK_URL = env("SLACK_INCOMING_WEBHOOK_URL")
DEFAULT_FILE_STORAGE = env(
"DEFAULT_FILE_STORAGE", default="storages.backends.s3boto3.S3Boto3Storage"
)
AWS_STORAGE_BUCKET_NAME = env("AWS_MEDIA_BUCKET", default=None)
AWS_S3_REGION_NAME = env("AWS_REGION_NAME", default="eu-central-1")
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID", default=None)
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY", default=None)
EMAIL_BACKEND = env(
"EMAIL_BACKEND", default="django.core.mail.backends.locmem.EmailBackend"
)
FORCE_PYCON_HOST = env("FORCE_PYCON_HOST", bool, default=True)
if FORCE_PYCON_HOST: # pragma: no cover
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
MIDDLEWARE += ["pycon.middleware.force_pycon_host"]
DEFAULT_FROM_EMAIL = "[email protected]"
SIMULATE_PRETIX_DB = False
DATABASES["pretix"] = {**DATABASES["default"], "NAME": "pretix"}
| {
"content_hash": "ca7fb94d608fe7e6bea6774cfc0ff73f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 32.83720930232558,
"alnum_prop": 0.726628895184136,
"repo_name": "patrick91/pycon",
"id": "58a2bfaa37911f1a55f1a86d99af8a4bf4e5fa47",
"size": "1412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/pycon/settings/prod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
} |
import requests
from bs4 import BeautifulSoup
from bs4.dammit import EncodingDetector
import urllib.parse
import logging
import time
import reppy
import re
class Spider:
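    """Minimal polite crawler: pulls URLs from the supplied database cursor,
    fetches pages that robots.txt allows (honouring any crawl delay), stores
    the raw HTML through the supplied store object and queues newly
    discovered links for later crawling."""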
__headers = {'User-Agent': 'MedBot', 'Content-type': 'text/html'}
__robots_agent = {}
__base_urls = set()
__unknown_urls = []
__default = 10
def __init__(self, db_cursor, store):
self.__db_cursor = db_cursor
self.__store = store
def get_urls(self, url, delay):
time.sleep(delay)
result = []
try:
parsed_url = urllib.parse.urlparse(url)
except ValueError as e:
logging.warning(str(e))
return result
if parsed_url.netloc not in self.__base_urls:
self.__unknown_urls.append(url)
return result
try:
response = requests.get(url, headers=self.__headers)
if response.status_code != requests.codes.ok:
logging.warning("Invalid status code " + response.status_code)
return result
http_encoding = response.encoding if 'charset' in response.headers.get('content-type', '').lower() else None
html_encoding = EncodingDetector.find_declared_encoding(response.content, is_html=True)
encoding = html_encoding or http_encoding
soup = BeautifulSoup(response.content, "lxml", from_encoding=encoding)
try:
self.__store.store(url, response.content.decode(encoding))
except FileNotFoundError as e:
logging.warning('url = ' + url + ' ' + str(e))
self.__db_cursor.add_data(url)
for tag in soup.find_all('a', href=True):
if tag is None:
logging.warning("invalid tag in link " + url)
continue
result.append(urllib.parse.urljoin(url, tag['href']))
self.__db_cursor.update_incoming_links(result)
except requests.exceptions.ReadTimeout:
logging.error("Read timeout")
except requests.exceptions.ConnectTimeout:
logging.error("Connect timeout")
        except Exception as e:
            logging.error("Exception: %s" % e)
finally:
return result
def process_url(self, url):
regex = re.compile(
r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
m = regex.match(url)
if not m:
logging.info("Invalid url " + url)
return
agent = None
try:
robots_url = reppy.Robots.robots_url(url)
if robots_url not in self.__robots_agent:
robots = reppy.Robots.fetch(robots_url)
agent = robots.agent(self.__headers["User-Agent"])
self.__robots_agent[robots_url] = agent
agent = self.__robots_agent[robots_url]
if not agent.allowed(url):
logging.info("Disallow crawling " + url)
return
        except Exception:
            logging.error("Parse Robot.txt " + url)
if agent is None or agent.delay is None:
delay = 0.5
else:
delay = agent.delay
urls = self.get_urls(url, delay)
self.__db_cursor.add_url(urls)
def spider(self):
for url in self.__db_cursor.get_base():
self.__base_urls.add(url)
urls = self.__db_cursor.get_url(self.__default)
while urls:
for url in urls:
self.process_url(url)
urls = self.__db_cursor.get_url(self.__default)
if not urls and self.__unknown_urls:
for url in self.__db_cursor.get_base():
self.__base_urls.add(url)
urls = self.__unknown_urls
self.__unknown_urls.clear()
| {
"content_hash": "2fcf861e65e318c37972aa1f658e88d2",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 120,
"avg_line_length": 36.16216216216216,
"alnum_prop": 0.5311410064773293,
"repo_name": "YorovSobir/informational_retrieval",
"id": "475952eeda3e3c53e7be9d203c1ae47eb0cf2c61",
"size": "4014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/spider.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38855"
}
],
"symlink_target": ""
} |
from PIL import Image
SHRED_WIDTH = 32
NUMBER_SHREDS = 20
class Region(object):
def __init__(self, x1, y1, x2, y2):
self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
    def __unicode__(self):
        return "({0},{1},{2},{3})".format(self.x1, self.y1, self.x2, self.y2)
class Shred(object):
def __init__(self, image, region, id):
self.id = id
self.image = image
self.region = region
self.data = self.image.getdata()
self.left_edge, self.right_edge = self.get_edge_data()
self.left_scores, self.right_scores = {}, {}
def get_edge_data(self):
left, right = [],[]
current = left
for x in (self.region.x1, self.region.x2-1):
for y in range(self.region.y1, self.region.y2):
current.append(self.get_pixel_value(x, y))
current = right
return left, right
def get_pixel_value(self, x, y):
width, height = self.image.size
# Go y rows down and then x pixels over
pixel = self.data[y * width + x]
return pixel
def compare(self, my_edge, other_edge):
score = 0
for index, pixel in enumerate(my_edge):
other_pixel = other_edge[index]
pixels = zip(pixel, other_pixel)
for value in pixels:
if abs(value[0]-value[1]) > 10:
score -= 1
return score
def compare_left(self, other_shred):
return self.compare(self.left_edge, other_shred.right_edge)
def compare_right(self, other_shred):
return self.compare(self.right_edge, other_shred.left_edge)
def closest_left_score(self):
return max(self.left_scores.items(),key=lambda item:item[0])
def next_closest_right_score(self):
return sorted(self.right_scores.items(), key=lambda item:item[0])[1]
def closest_right_score(self):
return max(self.right_scores.items(), key=lambda item:item[0])
def total_left_similarity(self):
return sum(self.left_scores.keys())
def total_right_similarity(self):
return sum(self.right_scores.keys())
class Unshredder(object):
def compute_shred_width(self):
import math
max_y = self.image.size[1]
max_x = self.image.size[0]
min_x = 0
guesses = {}
for y in range(max_y):
last = None
for x in range(y*max_x, y*max_x+int(max_x/2)):
next = self.data[x]
if last:
diff = math.sqrt((last[0]-next[0])**2+(last[1]-next[1])**2+(last[2]-next[2])**2)
if diff > 150:
if x%max_x in guesses:
guesses[x%max_x] += 1
else:
guesses[x%max_x] = 1
break
last = next
# print sorted(guesses.items(), key=lambda x: -x[1])
return max(guesses.items(),key=lambda x: x[1])[0]
def __init__(self, image_name, num_shreds = None):
self.image = Image.open(image_name)
self.data = self.image.getdata()
self.shreds = []
global SHRED_WIDTH
global NUMBER_SHREDS
if num_shreds:
NUMBER_SHREDS = num_shreds
SHRED_WIDTH = self.image.size[0]/NUMBER_SHREDS
else:
SHRED_WIDTH = self.compute_shred_width()
NUMBER_SHREDS = self.image.size[0]/SHRED_WIDTH
print "Guessing there are {0} shreds each {1} pixels wide. If this is wrong you can give a hint with the -s option!".format(NUMBER_SHREDS, SHRED_WIDTH)
# SHRED_WIDTH = self.image.size[0]/NUMBER_SHREDS
x1, y1, x2, y2 = 0, 0, SHRED_WIDTH, self.image.size[1]
for i in range(NUMBER_SHREDS):
region = Region(x1, y1, x2, y2)
self.shreds.append(Shred(self.image, region, i))
x1 += SHRED_WIDTH
x2 += SHRED_WIDTH
def solve(self):
scores = {}
for index, shred in enumerate(self.shreds):
for shred2 in self.shreds[index+1:]:
left_score = shred.compare_left(shred2)
right_score = shred.compare_right(shred2)
shred.left_scores[left_score] = shred2
shred.right_scores[right_score] = shred2
shred2.left_scores[right_score] = shred
shred2.right_scores[left_score] = shred
scores[(shred2, shred)] = left_score
scores[(shred, shred2)] = right_score
left = min(self.shreds, key=lambda shred:shred.closest_left_score()[0])
right = min(self.shreds, key=lambda shred:shred.closest_right_score()[0])
unshredded = Image.new("RGBA", self.image.size)
x1, y1, x2, y2 = left.id*SHRED_WIDTH, 0, left.id*SHRED_WIDTH+SHRED_WIDTH, self.image.size[1]
source_region = self.image.crop((x1,y1,x2,y2))
destination_point = (0,0)
unshredded.paste(source_region, destination_point)
shreds_pasted = 1
last_shred = left
working_shreds = set(self.shreds)
while shreds_pasted < NUMBER_SHREDS:
working_shreds.remove(last_shred)
relevant_scores = [(shred, scores[(last_shred, shred)]) for shred in working_shreds]
# for shred in working_shreds:
# relevant_scores.append((shred,scores[(last_shred, shred)]))
next_shred = max(relevant_scores,key=lambda item:item[1])[0]
x1 = next_shred.id * SHRED_WIDTH
            # crop exactly one shred width starting at this shred's left edge
            x2 = x1 + SHRED_WIDTH
destination_point = (destination_point[0]+SHRED_WIDTH, 0)
source_region = self.image.crop((x1, y1, x2, y2))
unshredded.paste(source_region, destination_point)
last_shred = next_shred
shreds_pasted += 1
unshredded.save("unshredded.jpg", "JPEG")
def run():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f',default='TokyoPanoramaShredded.png')
parser.add_argument('-s',default=None,type=int)
args = parser.parse_args()
filename = args.f
unshredder = Unshredder(filename,args.s)
unshredder.solve()
if __name__ == "__main__":
run()
| {
"content_hash": "43bc3d622b9552e0b4dde96910f9edac",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 159,
"avg_line_length": 37.124260355029584,
"alnum_prop": 0.5615237488045903,
"repo_name": "ehopealot/image-unshredder",
"id": "0591bcedb62f484cea0fa21d0e40de2ea772a98a",
"size": "6274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unshredder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4597"
}
],
"symlink_target": ""
} |
"""
Connection classes to connect to the Lookup/Ibis web service and allow API
methods to be invoked.
"""
import base64
from datetime import date
from httplib import HTTPSConnection
import socket
import os
import urllib
from dto import IbisDto, IbisError, IbisResult, IbisResultParser
try:
import ssl
_have_ssl = True
except ImportError:
print "WARNING: No SSL support - connection may be insecure"
_have_ssl = False
_MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
class IbisException(Exception):
"""
Exception thrown when a web service API method fails. This is wrapper
around the :any:`IbisError` object returned by the server, which contains
the full details of what went wrong.
.. codeauthor:: Dean Rasheed ([email protected])
"""
def __init__(self, error):
Exception.__init__(self, error.message)
self.error = error
def get_error(self):
"""
Returns the underlying error from the server.
**Returns**
:any:`IbisError`
The underlying error from the server.
"""
return self.error
class HTTPSValidatingConnection(HTTPSConnection):
"""
Class extending the standard :py:class:`HTTPSConnection` class from
:any:`httplib`, so that it checks the server's certificates, validating
them against the specified CA certificates.
.. codeauthor:: Dean Rasheed ([email protected])
"""
def __init__(self, host, port, ca_certs):
HTTPSConnection.__init__(self, host, port)
self.ca_certs = ca_certs
def connect(self):
"""
Overridden connect() method to wrap the socket using an SSLSocket,
and check the server certificates.
"""
try:
self.sock = socket.create_connection((self.host, self.port))
except AttributeError:
HTTPSConnection.connect(self)
if not _have_ssl:
# No SSL available - insecure connection
print "WARNING: No SSL support - connection may be insecure"
elif self.ca_certs:
# Wrap the socket in an SSLSocket, and tell it to validate
# the server certificates. Note that this does not check that
# the certificate's host matches, so we must do that ourselves.
self.sock = ssl.wrap_socket(self.sock,
ca_certs = self.ca_certs,
cert_reqs = ssl.CERT_REQUIRED,
ssl_version = ssl.PROTOCOL_TLSv1)
cert = self.sock.getpeercert()
cert_hosts = []
host_valid = False
if "subject" in cert:
for x in cert["subject"]:
if x[0][0] == "commonName":
cert_hosts.append(x[0][1])
if "subjectAltName" in cert:
for x in cert["subjectAltName"]:
if x[0] == "dns":
cert_hosts.append(x[1])
for cert_host in cert_hosts:
if self.host.startswith(cert_host):
host_valid = True
if not host_valid:
raise ssl.SSLError("Host name '%s' doesn't match "\
"certificate host %s"\
% (self.host, str(cert_hosts)))
else:
# No CA certificates supplied, so can't validate the server
# certificates, but we still wrap the socket in an SSLSocket
# so that all data is encrypted.
self.sock = ssl.wrap_socket(self.sock,
ca_certs = None,
cert_reqs = ssl.CERT_NONE,
ssl_version = ssl.PROTOCOL_TLSv1)
class IbisClientConnection:
"""
Class to connect to the Lookup/Ibis server and invoke web service API
methods.
.. codeauthor:: Dean Rasheed ([email protected])
"""
def __init__(self, host, port, url_base, check_certs):
self.host = host
self.port = port
self.url_base = url_base
if not self.url_base.startswith("/"):
self.url_base = "/%s" % self.url_base
if not self.url_base.endswith("/"):
self.url_base = "%s/" % self.url_base
if check_certs:
ibisclient_dir = os.path.realpath(os.path.dirname(__file__))
self.ca_certs = os.path.join(ibisclient_dir, "cacerts.txt")
else:
self.ca_certs = None
self.username = None
self.password = None
self.set_username("anonymous")
def _update_authorization(self):
credentials = "%s:%s" % (self.username, self.password)
self.authorization = "Basic %s" % base64.b64encode(credentials)
def set_username(self, username):
"""
Set the username to use when connecting to the Lookup/Ibis web
service. By default connections are anonymous, which gives read-only
access. This method enables authentication as a group, using the
group's password, which gives read/write access and also access to
certain non-public data, based on the group's privileges.
This method may be called at any time, and affects all subsequent
access using this connection, but does not affect any other
:any:`IbisClientConnection` objects.
**Parameters**
`username` : str
[required] The username to connect as. This should either be
``"anonymous"`` (the default) or the name of a group.
"""
self.username = username
self._update_authorization()
def set_password(self, password):
"""
Set the password to use when connecting to the Lookup/Ibis web
service. This is only necessary when connecting as a group, in
which case it should be that group's password.
**Parameters**
`password` : str
[required] The group password.
"""
self.password = password
self._update_authorization()
def _params_to_strings(self, params):
"""
Convert the values in a parameter map into strings suitable for
sending to the server. Any null values will be omitted.
"""
new_params = {}
for key, value in params.iteritems():
if value != None:
if isinstance(value, bool):
if value: new_params[key] = "true"
else: new_params[key] = "false"
elif isinstance(value, date):
new_params[key] = "%02d %s %d" % (value.day,
_MONTHS[value.month-1],
value.year)
elif isinstance(value, list) or isinstance(value, tuple):
new_params[key] = ",".join(value)
elif isinstance(value, IbisDto):
new_params[key] = value.encoded_string()
elif not isinstance(value, str):
new_params[key] = str(value)
else:
new_params[key] = value
return new_params
def _build_url(self, path, path_params={}, query_params={}):
"""
Build the full URL needed to invoke a method in the web service API.
The path may contain standard Python format specifiers, which will
be substituted from the path parameters (suitably URL-encoded). Thus
for example, given the following arguments:
* path = "api/v1/person/%(scheme)s/%(identifier)s"
* path_params = {"scheme": "crsid", "identifier": "dar17"}
* query_params = {"fetch": "email,title"}
this method will create a URL like the following:
api/v1/person/crsid/dar17?fetch=email%2Ctitle
Note that all parameter values are automatically URL-encoded.
"""
for key, value in path_params.iteritems():
path_params[key] = urllib.quote_plus(value)
path = path % path_params
        if "flatten" not in query_params:
query_params["flatten"] = "true"
path += "?%s" % urllib.urlencode(query_params)
if path.startswith("/"):
return "%s%s" % (self.url_base, path[1:])
return "%s%s" % (self.url_base, path)
def invoke_method(self, method, path, path_params={},
query_params={}, form_params={}):
"""
Invoke a web service GET, POST, PUT or DELETE method.
The path should be the relative path to the method with standard
Python format specifiers for any path parameters, for example
``"/api/v1/person/%(scheme)s/%(identifier)s"``. Any path parameters
specified are then substituted into the path.
**Parameters**
`method` : str
[required] The method type (``"GET"``, ``"POST"``, ``"PUT"`` or
``"DELETE"``).
`path` : str
[required] The path to the method to invoke.
`path_params` : dict
[optional] Any path parameters that should be inserted into the
path in place of any format specifiers.
`query_params` : dict
[optional] Any query parameters to add as part of the URL's query
string.
`form_params` : dict
[optional] Any form parameters to submit.
**Returns**
:any:`IbisResult`
The result of invoking the method.
"""
path_params = self._params_to_strings(path_params)
query_params = self._params_to_strings(query_params)
form_params = self._params_to_strings(form_params)
conn = HTTPSValidatingConnection(self.host, self.port, self.ca_certs)
url = self._build_url(path, path_params, query_params)
headers = {"Accept": "application/xml",
"Authorization": self.authorization}
if form_params:
body = urllib.urlencode(form_params)
conn.request(method, url, body, headers)
else:
conn.request(method, url, headers=headers)
response = conn.getresponse()
content_type = response.getheader("Content-type")
if content_type != "application/xml":
error = IbisError({"status": response.status,
"code": response.reason})
error.message = "Unexpected result from server"
error.details = response.read()
result = IbisResult()
result.error = error
return result
parser = IbisResultParser()
result = parser.parse_xml(response.read())
conn.close()
return result
def createConnection():
"""
Create an IbisClientConnection to the Lookup/Ibis web service API at
https://www.lookup.cam.ac.uk/.
The connection is initially anonymous, but this may be changed using
its :any:`set_username() <IbisClientConnection.set_username>` and
:any:`set_password() <IbisClientConnection.set_password>` methods.
**Returns**
:any:`IbisClientConnection`
A new connection to the Lookup server.
"""
return IbisClientConnection("www.lookup.cam.ac.uk", 443, "", True)
def createTestConnection():
"""
Create an IbisClientConnection to the Lookup/Ibis test web service API
at https://lookup-test.csx.cam.ac.uk/.
The connection is initially anonymous, but this may be changed using
its :any:`set_username() <IbisClientConnection.set_username>` and
:any:`set_password() <IbisClientConnection.set_password>` methods.
.. note::
This test server is not guaranteed to always be available, and the data
on it may be out of sync with the data on the live system.
**Returns**
:any:`IbisClientConnection`
A new connection to the Lookup test server.
"""
return IbisClientConnection("lookup-test.csx.cam.ac.uk", 443, "", True)
def createLocalConnection():
"""
Create an IbisClientConnection to a Lookup/Ibis web service API running
locally on https://localhost:8443/ibis/.
The connection is initially anonymous, but this may be changed using
its :any:`set_username() <IbisClientConnection.set_username>` and
:any:`set_password() <IbisClientConnection.set_password>` methods.
This is intended for testing during development. The local server is
assumed to be using self-signed certificates, which will not be checked.
**Returns**
:any:`IbisClientConnection`
A new connection to a Lookup server assumed to be running on
localhost.
"""
return IbisClientConnection("localhost", 8443, "ibis", False)
| {
"content_hash": "eb7518c16038d4a502a39e379a5bdd07",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 77,
"avg_line_length": 36.919770773638966,
"alnum_prop": 0.5829258828094683,
"repo_name": "abrahammartin/django-ucamlookup",
"id": "9557c8d8af9422459237ec5dac0dcb1c313c3915",
"size": "13869",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.0",
"path": "ucamlookup/ibisclient/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17062"
},
{
"name": "HTML",
"bytes": "7054"
},
{
"name": "JavaScript",
"bytes": "284598"
},
{
"name": "Python",
"bytes": "170041"
}
],
"symlink_target": ""
} |
from app import create_app
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('Testing')
self.app_context = self.app.app_context()
self.app_context.push()
self.client = self.app.test_client()
def tearDown(self):
self.app_context.pop()
def test_train_nlu(self):
rv = self.client.post('/nlu/build_models', json={}, follow_redirects=True)
assert rv.status_code == 200
if __name__ == '__main__':
unittest.main() | {
"content_hash": "7e81a4f92f58e0a0cb6aed1b6fa885d5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 82,
"avg_line_length": 27.473684210526315,
"alnum_prop": 0.6187739463601533,
"repo_name": "alfredfrancis/ai-chatbot-framework",
"id": "c86ae8985a9d45c5ce818694e545ff7fe6d7078b",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_nlu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4686"
},
{
"name": "Dockerfile",
"bytes": "1305"
},
{
"name": "HTML",
"bytes": "16833"
},
{
"name": "JavaScript",
"bytes": "9107"
},
{
"name": "Procfile",
"bytes": "116"
},
{
"name": "Python",
"bytes": "48118"
},
{
"name": "SCSS",
"bytes": "4384"
},
{
"name": "Smarty",
"bytes": "1912"
},
{
"name": "TypeScript",
"bytes": "43583"
}
],
"symlink_target": ""
} |
import csv
from mapping_classes import FunctionMapper
import sys
def build_key_func_dict(fields, hashing_func=None, separator="|"):
if fields.__class__ not in ([].__class__, ().__class__):
fields = [fields]
def hash_func(input_dict):
key_list = []
for field in fields:
key_list += [input_dict[field]]
key_list = [kl for kl in key_list if kl is not None and len(kl)]
key_string = separator.join(key_list)
if hashing_func is not None:
key_string = hashing_func(key_string)
return key_string
return hash_func
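# For example, build_key_func_dict(["first_name", "last_name"]) returns a
# function that maps {"first_name": "Ada", "last_name": "Lovelace"} to the
# key string "Ada|Lovelace" (None and empty values are dropped before joining).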
def build_name_lookup_csv(input_csv_file_name, output_csv_file_name, field_names, key_fields, hashing_func=None):
"""Build a table to lookup values"""
lookup_dict = {}
key_func = build_key_func_dict(key_fields, hashing_func=hashing_func)
with open(input_csv_file_name, "r", newline="", errors="replace") as f:
csv_dict = csv.DictReader(f)
for row_dict in csv_dict:
key_str = key_func(row_dict)
new_dict = {}
for field_name in field_names:
new_dict[field_name] = row_dict[field_name]
lookup_dict[key_str] = new_dict
with open(output_csv_file_name, "w", newline="", errors="replace") as fw:
csv_writer = csv.writer(fw)
i = 0
for key_name in lookup_dict:
row_dict = lookup_dict[key_name]
if i == 0:
row_field_names = list(row_dict.keys())
header = ["key_name"] + row_field_names
csv_writer.writerow(header)
if len(key_name):
row_to_write = [key_name]
for field_name in row_field_names:
row_to_write += [row_dict[field_name]]
csv_writer.writerow(row_to_write)
i += 1
return FunctionMapper(key_func)
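# A minimal usage sketch (file and field names below are hypothetical):
#
#     mapper = build_name_lookup_csv("patients.csv", "patient_lookup.csv",
#                                    field_names=["patient_id", "gender"],
#                                    key_fields=["first_name", "last_name"])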
| {
"content_hash": "307efd69a21bd5f3e2ecb621d942bb34",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 113,
"avg_line_length": 29.828125,
"alnum_prop": 0.5605028810895757,
"repo_name": "jhajagos/CommonDataModelMapper",
"id": "cdfcc0bdf264fd9d97065d107a4dc7c79e0d18f8",
"size": "1909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/prepared_source_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "421825"
}
],
"symlink_target": ""
} |
import io
import importlib
import os
import shutil
import pytest
from optimus.setup_project import setup_project
from optimus.conf.loader import import_pages_module
from optimus.pages.builder import PageBuilder
from optimus.assets.registry import register_assets
def DummyFilter(content):
return "DummyFilter: {}".format(content)
@pytest.mark.parametrize(
"sample_fixture_name,attempted_destinations",
[
(
"basic_template",
# Relative destination path from dev build dir
[
"index.html",
],
),
(
"basic2_template",
[
"index.html",
"sub/foo.html",
"sub/bar.html",
],
),
(
"i18n_template",
[
"index.html",
"index_fr_FR.html",
],
),
],
)
def test_build_item(
minimal_basic_settings,
fixtures_settings,
reset_syspath,
temp_builds_dir,
sample_fixture_name,
attempted_destinations,
):
"""
Build each page
    This will only work for sample fixtures structured the same way as
    'basic_template'.
    Also we build in production mode so webassets apply minification; this is
    required since in development mode webassets use a hash on every asset
    file that we can't rely on and that would break built file comparison
"""
basepath = temp_builds_dir.join("builder_build_item_{}".format(sample_fixture_name))
projectdir = os.path.join(basepath.strpath, sample_fixture_name)
attempts_dir = os.path.join(
fixtures_settings.fixtures_path, "builds", sample_fixture_name
)
# Copy sample from fixtures dir
templatedir = os.path.join(fixtures_settings.fixtures_path, sample_fixture_name)
shutil.copytree(templatedir, projectdir)
# Setup project
setup_project(projectdir, "dummy_value")
# Get basic sample settings
settings = minimal_basic_settings(projectdir)
    # Enable production mode for webassets without URL expiry, in a custom
    # cache dir, so we have stable asset filenames for comparison
cache_dir = os.path.join(projectdir, "webassets-cache")
os.makedirs(cache_dir)
settings.DEBUG = False
settings.WEBASSETS_CACHE = cache_dir
settings.WEBASSETS_URLEXPIRE = False
# Define a dummy filter to test filter registration and usage
settings.JINJA_FILTERS = {"dummy_filter": DummyFilter}
# Init webassets and builder
assets_env = register_assets(settings)
builder = PageBuilder(settings, assets_env=assets_env)
pages_map = import_pages_module(settings.PAGES_MAP, basedir=projectdir)
    # NOTE: We need to force module reloading, otherwise the previously
    # imported settings, with different values, would still be re-used
pages_map = importlib.reload(pages_map)
    # Collect found templates for each defined page view
buildeds = []
for pageview in pages_map.PAGES:
found = builder.build_item(pageview)
buildeds.append(found)
# Add absolute build dir to each attempted relative path
assert buildeds == [
os.path.join(settings.PUBLISH_DIR, path) for path in attempted_destinations
]
# Check every builded destination exists
for path in attempted_destinations:
dest_path = os.path.join(settings.PUBLISH_DIR, path)
attempt_path = os.path.join(attempts_dir, path)
# Open builded file
with io.open(dest_path, "r") as destfp:
built = destfp.read()
        # Write attempted file from built file
        # These lines are only enabled temporarily when writing a new test
        # or updating an existing one
# with io.open(attempt_path, 'w') as writefp:
# writefp.write(built)
# Open attempted file from 'builds'
with io.open(attempt_path, "r") as attemptfp:
attempted = attemptfp.read()
assert built == attempted
# Cleanup sys.path for next tests
reset_syspath(projectdir)
@pytest.mark.parametrize(
"sample_fixture_name,attempted_destinations",
[
(
"basic_template",
# Relative destination path from dev build dir
[
"index.html",
],
),
(
"basic2_template",
[
"index.html",
"sub/foo.html",
"sub/bar.html",
],
),
(
"i18n_template",
[
"index.html",
"index_fr_FR.html",
],
),
],
)
def test_build_bulk(
minimal_basic_settings,
fixtures_settings,
reset_syspath,
temp_builds_dir,
sample_fixture_name,
attempted_destinations,
):
"""
Build all pages in one bulk action
    Since the 'build_item' test already compares built files, we don't do it
    again here; we just check the returned paths
"""
basepath = temp_builds_dir.join("builder_build_bulk_{}".format(sample_fixture_name))
projectdir = os.path.join(basepath.strpath, sample_fixture_name)
# Copy sample from fixtures dir
templatedir = os.path.join(fixtures_settings.fixtures_path, sample_fixture_name)
shutil.copytree(templatedir, projectdir)
# Setup project
setup_project(projectdir, "dummy_value")
# Get basic sample settings
settings = minimal_basic_settings(projectdir)
# Define a dummy filter to test filter registration and usage
settings.JINJA_FILTERS = {"dummy_filter": DummyFilter}
# Init webassets and builder
assets_env = register_assets(settings)
builder = PageBuilder(settings, assets_env=assets_env)
pages_map = import_pages_module(settings.PAGES_MAP, basedir=projectdir)
    # NOTE: We need to force module reloading, otherwise the previously
    # imported settings, with different values, would still be re-used
pages_map = importlib.reload(pages_map)
    # Collect found templates for each defined page view
buildeds = builder.build_bulk(pages_map.PAGES)
# Check every attempted file has been created (promise)
assert buildeds == [
os.path.join(settings.PUBLISH_DIR, path) for path in attempted_destinations
]
# Check promised builded file exists
for dest in attempted_destinations:
absdest = os.path.join(settings.PUBLISH_DIR, dest)
assert os.path.exists(absdest) is True
# Cleanup sys.path for next tests
reset_syspath(projectdir)
| {
"content_hash": "3bb6e71c6c14c5ce02f1123b36452f3c",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 88,
"avg_line_length": 30.39252336448598,
"alnum_prop": 0.6383763837638377,
"repo_name": "sveetch/Optimus",
"id": "875cdd5cb57c6ee53ca1191299b460ad03918147",
"size": "6504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/0800_builder/06_build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14380"
},
{
"name": "HTML",
"bytes": "16553"
},
{
"name": "JavaScript",
"bytes": "101904"
},
{
"name": "Makefile",
"bytes": "1564"
},
{
"name": "Python",
"bytes": "245913"
},
{
"name": "Ruby",
"bytes": "855"
},
{
"name": "Smarty",
"bytes": "8827"
}
],
"symlink_target": ""
} |
from parsley import wrapGrammar
from ometa.grammar import OMeta
from ometa.runtime import OMetaBase
from core.search.compiler.invparsley import grammar
name = 'InvDSL'
B = OMeta.makeGrammar(grammar, name=name).createParserClass(
OMetaBase, globals()
)
class ICompiler(B):
    def directive(self, d, v):
        raise NotImplementedError()
    def regexpr(self, r):
        raise NotImplementedError()
    def text(self, t):
        raise NotImplementedError()
    def compile(self, initial, values):
        raise NotImplementedError()
    def OR_op(self, a, b):
        raise NotImplementedError()
    def AND_op(self, a, b):
        raise NotImplementedError()
    def NOT_op(self, a):
        raise NotImplementedError()
class DebugCompiler(ICompiler):
def directive(self, d, v):
return d, v
def regexpr(self, r):
return r
def text(self, t):
return t
def compile(self, initial, values):
ret = initial
for op, value in values:
ret = op(ret, value)
return ret
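    # e.g. folding values = [(self.OR_op(), "b"), (self.AND_op(), "c")] over
    # initial = "a" yields the string "((a OR b) AND c)"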
def OR_op(self):
return lambda a, b: '({0} {1} {2})'.format(a, 'OR', b)
def AND_op(self):
return lambda a, b: '({0} {1} {2})'.format(a, 'AND', b)
def NOT_op(self):
return lambda a: '({0} {1})'.format('NOT', a)
def make_debug_compiler():
return wrapGrammar(DebugCompiler)
| {
"content_hash": "1bb7214a93b25a466e630f98776015ae",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 63,
"avg_line_length": 20.90625,
"alnum_prop": 0.5994020926756353,
"repo_name": "rtucker-mozilla/mozilla_inventory",
"id": "02c36eb00b3b38180a9d26e254c269491ab5dc7f",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/search/compiler/invdsl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "JavaScript",
"bytes": "1485560"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "1960271"
},
{
"name": "Ruby",
"bytes": "1459"
},
{
"name": "Shell",
"bytes": "8766"
}
],
"symlink_target": ""
} |
'''
Created on 23/10/2015
@author: david
'''
from abc import ABCMeta
class I2CSensor(object):
'''
Abstract class for I2C sensors
'''
__metaclass__ = ABCMeta
def _setAddress(self, address):
self._address = address
def _readWord(self, regH, regL):
byteH = self._bus.read_byte_data(self._address, regH)
byteL = self._bus.read_byte_data(self._address, regL)
word = (byteH << 8) | byteL
if (byteH & 0x80) != 0:
word = -(0xffff - word + 1)
return word
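    # e.g. regH byte 0xFF and regL byte 0x38 combine to 0xFF38; since the
    # sign bit is set, the value is converted to its two's-complement
    # equivalent -(0xFFFF - 0xFF38 + 1) = -200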
def _readWordHL(self, reg):
return self._readWord(reg, reg+1)
def _readWordLH(self, reg):
return self._readWord(reg+1, reg)
def _writeWord(self, regH, regL, word):
byteH = word >> 8
byteL = word & 0xff
self._bus.write_byte_data(self._address, regH, byteH)
self._bus.write_byte_data(self._address, regL, byteL)
def _writeWordHL(self, reg, word):
self._writeWord(reg, reg+1, word)
def _writeWordLH(self, reg, word):
self._writeWord(reg+1, reg, word)
| {
"content_hash": "06fd784462fe2f6d9602dfa19e3e6419",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 61,
"avg_line_length": 19.79310344827586,
"alnum_prop": 0.5435540069686411,
"repo_name": "dpm76/Bot1",
"id": "4209bc0edc1dc62178386dd94c1c1066f9209126",
"size": "1148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bot1/sensor/I2CSensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244329"
},
{
"name": "Shell",
"bytes": "3779"
}
],
"symlink_target": ""
} |
import requests
def scrape_vsco(username):
#start a new web-browsing session
s = requests.session()
#make containers for all the links to be collected
messy_links, clean_links = [],[]
#download the user's profile page
profile_page = s.get('http://vsco.co/'+username)
#check if account exists
if '<p class="page-error-heading mt40">This page does not exist</p>' not in profile_page.text:
#get the unique session id from the site's cookies
unique_session_id = str(profile_page.cookies).split('vs=')[1]
unique_session_id = unique_session_id[:unique_session_id.index(' ')]
#convert the profile page to a string
profile_page = profile_page.text
#get the user's unique user id from the profile page
unique_user_id = profile_page.split('"id":')[1]
unique_user_id = unique_user_id[:unique_user_id.index(',')]
#find the user's profile picture link
profile_picture_link = profile_page.split('responsive_url":"')[1]
profile_picture_link = profile_picture_link[:profile_picture_link.index('"')]
#add the profile picture link to the list
messy_links.append('http://'+profile_picture_link)
#using the session and user id's, download the file containing the links to all pictures ever posted
user_links_page = s.get('http://vsco.co/ajxp/'+unique_session_id+'/2.0/medias?site_id='+unique_user_id+'&page=1&size=10000').text.split('"')
#collect the url of every possible jpg picture
for link in user_links_page:
if ((('im.vsco.co' in link) and ('.jpg' in link))):
messy_links.append('http://'+link)
#find the uncompressed links to the images provided, and clean them up
for link in messy_links:
clean_links.append(link.replace('\\',''))
#terminate the browsing session
s.close()
#return all the decompressed image links
return clean_links
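#example usage (hypothetical username):
#    links = scrape_vsco('exampleuser')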
| {
"content_hash": "5a20ef936af77fadf789e00d70075e7f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 142,
"avg_line_length": 35.94,
"alnum_prop": 0.7084028937117418,
"repo_name": "matthewnau/mirror-mirror",
"id": "0f4dc3304d2bf99463470a4f929ff494c42c4c98",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mirror-mirror/vsco.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17262"
}
],
"symlink_target": ""
} |
import json
import logging
import multiprocessing as mp
import pickle
import time
from collections import Counter
from copy import deepcopy
from typing import List, Union, Tuple
import numpy as np
from flare.env import AtomicEnvironment
from flare.gp_algebra import (
get_like_from_mats,
get_neg_like_grad,
get_ky_mat_update,
_global_training_data,
_global_training_labels,
_global_training_structures,
_global_energy_labels,
get_Ky_mat,
get_kernel_vector,
en_kern_vec,
efs_kern_vec,
)
from flare.kernels.utils import (
str_to_kernel_set,
from_mask_to_args,
kernel_str_to_array,
)
from flare.output import Output, set_logger
from flare.parameters import Parameters
from flare.struc import Structure
from flare.utils.element_coder import NumpyEncoder, Z_to_element
from numpy.random import random
from scipy.linalg import solve_triangular
from scipy.optimize import minimize
class GaussianProcess:
"""Gaussian process force field. Implementation is based on Algorithm 2.1
(pg. 19) of "Gaussian Processes for Machine Learning" by Rasmussen and
Williams.
Methods within GaussianProcess allow you to make predictions on
AtomicEnvironment objects (see env.py) generated from
FLARE Structures (see struc.py), and after data points are added,
optimize hyperparameters based on available training data (train method).
Args:
kernels (list, optional): Determine the type of kernels. Example:
            ['twobody', 'threebody'], ['2', '3', 'mb'], ['2']. Defaults to
            ['twobody', 'threebody'].
component (str, optional): Determine single- ("sc") or multi-
component ("mc") kernel to use. Defaults to "mc"
hyps (np.ndarray, optional): Hyperparameters of the GP.
cutoffs (Dict, optional): Cutoffs of the GP kernel. For simple hyper-
parameter setups, formatted like {"twobody":7, "threebody":4.5},
etc.
hyp_labels (List, optional): List of hyperparameter labels. Defaults
to None.
opt_algorithm (str, optional): Hyperparameter optimization algorithm.
Defaults to 'L-BFGS-B'.
maxiter (int, optional): Maximum number of iterations of the
hyperparameter optimization algorithm. Defaults to 10.
parallel (bool, optional): If True, the covariance matrix K of the GP is
computed in parallel. Defaults to False.
n_cpus (int, optional): Number of cpus used for parallel
calculations. Defaults to 1 (serial)
n_sample (int, optional): Size of submatrix to use when parallelizing
predictions.
output (Output, optional): Output object used to dump hyperparameters
during optimization. Defaults to None.
hyps_mask (dict, optional): hyps_mask can set up which hyper parameter
is used for what interaction. Details see kernels/mc_sephyps.py
name (str, optional): Name for the GP instance which dictates global
memory access.
"""
def __init__(
self,
kernels: List[str] = None,
component: str = "mc",
hyps: "ndarray" = None,
cutoffs: dict = None,
hyps_mask: dict = None,
hyp_labels: List = None,
opt_algorithm: str = "L-BFGS-B",
maxiter: int = 10,
parallel: bool = False,
per_atom_par: bool = True,
n_cpus: int = 1,
n_sample: int = 100,
output: Output = None,
name="default_gp",
energy_noise: float = 0.01,
**kwargs,
):
"""Initialize GP parameters and training data."""
# load arguments into attributes
self.name = name
self.output = output
self.opt_algorithm = opt_algorithm
self.per_atom_par = per_atom_par
self.maxiter = maxiter
# set up parallelization
self.n_cpus = n_cpus
self.n_sample = n_sample
self.parallel = parallel
self.component = component
self.kernels = (
["twobody", "threebody"]
if kernels is None
else kernel_str_to_array("".join(kernels))
)
self.cutoffs = {} if cutoffs is None else cutoffs
self.hyp_labels = hyp_labels
self.hyps_mask = {} if hyps_mask is None else hyps_mask
self.hyps = hyps
GaussianProcess.backward_arguments(kwargs, self.__dict__)
GaussianProcess.backward_attributes(self.__dict__)
# ------------ "computed" attributes ------------
if self.output is None:
self.logger_name = self.name + "GaussianProcess"
set_logger(self.logger_name, stream=True, fileout_name=None, verbose="info")
else:
self.logger_name = self.output.basename + "log"
if self.hyps is None:
# If no hyperparameters are passed in, assume 2 hyps for each
# kernel, plus one noise hyperparameter, and use a guess value
self.hyps = np.array([0.1] * (1 + 2 * len(self.kernels)))
else:
self.hyps = np.array(self.hyps, dtype=np.float64)
kernel, grad, ek, efk, efs_e, efs_f, efs_self = str_to_kernel_set(
self.kernels, self.component, self.hyps_mask
)
self.kernel = kernel
self.kernel_grad = grad
self.energy_force_kernel = efk
self.energy_kernel = ek
self.efs_energy_kernel = efs_e
self.efs_force_kernel = efs_f
self.efs_self_kernel = efs_self
self.kernels = kernel_str_to_array(kernel.__name__)
# parallelization
if self.parallel:
if self.n_cpus is None:
self.n_cpus = mp.cpu_count()
else:
self.n_cpus = n_cpus
else:
self.n_cpus = 1
self.training_data = [] # Atomic environments
self.training_labels = [] # Forces acting on central atoms
self.training_labels_np = np.empty(
0,
)
self.n_envs_prev = len(self.training_data)
        # Attributes to accommodate energy labels:
self.training_structures = [] # Environments of each structure
self.energy_labels = [] # Energies of training structures
self.energy_labels_np = np.empty(
0,
)
self.energy_noise = energy_noise
self.all_labels = np.empty(
0,
)
# Parameters set during training
self.ky_mat = None
self.force_block = None
self.energy_block = None
self.force_energy_block = None
self.l_mat = None
self.l_mat_inv = None
self.alpha = None
self.ky_mat_inv = None
self.likelihood = None
self.likelihood_gradient = None
self.bounds = None
# File used for reading / writing model if model is large
self.ky_mat_file = None
# Flag if too-big warning has been printed for this model
self.large_warning = False
if self.logger_name is None:
if self.output is None:
self.logger_name = self.name + "GaussianProcess"
set_logger(
self.logger_name, stream=True, fileout_name=None, verbose="info"
)
else:
self.logger_name = self.output.basename + "log"
logger = logging.getLogger(self.logger_name)
if self.cutoffs == {}:
# If no cutoffs are passed in, assume 7 A for 2 body, 3.5 for
# 3-body.
cutoffs = {}
if "twobody" in self.kernels:
cutoffs["twobody"] = 7
if "threebody" in self.kernels:
cutoffs["threebody"] = 3.5
if "manybody" in self.kernels:
raise ValueError(
"No cutoff was set for the manybody kernel."
"A default value will not be set by default."
)
self.cutoffs = cutoffs
logger.warning(
"Warning: No cutoffs were set for your GP."
"Default values have been assigned but you "
"should think carefully about which are "
"appropriate for your use case."
)
self.check_instantiation()
@property
def force_noise(self):
return Parameters.get_noise(self.hyps_mask, self.hyps, constraint=False)
@property
def hyps_and_labels(self):
return Parameters.get_hyps(
self.hyps_mask, self.hyps, constraint=False, label=True
)
def check_instantiation(self):
"""
Runs a series of checks to ensure that the user has not supplied
contradictory arguments which will result in undefined behavior
with multiple hyperparameters.
:return:
"""
logger = logging.getLogger(self.logger_name)
# check whether it's be loaded before
loaded = False
if self.name in _global_training_labels:
if (
_global_training_labels.get(self.name, None)
is not self.training_labels_np
):
loaded = True
if self.name in _global_energy_labels:
if _global_energy_labels.get(self.name, None) is not self.energy_labels_np:
loaded = True
if loaded:
base = f"{self.name}"
count = 2
while self.name in _global_training_labels and count < 100:
time.sleep(random())
self.name = f"{base}_{count}"
logger.debug(
"Specified GP name is present in global memory; "
"Attempting to rename the "
f"GP instance to {self.name}"
)
count += 1
if self.name in _global_training_labels:
milliseconds = int(round(time.time() * 1000) % 10000000)
self.name = f"{base}_{milliseconds}"
logger.debug(
"Specified GP name still present in global memory: "
f"renaming the gp instance to {self.name}"
)
logger.debug(f"Final name of the gp instance is {self.name}")
self.sync_data()
self.hyps_mask = Parameters.check_instantiation(
hyps=self.hyps,
cutoffs=self.cutoffs,
kernels=self.kernels,
param_dict=self.hyps_mask,
)
self.bounds = deepcopy(self.hyps_mask.get("bounds", None))
def update_kernel(
self,
kernels: List[str],
component: str = "mc",
hyps=None,
cutoffs: dict = None,
hyps_mask: dict = None,
):
kernel, grad, ek, efk, _, _, _ = str_to_kernel_set(
kernels, component, hyps_mask
)
self.kernel = kernel
self.kernel_grad = grad
self.energy_force_kernel = efk
self.energy_kernel = ek
self.kernels = kernel_str_to_array(kernel.__name__)
if hyps_mask is not None:
self.hyps_mask = hyps_mask
# Cutoffs argument will override hyps mask's cutoffs key, if present
if isinstance(hyps_mask, dict) and cutoffs is None:
cutoffs = hyps_mask.get("cutoffs", None)
if cutoffs is not None:
if self.cutoffs != cutoffs:
self.adjust_cutoffs(cutoffs, train=False, new_hyps_mask=hyps_mask)
self.cutoffs = cutoffs
if isinstance(hyps_mask, dict) and hyps is None:
hyps = hyps_mask.get("hyps", None)
if hyps is not None:
self.hyps = hyps
def update_db(
self,
struc: Structure,
forces: "ndarray" = None,
custom_range: List[int] = (),
energy: float = None,
stress: "ndarray" = None,
):
"""Given a structure and forces, add local environments from the
structure to the training set of the GP. If energy is given, add the
entire structure to the training set.
Args:
struc (Structure): Input structure. Local environments of atoms
in this structure will be added to the training set of the GP.
forces (np.ndarray): Forces on atoms in the structure.
custom_range (List[int]): Indices of atoms whose local
environments will be added to the training set of the GP.
energy (float): Energy of the structure.
stress (np.ndarray): Stress tensor of the structure. The stress
tensor components should be given in the following order:
xx, xy, xz, yy, yz, zz.
"""
# By default, use all atoms in the structure
noa = len(struc.positions)
update_indices = custom_range or list(range(noa))
# If forces are given, update the environment list.
if forces is not None:
for atom in update_indices:
env_curr = AtomicEnvironment(
struc, atom, self.cutoffs, cutoffs_mask=self.hyps_mask
)
forces_curr = np.array(forces[atom])
self.training_data.append(env_curr)
self.training_labels.append(forces_curr)
# create numpy array of training labels
self.training_labels_np = np.hstack(self.training_labels)
# If an energy is given, update the structure list.
if energy is not None:
structure_list = [] # Populate with all environments of the struc
for atom in range(noa):
env_curr = AtomicEnvironment(
struc, atom, self.cutoffs, cutoffs_mask=self.hyps_mask
)
structure_list.append(env_curr)
self.energy_labels.append(energy)
self.training_structures.append(structure_list)
self.energy_labels_np = np.array(self.energy_labels)
if forces is None and energy is None and stress is None:
logger = logging.getLogger(self.logger_name)
            logger.warning(
                "Update DB method called with data but no labels! "
                "The GP has not been updated with data!"
            )
# update list of all labels
self.all_labels = np.concatenate(
(self.training_labels_np, self.energy_labels_np)
)
self.sync_data()
def add_one_env(
self,
env: AtomicEnvironment,
force: "np.ndarray" = None,
train: bool = False,
**kwargs,
):
"""Add a single local environment to the training set of the GP.
Args:
env (AtomicEnvironment): Local environment to be added to the
training set of the GP.
force (np.ndarray): Force on the central atom of the local
environment in the form of a 3-component Numpy array
containing the x, y, and z components.
train (bool): If True, the GP is trained after the local
environment is added.
"""
self.training_data.append(env)
if force is None:
self.training_labels.append(env.force)
else:
self.training_labels.append(force)
self.training_labels_np = np.hstack(self.training_labels)
self.sync_data()
# update list of all labels
self.all_labels = np.concatenate(
(self.training_labels_np, self.energy_labels_np)
)
if train:
self.train(**kwargs)
def train(
self,
logger_name: str = None,
custom_bounds=None,
grad_tol: float = 1e-4,
x_tol: float = 1e-5,
line_steps: int = 20,
print_progress: bool = False,
):
"""Train Gaussian Process model on training data. Tunes the
hyperparameters to maximize the likelihood, then computes L and alpha
(related to the covariance matrix of the training set).
Args:
logger (logging.logger): logger object specifying where to write the
progress of the optimization.
custom_bounds (np.ndarray): Custom bounds on the hyperparameters.
grad_tol (float): Tolerance of the hyperparameter gradient that
determines when hyperparameter optimization is terminated.
x_tol (float): Tolerance on the x values used to decide when
Nelder-Mead hyperparameter optimization is terminated.
line_steps (int): Maximum number of line steps for L-BFGS
hyperparameter optimization.
:param logger_name:
:param print_progress:
"""
verbose = "warning"
if print_progress:
verbose = "info"
if logger_name is None:
set_logger(
"gp_algebra",
stream=True,
fileout_name="log.gp_algebra",
verbose=verbose,
)
logger_name = "gp_algebra"
disp = print_progress
if (
max(len(self.training_data), len(self.training_labels)) > 5000
and not self.large_warning
):
self.large_warning = True
            warning_message = (
                "WARNING! Your GP is very large (>5000 atomic "
                "environments). The hyperparameter optimization process "
                "does not scale favorably with the number of atomic "
                "environments (roughly N^2), so your GP may take a very "
                "long time to train. Consider finding a way to reduce the "
                "number of atomic environments in your model if you want "
                "to optimize the hyperparameters, or optimize them by a "
                "different route."
            )
logger = logging.getLogger(self.logger_name)
logger.warning(warning_message)
if len(self.training_data) == 0 or len(self.training_labels) == 0:
raise Warning(
"You are attempting to train a GP with no "
"training data. Add environments and forces "
"to the GP and try again."
)
x_0 = self.hyps
args = (
self.name,
self.kernel_grad,
logger_name,
self.cutoffs,
self.hyps_mask,
self.n_cpus,
self.n_sample,
)
res = None
if self.opt_algorithm == "L-BFGS-B":
# bound signal noise below to avoid overfitting
if self.bounds is None:
bounds = np.array([(1e-6, np.inf)] * len(x_0))
bounds[-1, 0] = 1e-3
else:
bounds = self.bounds
# Catch linear algebra errors and switch to BFGS if necessary
try:
res = minimize(
get_neg_like_grad,
x_0,
args,
method="L-BFGS-B",
jac=True,
bounds=bounds,
options={
"disp": disp,
"gtol": grad_tol,
"maxls": line_steps,
"maxiter": self.maxiter,
},
)
except np.linalg.LinAlgError:
logger = logging.getLogger(self.logger_name)
logger.warning(
"Algorithm for L-BFGS-B failed. Changing to "
"BFGS for remainder of run."
)
self.opt_algorithm = "BFGS"
if custom_bounds is not None:
res = minimize(
get_neg_like_grad,
x_0,
args,
method="L-BFGS-B",
jac=True,
bounds=custom_bounds,
options={
"disp": disp,
"gtol": grad_tol,
"maxls": line_steps,
"maxiter": self.maxiter,
},
)
elif self.opt_algorithm == "BFGS":
res = minimize(
get_neg_like_grad,
x_0,
args,
method="BFGS",
jac=True,
options={"disp": disp, "gtol": grad_tol, "maxiter": self.maxiter},
)
if res is None:
raise RuntimeError("Optimization failed for some reason.")
self.hyps = res.x
self.set_L_alpha()
self.likelihood = -res.fun
self.likelihood_gradient = -res.jac
return res
def check_L_alpha(self):
"""
Check that the alpha vector is up to date with the training set. If
not, update_L_alpha is called.
"""
# Check that alpha is up to date with training set
size3 = len(self.training_data) * 3 + len(self.training_structures)
# If model is empty, then just return
if size3 == 0:
return
if self.alpha is None:
self.update_L_alpha()
elif size3 > self.alpha.shape[0]:
self.update_L_alpha()
elif size3 != self.alpha.shape[0]:
self.set_L_alpha()
def predict(self, x_t: AtomicEnvironment, d: int) -> [float, float]:
"""
Predict a force component of the central atom of a local environment.
Args:
x_t (AtomicEnvironment): Input local environment.
d (int): Force component to be predicted (1 is x, 2 is y, and
3 is z).
Return:
(float, float): Mean and epistemic variance of the prediction.
"""
if d not in [1, 2, 3]:
raise ValueError("d should be 1, 2, or 3")
# Kernel vector allows for evaluation of atomic environments.
if self.parallel and not self.per_atom_par:
n_cpus = self.n_cpus
else:
n_cpus = 1
self.sync_data()
k_v = get_kernel_vector(
self.name,
self.kernel,
self.energy_force_kernel,
x_t,
d,
self.hyps,
cutoffs=self.cutoffs,
hyps_mask=self.hyps_mask,
n_cpus=n_cpus,
n_sample=self.n_sample,
)
# Guarantee that alpha is up to date with training set
self.check_L_alpha()
# get predictive mean
pred_mean = np.matmul(k_v, self.alpha)
# get predictive variance without cholesky (possibly faster)
# pass args to kernel based on if mult. hyperparameters in use
args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)
self_kern = self.kernel(x_t, x_t, d, d, *args)
pred_var = self_kern - np.matmul(np.matmul(k_v, self.ky_mat_inv), k_v)
return pred_mean, pred_var
def predict_force_xyz(self, x_t: AtomicEnvironment) -> ("np.ndarray", "np.ndarray"):
"""
Simple wrapper to predict all three components of a force in one go.
:param x_t:
:return:
"""
forces = []
stds = []
for d in (1, 2, 3):
force, std = self.predict(x_t, d)
forces.append(force)
stds.append(std)
return np.array(forces), np.array(stds)
def predict_local_energy(self, x_t: AtomicEnvironment) -> float:
"""Predict the local energy of a local environment.
Args:
x_t (AtomicEnvironment): Input local environment.
Return:
float: Local energy predicted by the GP.
"""
if self.parallel and not self.per_atom_par:
n_cpus = self.n_cpus
else:
n_cpus = 1
self.sync_data()
k_v = en_kern_vec(
self.name,
self.energy_force_kernel,
self.energy_kernel,
x_t,
self.hyps,
cutoffs=self.cutoffs,
hyps_mask=self.hyps_mask,
n_cpus=n_cpus,
n_sample=self.n_sample,
)
pred_mean = np.matmul(k_v, self.alpha)
return pred_mean
def predict_local_energy_and_var(self, x_t: AtomicEnvironment):
"""Predict the local energy of a local environment and its
uncertainty.
Args:
x_t (AtomicEnvironment): Input local environment.
Return:
(float, float): Mean and predictive variance predicted by the GP.
"""
if self.parallel and not self.per_atom_par:
n_cpus = self.n_cpus
else:
n_cpus = 1
self.sync_data()
# get kernel vector
k_v = en_kern_vec(
self.name,
self.energy_force_kernel,
self.energy_kernel,
x_t,
self.hyps,
cutoffs=self.cutoffs,
hyps_mask=self.hyps_mask,
n_cpus=n_cpus,
n_sample=self.n_sample,
)
# get predictive mean
pred_mean = np.matmul(k_v, self.alpha)
# get predictive variance
v_vec = solve_triangular(self.l_mat, k_v, lower=True)
args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)
self_kern = self.energy_kernel(x_t, x_t, *args)
pred_var = self_kern - np.matmul(v_vec, v_vec)
return pred_mean, pred_var
def predict_efs(self, x_t: AtomicEnvironment):
"""Predict the local energy, forces, and partial stresses of an
atomic environment and their predictive variances."""
# Kernel vector allows for evaluation of atomic environments.
if self.parallel and not self.per_atom_par:
n_cpus = self.n_cpus
else:
n_cpus = 1
self.sync_data()
energy_vector, force_array, stress_array = efs_kern_vec(
self.name,
self.efs_force_kernel,
self.efs_energy_kernel,
x_t,
self.hyps,
cutoffs=self.cutoffs,
hyps_mask=self.hyps_mask,
n_cpus=n_cpus,
n_sample=self.n_sample,
)
# Check that alpha is up to date with training set.
self.check_L_alpha()
# Compute mean predictions.
en_pred = np.matmul(energy_vector, self.alpha)
force_pred = np.matmul(force_array, self.alpha)
stress_pred = np.matmul(stress_array, self.alpha)
# Compute uncertainties.
args = from_mask_to_args(self.hyps, self.cutoffs, self.hyps_mask)
self_en, self_force, self_stress = self.efs_self_kernel(x_t, *args)
en_var = self_en - np.matmul(
np.matmul(energy_vector, self.ky_mat_inv), energy_vector
)
force_var = self_force - np.diag(
np.matmul(np.matmul(force_array, self.ky_mat_inv), force_array.transpose())
)
stress_var = self_stress - np.diag(
np.matmul(
np.matmul(stress_array, self.ky_mat_inv), stress_array.transpose()
)
)
return en_pred, force_pred, stress_pred, en_var, force_var, stress_var
def set_L_alpha(self):
"""
Invert the covariance matrix, setting L (a lower triangular
matrix s.t. L L^T = (K + sig_n^2 I)) and alpha, the inverse
covariance matrix multiplied by the vector of training labels.
The forces and variances are later obtained using alpha.
"""
self.sync_data()
ky_mat = get_Ky_mat(
self.hyps,
self.name,
self.kernel,
self.energy_kernel,
self.energy_force_kernel,
self.energy_noise,
cutoffs=self.cutoffs,
hyps_mask=self.hyps_mask,
n_cpus=self.n_cpus,
n_sample=self.n_sample,
)
l_mat = np.linalg.cholesky(ky_mat)
l_mat_inv = np.linalg.inv(l_mat)
ky_mat_inv = l_mat_inv.T @ l_mat_inv
alpha = np.matmul(ky_mat_inv, self.all_labels)
self.ky_mat = ky_mat
self.l_mat = l_mat
self.alpha = alpha
self.ky_mat_inv = ky_mat_inv
self.likelihood = get_like_from_mats(ky_mat, l_mat, alpha, self.name)
self.n_envs_prev = len(self.training_data)
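    # In linear-algebra terms, set_L_alpha computes
    #     K + sig_n^2 I = L L^T,   K^{-1} = L^{-T} L^{-1},   alpha = K^{-1} y,
    # so a mean prediction reduces to k^T alpha and a variance to
    # k^T K^{-1} k (a standalone sketch is given below the class).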
def update_L_alpha(self):
"""
Update the GP's L matrix and alpha vector without recalculating
the entire covariance matrix K.
"""
# Set L matrix and alpha if set_L_alpha has not been called yet
        if self.l_mat is None or self.ky_mat is None:
self.set_L_alpha()
return
# Reset global variables.
self.sync_data()
ky_mat = get_ky_mat_update(
self.ky_mat,
self.n_envs_prev,
self.hyps,
self.name,
self.kernel,
self.energy_kernel,
self.energy_force_kernel,
self.energy_noise,
cutoffs=self.cutoffs,
hyps_mask=self.hyps_mask,
n_cpus=self.n_cpus,
n_sample=self.n_sample,
)
l_mat = np.linalg.cholesky(ky_mat)
l_mat_inv = np.linalg.inv(l_mat)
ky_mat_inv = l_mat_inv.T @ l_mat_inv
alpha = np.matmul(ky_mat_inv, self.all_labels)
self.ky_mat = ky_mat
self.l_mat = l_mat
self.alpha = alpha
self.ky_mat_inv = ky_mat_inv
self.n_envs_prev = len(self.training_data)
def __str__(self):
"""String representation of the GP model."""
thestr = ""
thestr += f"Number of cpu cores: {self.n_cpus}\n"
thestr += f"Kernel: {self.kernels}\n"
thestr += f"Training points: {len(self.training_data)}\n"
thestr += f"Cutoffs: {self.cutoffs}\n"
thestr += f"Number of hyperparameters: {len(self.hyps)}\n"
thestr += f"Hyperparameter array: {str(self.hyps)}\n"
if self.hyp_labels is None:
# Put unlabeled hyperparameters on one line
thestr = thestr[:-1]
thestr += str(self.hyps) + "\n"
else:
for hyp, label in zip(self.hyps, self.hyp_labels):
thestr += f"{label}: {hyp} \n"
return thestr
def as_dict(self):
"""Dictionary representation of the GP model."""
self.check_L_alpha()
out_dict = dict(vars(self))
out_dict["training_data"] = [env.as_dict() for env in self.training_data]
        # Write training structures (which are just lists of environments)
out_dict["training_structures"] = []
for n, env_list in enumerate(self.training_structures):
out_dict["training_structures"].append([])
for env_curr in env_list:
out_dict["training_structures"][n].append(env_curr.as_dict())
# Remove the callables
for key in [
"kernel",
"kernel_grad",
"energy_kernel",
"energy_force_kernel",
"efs_energy_kernel",
"efs_force_kernel",
"efs_self_kernel",
"output",
]:
out_dict.pop(key)
return out_dict
def sync_data(self):
_global_training_data[self.name] = self.training_data
_global_training_labels[self.name] = self.training_labels_np
_global_training_structures[self.name] = self.training_structures
_global_energy_labels[self.name] = self.energy_labels_np
@staticmethod
def from_dict(dictionary):
"""Create GP object from dictionary representation."""
GaussianProcess.backward_arguments(dictionary, dictionary)
GaussianProcess.backward_attributes(dictionary)
new_gp = GaussianProcess(**dictionary)
# Save time by attempting to load in computed attributes
if "training_data" in dictionary:
new_gp.training_data = [
AtomicEnvironment.from_dict(env) for env in dictionary["training_data"]
]
new_gp.training_labels = deepcopy(dictionary["training_labels"])
new_gp.training_labels_np = deepcopy(dictionary["training_labels_np"])
new_gp.sync_data()
# Reconstruct training structures.
if "training_structures" in dictionary:
new_gp.training_structures = []
for n, env_list in enumerate(dictionary["training_structures"]):
new_gp.training_structures.append([])
for env_curr in env_list:
new_gp.training_structures[n].append(
AtomicEnvironment.from_dict(env_curr)
)
new_gp.energy_labels = deepcopy(dictionary["energy_labels"])
new_gp.energy_labels_np = deepcopy(dictionary["energy_labels_np"])
new_gp.sync_data()
new_gp.all_labels = np.concatenate(
(new_gp.training_labels_np, new_gp.energy_labels_np)
)
new_gp.likelihood = dictionary.get("likelihood", None)
new_gp.likelihood_gradient = dictionary.get("likelihood_gradient", None)
new_gp.n_envs_prev = len(new_gp.training_data)
# Save time by attempting to load in computed attributes
if dictionary.get("ky_mat_file"):
try:
new_gp.ky_mat = np.load(dictionary["ky_mat_file"])
new_gp.compute_matrices()
new_gp.ky_mat_file = None
except FileNotFoundError:
new_gp.ky_mat = None
new_gp.l_mat = None
new_gp.alpha = None
new_gp.ky_mat_inv = None
filename = dictionary.get("ky_mat_file")
logger = logging.getLogger(new_gp.logger_name)
logger.warning(
"the covariance matrices are not loaded"
f"because {filename} cannot be found"
)
else:
new_gp.ky_mat = (
np.array(dictionary["ky_mat"])
if dictionary.get("ky_mat") is not None
else None
)
new_gp.ky_mat_inv = (
np.array(dictionary["ky_mat_inv"])
if dictionary.get("ky_mat_inv") is not None
else None
)
new_gp.l_mat = (
np.array(dictionary["l_mat"])
if dictionary.get("l_mat") is not None
else None
)
new_gp.alpha = (
np.array(dictionary["alpha"])
if dictionary.get("alpha") is not None
else None
)
return new_gp
def compute_matrices(self):
"""
When covariance matrix is known, reconstruct other matrices.
Used in re-loading large GPs.
:return:
"""
ky_mat = self.ky_mat
if ky_mat is None or (isinstance(ky_mat, np.ndarray) and not np.any(ky_mat)):
            # Emit a real warning instead of constructing an unused Warning object.
            logging.getLogger(self.logger_name).warning(
                "Covariance matrix was not loaded but "
                "compute_matrices was called. Computing covariance "
                "matrix and proceeding..."
            )
self.set_L_alpha()
else:
self.l_mat = np.linalg.cholesky(ky_mat)
self.l_mat_inv = np.linalg.inv(self.l_mat)
self.ky_mat_inv = self.l_mat_inv.T @ self.l_mat_inv
self.alpha = np.matmul(self.ky_mat_inv, self.all_labels)
def adjust_cutoffs(
self,
new_cutoffs: Union[list, tuple, "np.ndarray"] = None,
reset_L_alpha=True,
train=True,
new_hyps_mask=None,
):
"""
Loop through atomic environment objects stored in the training data,
and re-compute cutoffs for each. Useful if you want to gauge the
impact of cutoffs given a certain training set! Unless you know
*exactly* what you are doing for some development or test purpose,
it is **highly** suggested that you call set_L_alpha and
re-optimize your hyperparameters afterwards as is default here.
A helpful way to update the cutoffs and kernel for an extant
GP is to perform the following commands:
>> hyps_mask = pm.as_dict()
>> hyps = hyps_mask['hyps']
>> cutoffs = hyps_mask['cutoffs']
>> kernels = hyps_mask['kernels']
>> gp_model.update_kernel(kernels, 'mc', hyps, cutoffs, hyps_mask)
:param reset_L_alpha:
:param train:
:param new_hyps_mask:
:param new_cutoffs:
:return:
"""
if new_hyps_mask is not None:
hm = new_hyps_mask
self.hyps_mask = new_hyps_mask
else:
hm = self.hyps_mask
if new_cutoffs is None:
try:
new_cutoffs = hm["cutoffs"]
except KeyError:
raise KeyError(
"New cutoffs not found in the hyps_mask"
"dictionary via call to 'cutoffs' key."
)
# update environment
nenv = len(self.training_data)
for i in range(nenv):
self.training_data[i].cutoffs = new_cutoffs
self.training_data[i].cutoffs_mask = hm
self.training_data[i].setup_mask(hm)
self.training_data[i].compute_env()
# Ensure that training data and labels are still consistent
self.sync_data()
self.cutoffs = new_cutoffs
if reset_L_alpha:
del self.l_mat
del self.ky_mat
self.set_L_alpha()
if train:
self.train()
def remove_force_data(
self, indexes: Union[int, List[int]], update_matrices: bool = True
) -> Tuple[List[Structure], List["ndarray"]]:
"""
Remove force components from the model. Convenience function which
deletes individual data points.
Matrices should *always* be updated if you intend to use the GP to make
predictions afterwards. This might be time consuming for large GPs,
so, it is provided as an option, but, only do so with extreme caution.
(Undefined behavior may result if you try to make predictions and/or
add to the training set afterwards).
Returns training data which was removed akin to a pop method, in order
of lowest to highest index passed in.
:param indexes: Indexes of envs in training data to remove.
:param update_matrices: If false, will not update the GP's matrices
afterwards (which can be time consuming for large models).
This should essentially always be true except for niche development
applications.
:return:
"""
# Listify input even if one integer
if isinstance(indexes, int):
indexes = [indexes]
if max(indexes) > len(self.training_data):
raise ValueError("Index out of range of data")
if len(indexes) == 0:
return [], []
# Get in reverse order so that modifying higher indexes doesn't affect
# lower indexes
indexes.sort(reverse=True)
removed_data = []
removed_labels = []
for i in indexes:
removed_data.append(self.training_data.pop(i))
removed_labels.append(self.training_labels.pop(i))
self.training_labels_np = np.hstack(self.training_labels)
self.all_labels = np.concatenate(
(self.training_labels_np, self.energy_labels_np)
)
self.sync_data()
if update_matrices:
self.set_L_alpha()
# Put removed data in order of lowest to highest index
removed_data.reverse()
removed_labels.reverse()
return removed_data, removed_labels
def write_model(
self, name: str, format: str = None, split_matrix_size_cutoff: int = 5000
):
"""
Write model in a variety of formats to a file for later re-use.
JSON files are open to visual inspection and are easier to use
across different versions of FLARE or GP implementations. However,
they are larger and loading them in takes longer (by setting up a
new GP from the specifications). Pickled files can be faster to
read & write, and they take up less memory.
Args:
name (str): Output name.
format (str): Output format.
split_matrix_size_cutoff (int): If there are more than this
                number of training points in the set, save the matrices separately.
"""
if len(self.training_data) > split_matrix_size_cutoff:
np.save(f"{name}_ky_mat.npy", self.ky_mat)
self.ky_mat_file = f"{name}_ky_mat.npy"
temp_ky_mat = self.ky_mat
temp_l_mat = self.l_mat
temp_alpha = self.alpha
temp_ky_mat_inv = self.ky_mat_inv
self.ky_mat = None
self.l_mat = None
self.alpha = None
self.ky_mat_inv = None
# Automatically detect output format from name variable
for detect in ["json", "pickle", "binary"]:
if detect in name.lower():
format = detect
break
if format is None:
format = "json"
supported_formats = ["json", "pickle", "binary"]
if format.lower() == "json":
if ".json" != name[-5:]:
name += ".json"
with open(name, "w") as f:
json.dump(self.as_dict(), f, cls=NumpyEncoder)
elif format.lower() == "pickle" or format.lower() == "binary":
if ".pickle" != name[-7:]:
name += ".pickle"
with open(name, "wb") as f:
pickle.dump(self, f)
else:
raise ValueError(
"Output format not supported: try from {}".format(supported_formats)
)
if len(self.training_data) > split_matrix_size_cutoff:
self.ky_mat = temp_ky_mat
self.l_mat = temp_l_mat
self.alpha = temp_alpha
self.ky_mat_inv = temp_ky_mat_inv
@staticmethod
def from_file(filename: str, format: str = ""):
"""
One-line convenience method to load a GP from a file stored using
write_file
Args:
filename (str): path to GP model
format (str): json or pickle if format is not in filename
:return:
"""
if ".json" in filename or "json" in format:
with open(filename, "r") as f:
gp_model = GaussianProcess.from_dict(json.loads(f.readline()))
elif ".pickle" in filename or "pickle" in format:
with open(filename, "rb") as f:
gp_model = pickle.load(f)
GaussianProcess.backward_arguments(gp_model.__dict__, gp_model.__dict__)
GaussianProcess.backward_attributes(gp_model.__dict__)
if hasattr(gp_model, "ky_mat_file") and gp_model.ky_mat_file:
try:
gp_model.ky_mat = np.load(
gp_model.ky_mat_file, allow_pickle=True
)
gp_model.compute_matrices()
except FileNotFoundError:
gp_model.ky_mat = None
gp_model.l_mat = None
gp_model.alpha = None
gp_model.ky_mat_inv = None
                # Emit a real warning instead of constructing an unused Warning object.
                logging.getLogger(gp_model.logger_name).warning(
                    "the covariance matrices are not loaded, "
                    "this can take a long time to recompute"
                )
else:
raise ValueError(
"Warning: Format unspecieified or file is not .json or .pickle format."
)
gp_model.check_instantiation()
return gp_model
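    # Illustrative round trip (file name is a placeholder, not from this module):
    #     gp.write_model("my_gp.json")                   # JSON, human-readable
    #     gp2 = GaussianProcess.from_file("my_gp.json")  # rebuilds the same model
    # For training sets larger than split_matrix_size_cutoff, write_model stores
    # the covariance matrix separately in "<name>_ky_mat.npy".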
def __len__(self):
return len(self.training_data)
@property
def training_statistics(self) -> dict:
"""
Return a dictionary with statistics about the current training data.
Useful for quickly summarizing info about the GP.
:return:
"""
data = dict()
data["N"] = len(self.training_data)
# Count all of the present species in the atomic env. data
present_species = []
for env, _ in zip(self.training_data, self.training_labels):
present_species.append(Z_to_element(env.structure.coded_species[env.atom]))
# Summarize the relevant information
data["species"] = list(set(present_species))
data["envs_by_species"] = dict(Counter(present_species))
return data
@property
def par(self):
"""
        Backwards compatibility attribute
:return:
"""
return self.parallel
def __deepcopy__(self, memo):
# this way can also deepcopy the training data in _global_training dicts
return GaussianProcess.from_dict(self.as_dict())
def __del__(self):
if self is None:
return
if self.name in _global_training_labels:
return (
_global_training_data.pop(self.name, None),
_global_training_labels.pop(self.name, None),
_global_training_structures.pop(self.name, None),
_global_energy_labels.pop(self.name, None),
)
@staticmethod
def backward_arguments(kwargs, new_args={}):
"""
update the initialize arguments that were renamed
"""
if "kernel_name" in kwargs:
DeprecationWarning("kernel_name is being replaced with kernels")
new_args["kernels"] = kernel_str_to_array(kwargs["kernel_name"])
kwargs.pop("kernel_name")
if "nsample" in kwargs:
DeprecationWarning("nsample is being replaced with n_sample")
new_args["n_sample"] = kwargs["nsample"]
kwargs.pop("nsample")
if "par" in kwargs:
DeprecationWarning("par is being replaced with parallel")
new_args["parallel"] = kwargs["par"]
kwargs.pop("par")
if "no_cpus" in kwargs:
DeprecationWarning("no_cpus is being replaced with n_cpu")
new_args["n_cpus"] = kwargs["no_cpus"]
kwargs.pop("no_cpus")
if "multihyps" in kwargs:
DeprecationWarning("multihyps is removed")
kwargs.pop("multihyps")
return new_args
@staticmethod
def backward_attributes(dictionary):
"""
add new attributes to old instance
or update attribute types
"""
if "name" not in dictionary:
dictionary["name"] = "default_gp"
if "per_atom_par" not in dictionary:
dictionary["per_atom_par"] = True
if "opt_algorithm" not in dictionary:
dictionary["opt_algorithm"] = "L-BFGS-B"
if "hyps_mask" not in dictionary:
dictionary["hyps_mask"] = None
if "parallel" not in dictionary:
dictionary["parallel"] = False
if "component" not in dictionary:
dictionary["component"] = "mc"
if "training_structures" not in dictionary:
# Environments of each structure
dictionary["training_structures"] = []
dictionary["energy_labels"] = [] # Energies of training structures
dictionary["energy_labels_np"] = np.empty(
0,
)
if "training_labels" not in dictionary:
dictionary["training_labels"] = []
dictionary["training_labels_np"] = np.empty(
0,
)
if "energy_noise" not in dictionary:
dictionary["energy_noise"] = 0.01
if not isinstance(dictionary["cutoffs"], dict):
dictionary["cutoffs"] = Parameters.cutoff_array_to_dict(
dictionary["cutoffs"]
)
dictionary["hyps_mask"] = Parameters.backward(
dictionary["kernels"], deepcopy(dictionary["hyps_mask"])
)
if "logger_name" not in dictionary:
dictionary["logger_name"] = None
| {
"content_hash": "d72de104ff11424c071770e30076dcb7",
"timestamp": "",
"source": "github",
"line_count": 1408,
"max_line_length": 88,
"avg_line_length": 34.38210227272727,
"alnum_prop": 0.5494525924395786,
"repo_name": "mir-group/flare",
"id": "c22b6cb07af34ff291a74bba9c5236d630dd1db8",
"size": "48410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flare/gp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "89901"
},
{
"name": "Python",
"bytes": "1372138"
}
],
"symlink_target": ""
} |
class channel(object):
def __init__(self, gen):
self.gen = gen
self.args = ()
self.kwargs = {}
def __iter__(self):
return self
def next(self):
self.args = ()
self.kwargs = {}
return self.gen.next()
def send(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
return self.gen.next()
class channelgen(object):
def __init__(self, genfunc):
self.genfunc = genfunc
def __call__(self, *args, **kwargs):
c = channel(None)
c.gen = self.genfunc(c, *args, **kwargs)
return c
# A simple example
@channelgen
def skipper(chan, seq, skip = 0):
for i in seq:
if skip:
skip -= 1
else:
yield i
if chan.args:
skip = chan.args[0]
skip = skipper(xrange(100))
skip.next()
skip.next()
skip.send(10) # Skips ten items in the sequence before yielding one
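# A second, illustrative example (not part of the original recipe): the channel
# also records keyword arguments, so a generator can branch on chan.kwargs.
# The name "stepper" below is made up for this sketch.
@channelgen
def stepper(chan, start = 0):
    i = start
    while True:
        yield i
        # step defaults to 1 unless the consumer passed one via send()
        i += chan.kwargs.get('step', 1)
steps = stepper()
steps.next()         # 0
steps.next()         # 1
steps.send(step = 5) # skips ahead by five, yielding 6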
| {
"content_hash": "fd9a083d507fa711e1f24eddadd0d0d4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 67,
"avg_line_length": 21.225,
"alnum_prop": 0.5936395759717314,
"repo_name": "ActiveState/code",
"id": "15612d7e447a635e2c7ba795837f4e932bf2a797",
"size": "849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/438374_Concurrent_Generator/recipe-438374.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
'''This generates the yaml file called john-smith-labels.yaml
'''
from __future__ import absolute_import, print_function
from collections import defaultdict
import os
import sys
import yaml
def paths():
original = os.path.join(os.path.dirname(__file__), 'original')
for root, dirs, fnames in os.walk(original):
for fname in fnames:
yield os.path.join(root, fname)
dd = defaultdict(list)
for path in paths():
target_id, fname = path.strip().split('/')[-2:]
dd[target_id].append(fname)
assert sorted(map(len, dd.values())) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 5, 9, 15, 20, 22, 88]
output_labels = dict(
root_path = '.',
source = 'bagga-and-baldwin',
annotator_id = 'bagga-and-baldwin',
entities = []
)
github_base_url = 'https://raw.githubusercontent.com/trec-kba/streamcorpus-pipeline/master/'
for target_id, fnames in sorted(dd.items(), key=lambda x: (len(x[1]), int(x[0])), reverse=False):
docs = []
for fname in fnames:
doc_path = os.path.join('data/john-smith/original/', target_id, fname)
docs.append(dict(doc_path = doc_path,
abs_url = os.path.join(github_base_url, doc_path)))
entity = dict(
doc_path = docs,
slots = ['John ur"^smith[a-z]*$"'],
target_id = target_id,
)
output_labels['entities'].append(entity)
output_fname = os.path.join(os.path.dirname(__file__), 'john-smith-labels.yaml')
fh = open(output_fname, 'wb')
fh.write('''
## This was generated by streamcorpus-pipeline/data/john-smith/make-abs-urls.py
## This is intended to be used from the git repo's root directory, like so:
#
# streamcorpus_pipeline -c data/john-smith/john-smith-config.yaml -i data/john-smith/john-smith-labels.yaml
''')
yaml.dump(output_labels, fh)
fh.close()
print('done writing: %s' % output_fname)
| {
"content_hash": "b8ca24d85017bae6f196221b445a2799",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 149,
"avg_line_length": 32.32203389830509,
"alnum_prop": 0.631882538017829,
"repo_name": "trec-kba/streamcorpus-pipeline",
"id": "f50c614215b2eec766c11dd2c52589481f3ed17d",
"size": "1907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/john-smith/make-john-smith-labels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "855862"
},
{
"name": "Makefile",
"bytes": "1559"
},
{
"name": "Python",
"bytes": "582428"
},
{
"name": "Roff",
"bytes": "35618"
},
{
"name": "Shell",
"bytes": "5107"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": env.db('sqlite://:memory:'),
}
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
# CACHES = {
# 'default': env.cache_url_config('locmem://'),
# }
########## END CACHE CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING['loggers'] = {
'': {
'handlers': ['console'],
'level': 'DEBUG'
}
}
########## END LOGGING CONFIGURATION | {
"content_hash": "c4bb0d15c786bc4d7f68ab67ec80af32",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 26.485714285714284,
"alnum_prop": 0.6494066882416397,
"repo_name": "DeppSRL/open-partecipate",
"id": "1cf76a774e51e903f35fd10ae744b147b873d131",
"size": "927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/open_partecipate/settings/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37"
},
{
"name": "HTML",
"bytes": "3646"
},
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Nginx",
"bytes": "980"
},
{
"name": "Python",
"bytes": "99354"
}
],
"symlink_target": ""
} |
from itertools import islice
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.compat import StringIO, long, to_str, u
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import is_period_dtype
from pandas import DataFrame, MultiIndex, Series, compat, isna, to_datetime
from pandas.core.reshape.concat import concat
from pandas.io.common import (
BaseIterator, _get_handle, _infer_compression, _stringify_path,
get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import _validate_integer
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema, parse_table_schema
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
if not index and orient not in ['split', 'table']:
raise ValueError("'index=False' is only valid when 'orient' is "
"'split' or 'table'")
path_or_buf = _stringify_path(path_or_buf)
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if orient == 'table' and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or 'values')
if orient == 'table' and isinstance(obj, DataFrame):
writer = JSONTableWriter
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
writer = FrameWriter
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
s = writer(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler,
index=index).write()
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
fh, handles = _get_handle(path_or_buf, 'w', compression=compression)
try:
fh.write(s)
finally:
fh.close()
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, index, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.index = index
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return self._write(self.obj, self.orient, self.double_precision,
self.ensure_ascii, self.date_unit,
self.date_format == 'iso', self.default_handler)
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
return dumps(
obj,
orient=orient,
double_precision=double_precision,
ensure_ascii=ensure_ascii,
date_unit=date_unit,
iso_dates=iso_dates,
default_handler=default_handler
)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'{orient}'".format(orient=self.orient))
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
if not self.index and orient == 'split':
obj = {"name": obj.name, "data": obj.values}
return super(SeriesWriter, self)._write(obj, orient,
double_precision,
ensure_ascii, date_unit,
iso_dates, default_handler)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
"""
Try to format axes if they are datelike.
"""
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient))
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
if not self.index and orient == 'split':
obj = obj.to_dict(orient='split')
del obj["index"]
return super(FrameWriter, self)._write(obj, orient,
double_precision,
ensure_ascii, date_unit,
iso_dates, default_handler)
class JSONTableWriter(FrameWriter):
_default_orient = 'records'
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, index, default_handler=None):
"""
Adds a `schema` attribute with the Table Schema, resets
the index (can't do in caller, because the schema inference needs
to know what the index is, forces orient to records, and forces
date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj, orient, date_format, double_precision, ensure_ascii,
date_unit, index, default_handler=default_handler)
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
"`date_format='{fmt}'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`"
.format(fmt=date_format))
raise ValueError(msg)
self.schema = build_table_schema(obj, index=self.index)
        # NotImplemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError(
"orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
len(obj.columns & obj.index.names)):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=['timedelta']).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(
lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
# exclude index from obj if index=False
if not self.index:
self.obj = obj.reset_index(drop=True)
else:
self.obj = obj.reset_index(drop=False)
self.date_format = 'iso'
self.orient = 'records'
self.index = index
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
data = super(JSONTableWriter, self)._write(obj, orient,
double_precision,
ensure_ascii, date_unit,
iso_dates,
default_handler)
serialized = '{{"schema": {schema}, "data": {data}}}'.format(
schema=dumps(self.schema), data=data)
return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False, chunksize=None, compression='infer'):
"""
Convert a JSON string to pandas object.
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values', 'table'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
.. versionadded:: 0.23.0
'table' as an allowed value for the ``orient`` argument
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
chunksize : integer, default None
Return JsonReader object for iteration.
        See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Notes
-----
Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
:class:`Index` name of `index` gets written with :func:`to_json`, the
subsequent read operation will incorrectly set the :class:`Index` name to
``None``. This is because `index` is also used by :func:`DataFrame.to_json`
to denote a missing :class:`Index` name, and the subsequent
:func:`read_json` operation cannot distinguish between the two. The same
limitation is encountered with a :class:`MultiIndex` and any names
beginning with ``'level_'``.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression,
)
json_reader = JsonReader(
filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
convert_axes=convert_axes, convert_dates=convert_dates,
keep_default_dates=keep_default_dates, numpy=numpy,
precise_float=precise_float, date_unit=date_unit, encoding=encoding,
lines=lines, chunksize=chunksize, compression=compression,
)
if chunksize:
return json_reader
result = json_reader.read()
if should_close:
try:
filepath_or_buffer.close()
except: # noqa: flake8
pass
return result
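# Illustrative sketch (not an official example from this module): with lines=True
# and a chunksize, read_json returns a JsonReader that yields DataFrames, e.g.
#   >>> jsonl = '{"a": 1}\n{"a": 2}\n{"a": 3}\n'
#   >>> reader = read_json(StringIO(jsonl), lines=True, chunksize=2)
#   >>> [len(chunk) for chunk in reader]
#   [2, 1]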
class JsonReader(BaseIterator):
"""
JsonReader provides an interface for reading in a JSON file.
If initialized with ``lines=True`` and ``chunksize``, can be iterated over
``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
whole document.
"""
def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes,
convert_dates, keep_default_dates, numpy, precise_float,
date_unit, encoding, lines, chunksize, compression):
self.path_or_buf = filepath_or_buffer
self.orient = orient
self.typ = typ
self.dtype = dtype
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.keep_default_dates = keep_default_dates
self.numpy = numpy
self.precise_float = precise_float
self.date_unit = date_unit
self.encoding = encoding
self.compression = compression
self.lines = lines
self.chunksize = chunksize
self.nrows_seen = 0
self.should_close = False
if self.chunksize is not None:
self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
if not self.lines:
raise ValueError("chunksize can only be passed if lines=True")
data = self._get_data_from_filepath(filepath_or_buffer)
self.data = self._preprocess_data(data)
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, 'read') and not self.chunksize:
data = data.read()
if not hasattr(data, 'read') and self.chunksize:
data = StringIO(data)
return data
def _get_data_from_filepath(self, filepath_or_buffer):
"""
The function read_json accepts three input types:
1. filepath (string-like)
2. file-like object (e.g. open file object, StringIO)
3. JSON string
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
data = filepath_or_buffer
exists = False
if isinstance(data, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
if exists or self.compression is not None:
data, _ = _get_handle(filepath_or_buffer, 'r',
encoding=self.encoding,
compression=self.compression)
self.should_close = True
self.open_stream = data
return data
def _combine_lines(self, lines):
"""
Combines a list of JSON objects into one JSON object.
"""
lines = filter(None, map(lambda x: x.strip(), lines))
return '[' + ','.join(lines) + ']'
def read(self):
"""
Read the whole JSON input into a pandas object.
"""
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
data = to_str(self.data)
obj = self._get_object_parser(
self._combine_lines(data.split('\n'))
)
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
def _get_object_parser(self, json):
"""
Parses a json document into a pandas object.
"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
kwargs['dtype'] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj
def close(self):
"""
If we opened a stream earlier, in _get_data_from_filepath, we should
close it.
If an open stream or file was passed, we leave it open.
"""
if self.should_close:
try:
self.open_stream.close()
except (IOError, AttributeError):
pass
def __next__(self):
lines = list(islice(self.data, self.chunksize))
if lines:
lines_json = self._combine_lines(lines)
obj = self._get_object_parser(lines_json)
# Make sure that the returned objects have the right index.
obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
self.nrows_seen += len(obj)
return obj
self.close()
raise StopIteration
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of {units}'
.format(units=self._STAMP_UNITS))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"""
Checks that dict has only the appropriate keys for orient='split'.
"""
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): {bad_keys}")
.format(bad_keys=pprint_thing(bad_keys)))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
"""
Try to convert axes.
"""
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
"""
Try to parse a ndarray like into a column by inferring dtype.
"""
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except (TypeError, ValueError):
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except (TypeError, ValueError):
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except (TypeError, ValueError):
pass
# don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except (TypeError, ValueError):
pass
# coerce ints to 64
if data.dtype == 'int':
# coerce floats to 64
try:
data = data.astype('int64')
result = True
except (TypeError, ValueError):
pass
return data, result
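    # Coercion order used above: an explicit dtype wins, then date parsing,
    # then object -> float64, non-64-bit floats -> float64, losslessly
    # convertible float/object -> int64, and finally int -> int64; the returned
    # boolean flags whether any conversion happened.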
def _try_convert_to_date(self, data):
"""
Try to parse a ndarray like into a date column.
Try to coerce object in epoch/iso formats and integer/float in epoch
formats. Return a boolean if parsing was successful.
"""
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except (TypeError, ValueError, OverflowError):
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isna(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except Exception:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = {str(k): v for k, v in compat.iteritems(
loads(json, precise_float=self.precise_float))}
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = {str(k): v for k, v in compat.iteritems(decoded)}
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if len(args):
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = {str(k): v for k, v in compat.iteritems(decoded)}
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = {str(k): v for k, v in compat.iteritems(
loads(json, precise_float=self.precise_float))}
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
elif orient == 'table':
self.obj = parse_table_schema(json,
precise_float=self.precise_float)
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
"""
Take a conversion function and possibly recreate the frame.
"""
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
"""
Return if this col is ok to try for a date parse.
"""
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
| {
"content_hash": "eb68bb5d6e1aab15e4af3dbbf871f147",
"timestamp": "",
"source": "github",
"line_count": 950,
"max_line_length": 79,
"avg_line_length": 35.386315789473684,
"alnum_prop": 0.5480262962191749,
"repo_name": "GuessWhoSamFoo/pandas",
"id": "4bbccc8339d7cf789ea038318d46677b0b81a3ac",
"size": "33657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/io/json/json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "556"
},
{
"name": "Python",
"bytes": "14926624"
},
{
"name": "Shell",
"bytes": "29351"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from django.shortcuts import render,redirect
from django.views.generic import View
from .models import Perfil
from .forms import SolicitudColaboracionForm
# Create your views here.
class Solicitud_colaboracion(View):
def get(self,request):
template_name = 'registro/registro.html'
form = SolicitudColaboracionForm()
context = {'form':form}
return render (request,template_name,context)
def post(self,request):
template_name = "registro/registro.html"
template_name_success = 'registro/solicitud_exitosa.html'
new_solicit_form = SolicitudColaboracionForm(request.POST)
if new_solicit_form.is_valid():
new_solicit = new_solicit_form.save(commit=False)
new_solicit.save()
return render(request,template_name_success)
else:
context = {
'form': new_solicit_form
}
            return render(request,template_name,context)
"content_hash": "86992633525afa072f375d9ca97ef39d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 60,
"avg_line_length": 21.575,
"alnum_prop": 0.7427578215527231,
"repo_name": "SurielRuano/Orientador-Legal",
"id": "c9b53f0be0e791d7b939569282a0c4969e6b7282",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colaboradores/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "194202"
},
{
"name": "HTML",
"bytes": "62553"
},
{
"name": "JavaScript",
"bytes": "106997"
},
{
"name": "Python",
"bytes": "30652"
}
],
"symlink_target": ""
} |
"""OTA image generator
This script generates an OTA image file from the input hex files.
The XS data is compressed into the OTA image with the given LZSS tool.
"""
import argparse
import pathlib
import subprocess
import sys
import zlib
from intelhex import IntelHex
from struct import pack, pack_into
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--binary", required=True, type=pathlib.Path)
parser.add_argument("--ds_hex", required=True, type=pathlib.Path)
parser.add_argument("--xs_hex", required=True, type=pathlib.Path)
parser.add_argument("--lzss_tool", required=True, type=pathlib.Path)
parser.add_argument("--active_xs_len", required=True,
type=lambda x: int(x, 0))
parser.add_argument("--upgrade_xs_len", required=True,
type=lambda x: int(x, 0))
parser.add_argument("--project_config", required=True, type=pathlib.Path)
parser.add_argument("--ota_image_tool", required=True, type=pathlib.Path)
option = parser.parse_args()
intel_hex = IntelHex(str(option.ds_hex))
intel_hex.loadhex(str(option.xs_hex))
ds_segment, xs_segment = intel_hex.segments()[0:2]
ds_header = intel_hex.tobinarray(
start=ds_segment[0], end=ds_segment[0] + 0x10 - 1)
ds_data = intel_hex.tobinarray(
start=ds_segment[0] + 0x10, end=ds_segment[1] - 1)
xs_data = intel_hex.tobinarray(start=xs_segment[0], end=xs_segment[1] - 1)
# Align to 4 bytes
ds_data = pad_data(ds_data, 4)
xs_data = pad_data(xs_data, 4)
# Pad XS data CRC to DS data
xs_crc = zlib.crc32(xs_data)
ds_data += pack("<L", xs_crc)
# Compressed data
cx_data = compress_data(option, xs_data, ".xs.bin")
cx_crc = zlib.crc32(cx_data)
# DS header
ds_crc = zlib.crc32(ds_data)
pack_into("<LL", ds_header, 0x08, ds_crc, len(ds_data))
# XS header
xs_header = ds_header.tobytes()
xs_header += pack("<LL", xs_crc, len(xs_data))
xs_header += pack("<LL", cx_crc, len(cx_data))
print("DS: Length 0x{:08x}, CRC 0x{:08x}".format(len(ds_data), ds_crc))
print("XS: Length 0x{:08x}, CRC 0x{:08x}".format(len(xs_data), xs_crc))
print("CX: Length 0x{:08x}, CRC 0x{:08x}".format(len(cx_data), cx_crc))
print(
"Active XS: Used {:7,}, Free {:7,}".format(
len(xs_data), option.active_xs_len - len(xs_data)
)
)
upgrade_xs_len = len(xs_header) + len(cx_data)
print(
"Upgrade XS: Used {:7,}, Free {:7,}".format(
upgrade_xs_len, option.upgrade_xs_len - upgrade_xs_len
)
)
if option.upgrade_xs_len < upgrade_xs_len:
print("Error: Insufficient space for the upgrade XS.")
return -1
gen_image(option, ds_header, ds_data, xs_header, cx_data)
return 0
def compress_data(option, data, file_suffix):
raw_file = str(option.binary.with_suffix(file_suffix))
compressed_file = str(option.binary.with_suffix(file_suffix + ".lz"))
with open(raw_file, mode="wb") as binary:
binary.write(data)
subprocess.run([option.lzss_tool, "e", raw_file,
compressed_file, ], check=True)
with open(compressed_file, mode="rb") as binary:
return binary.read()
def pad_data(data, aligned_size):
data = bytearray(data)
remained_length = len(data) % aligned_size
if remained_length != 0:
data += bytes(aligned_size - remained_length)
return data
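# For example (illustrative): pad_data(b"\x01\x02\x03", 4) returns
# bytearray(b"\x01\x02\x03\x00"), so every CRC computed in main() is taken over
# a word-aligned buffer.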
def gen_image(option, ds_header, ds_data, xs_header, cx_data):
configs = parse_config(option)
write_binary(option, ds_header, ds_data, xs_header, cx_data)
run_ota_image_tool(option, configs)
# Get the header size
header_size = 0
for line in subprocess.run(
[option.ota_image_tool, "show", option.binary.with_suffix(".ota"), ],
check=True,
capture_output=True,
text=True,
).stdout.splitlines():
if line.startswith("Header Size:"):
header_size = int(line.split(":")[1])
break
if header_size % 4 == 0:
return
# Insert zeroes to align sections to word
inserted_zero_count = 4 - header_size % 4
write_binary(option, ds_header, ds_data, xs_header,
cx_data, inserted_zero_count)
run_ota_image_tool(option, configs)
def parse_config(option):
configs = {}
with open(option.project_config, "r") as config_file:
for line in config_file.readlines():
tokens = line.strip().split()
if not tokens or not tokens[0].endswith("define"):
continue
key = tokens[1]
value = tokens[2]
if value.startswith('"'):
configs[key] = value.strip('"')
else:
configs[key] = int(value, 0)
return configs
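# Illustrative example of the parsing above: a project_config line such as
#   #define CHIP_DEVICE_CONFIG_DEVICE_VENDOR_ID 0xFFF1
# yields configs["CHIP_DEVICE_CONFIG_DEVICE_VENDOR_ID"] == 0xFFF1 (the value here
# is made up), while quoted values are stored as strings with the quotes stripped.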
def write_binary(option, ds_header, ds_data, xs_header, cx_data, inserted_zero_count=0):
with open(str(option.binary), mode="wb") as binary:
binary.write(bytes(inserted_zero_count))
binary.write(ds_header)
binary.write(ds_data)
binary.write(xs_header)
binary.write(cx_data)
def run_ota_image_tool(option, configs):
subprocess.run(
[
option.ota_image_tool,
"create",
"--vendor-id={}".format(
configs["CHIP_DEVICE_CONFIG_DEVICE_VENDOR_ID"]),
"--product-id={}".format(
configs["CHIP_DEVICE_CONFIG_DEVICE_PRODUCT_ID"]),
"--version={}".format(
configs["CHIP_DEVICE_CONFIG_DEVICE_SOFTWARE_VERSION"]
),
"--version-str={}".format(
configs["CHIP_DEVICE_CONFIG_DEVICE_SOFTWARE_VERSION_STRING"]
),
"--digest-algorithm=sha256",
option.binary,
option.binary.with_suffix(".ota"),
],
stderr=subprocess.STDOUT,
check=True,
)
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "ac9ef28570c9aea0350a1a049aca8ca8",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 88,
"avg_line_length": 30.594871794871796,
"alnum_prop": 0.5953737847804224,
"repo_name": "project-chip/connectedhomeip",
"id": "aa7755287525bf08a1900be093e2b4e45e05179b",
"size": "6576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/infineon/cyw30739_sdk/gen_ota_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
} |
import string
import valve.source.a2s
import re
import logging
class GameServer(object):
def __init__(self, ip, port, password=None):
self.ip = ip
try:
self.port = int(port) # Port number cannot be string
except TypeError:
logging.error("Cannot parse port number!")
raise Exception("Cannot parse port number from value {}".format(port))
self.password = password
# Steamworks port should be one number higher than normal port
self.server = valve.source.a2s.ServerQuerier((self.ip, self.port + 1))
def players(self):
return self.server.get_players()
def info(self):
return self.server.get_info()
def rules(self):
return self.server.get_rules()
def ping(self):
return self.server.ping()
def raw_info(self):
self.server.request(valve.source.a2s.messages.InfoRequest())
raw_msg = self.server.get_response()
return filter(lambda x: x in string.printable, raw_msg)
class ArmaServer(GameServer):
gamestate = {
-1: "No Answer",
1: "Server Empty / Mission Screen",
3: "Slotting Screen",
5: "Loading Mission",
6: "Briefing Screen",
7: "In Progress",
9: "Debriefing Screen",
12: "Setting up",
13: "Briefing",
14: "Playing"
}
def __init__(self, ip, port, password=None):
super(ArmaServer, self).__init__(ip, port, password)
def state(self):
# python-valve doesn't support the extended data field in the info
# request message yet, so we have to do this by hand.
# raw message looks like this:
# IFolk [email protected]
# bf,r152,n0,s1,i1,mf,lf,vt,dt,ttdm,g65545,hd12ce14a,c4194303-4194303,f0,pw,e0,j0,k0,
self.server.request(valve.source.a2s.messages.InfoRequest())
raw_msg = self.server.get_response()
msg = filter(lambda x: x in string.printable, raw_msg)
        regex = re.compile(".*,s(?P<serverstate>\d*),.*,t(?P<gametype>\w*),.*", re.DOTALL)
        m = regex.search(msg)
        s = int(m.group('serverstate'))
return [m.group('gametype'), self.gamestate[s], s]
class InsurgencyServer(GameServer):
def __init__(self, ip, port, password=None):
super(InsurgencyServer, self).__init__(ip, port, password)
def state(self):
# python-valve doesn't support the extended data field in the info
# request message yet, so we have to do this by hand.
# raw message looks like this:
# IFolk ARPSembassy_coopinsurgencyInsurgencydw2.0.4.2i~@checkpoint,theater:default,ver:2042,nwibanlist,nospawnprotection,f
self.server.request(valve.source.a2s.messages.InfoRequest())
raw_msg = self.server.get_response()
msg = filter(lambda x: x in string.printable, raw_msg)
        regex = re.compile(".*,s(?P<serverstate>\d*),.*,t(?P<gametype>\w*),.*", re.DOTALL)
        m = regex.search(msg)
        s = int(m.group('serverstate'))
        # gamestate is only defined on ArmaServer, so reference it there explicitly
        return [m.group('gametype'), ArmaServer.gamestate[s]]
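# Minimal usage sketch (not part of the original module); the address below is a
# placeholder and any python-valve errors are simply printed.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    demo = ArmaServer("127.0.0.1", 2302)  # placeholder IP and game port
    try:
        info = demo.info()
        print("%s: %s/%s players" % (info["server_name"],
                                     info["player_count"],
                                     info["max_players"]))
        print("state: %s" % demo.state())
    except Exception as exc:
        print("query failed: %s" % exc)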
| {
"content_hash": "930c589d868a3c525a16a2f7a0b7ed14",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 130,
"avg_line_length": 34.967032967032964,
"alnum_prop": 0.6209930861093652,
"repo_name": "darkChozo/FAbot",
"id": "c0beee1a5c481acff343a0a33922d5cb7733b5d1",
"size": "3206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/game_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30954"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |