| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int64 3 to 1.05M |
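
Each record below pairs a source file (`code`) with its `repo_name`, `path`, `language`, `license`, and `size`. As a minimal sketch of how rows with this schema could be consumed, assuming each record is a plain dict keyed by exactly these six columns (the helper name and the license whitelist are illustrative, not part of the dataset):

```python
def iter_permissive_python_files(records, max_size=4096):
    """Yield (repo_name, path, code) for small, permissively licensed Python files."""
    permissive = {"mit", "bsd-2-clause", "apache-2.0"}  # example license values seen in the rows below
    for row in records:
        if (row["language"] == "Python"
                and row["license"] in permissive
                and row["size"] <= max_size):
            yield row["repo_name"], row["path"], row["code"]
```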
# -*- coding: utf-8 -*-
from orgmode import ORGMODE, repeat
from orgmode.menu import Submenu, ActionEntry
from orgmode.keybinding import Keybinding, Plug, Command
from orgmode import settings
import vim
class TagsProperties(object):
u""" TagsProperties plugin """
def __init__(self):
u""" Initialize plugin """
object.__init__(self)
# menu entries this plugin should create
self.menu = ORGMODE.orgmenu + Submenu(u'&TAGS and Properties')
# key bindings for this plugin
# key bindings are also registered through the menu so only additional
# bindings should be put in this variable
self.keybindings = []
# commands for this plugin
self.commands = []
@classmethod
def complete_tags(cls):
u""" build a list of tags and store it in variable b:org_tag_completion
"""
d = ORGMODE.get_document()
heading = d.current_heading()
if not heading:
return
leading_portion = vim.eval(u'a:ArgLead').decode(u'utf-8')
cursor = int(vim.eval(u'a:CursorPos'))
# extract currently completed tag
idx_orig = leading_portion.rfind(u':', 0, cursor)
if idx_orig == -1:
idx = 0
else:
idx = idx_orig
current_tag = leading_portion[idx: cursor].lstrip(u':')
head = leading_portion[:idx + 1]
if idx_orig == -1:
head = u''
tail = leading_portion[cursor:]
# extract all tags of the current file
all_tags = set()
for h in d.all_headings():
for t in h.tags:
all_tags.add(t)
ignorecase = bool(int(settings.get(u'org_tag_completion_ignorecase', int(vim.eval(u'&ignorecase')))))
possible_tags = []
current_tags = heading.tags
for t in all_tags:
if ignorecase:
if t.lower().startswith(current_tag.lower()):
possible_tags.append(t)
elif t.startswith(current_tag):
possible_tags.append(t)
vim.command((u'let b:org_complete_tags = [%s]' % u', '.join([u'"%s%s:%s"' % (head, i, tail) for i in possible_tags])).encode(u'utf-8'))
@classmethod
@repeat
def set_tags(cls):
u""" Set tags for current heading
"""
d = ORGMODE.get_document()
heading = d.current_heading()
if not heading:
return
# retrieve tags
res = None
if heading.tags:
res = vim.eval(u'input("Tags: ", ":%s:", "customlist,Org_complete_tags")' % u':'.join(heading.tags))
else:
res = vim.eval(u'input("Tags: ", "", "customlist,Org_complete_tags")')
if res is None:
# user pressed <Esc>; abort any further processing
return
# remove empty tags
heading.tags = filter(lambda x: x.strip() != u'', res.decode(u'utf-8').strip().strip(u':').split(u':'))
d.write()
return u'OrgSetTags'
@classmethod
def realign_tags(cls):
u"""
Realign tags when the user finishes editing a heading
"""
d = ORGMODE.get_document(allow_dirty=True)
heading = d.find_current_heading()
if not heading:
return
if vim.current.window.cursor[0] == heading.start_vim:
heading.set_dirty_heading()
d.write_heading(heading, including_children=False)
@classmethod
def realign_all_tags(cls):
u"""
Realign tags of all headings in the document
"""
d = ORGMODE.get_document()
for heading in d.all_headings():
heading.set_dirty_heading()
d.write()
def register(self):
u"""
Registration of the plugin. Key bindings and other initialization should be done here.
"""
# an Action menu entry which binds "keybinding" to action ":action"
settings.set(u'org_tag_column', u'77')
settings.set(u'org_tag_completion_ignorecase', int(vim.eval(u'&ignorecase')))
settings.set(u'org_leader', u',')
leader = settings.get(u'org_leader', u',')
self.keybindings.append(Keybinding(u'%st' % leader, Plug(u'OrgSetTags', u':py ORGMODE.plugins[u"TagsProperties"].set_tags()<CR>')))
self.menu + ActionEntry(u'Set &Tags', self.keybindings[-1])
self.commands.append(Command(u'OrgTagsRealign', u":py ORGMODE.plugins[u'TagsProperties'].realign_all_tags()"))
# workaround to align tags when user is leaving insert mode
vim.command(u"""function Org_complete_tags(ArgLead, CmdLine, CursorPos)
python << EOF
ORGMODE.plugins[u'TagsProperties'].complete_tags()
EOF
if exists('b:org_complete_tags')
let tmp = b:org_complete_tags
unlet b:org_complete_tags
return tmp
else
return []
endif
endfunction""".encode(u'utf-8'))
# this is for all org files opened after this file
vim.command(u"au FileType org :au InsertLeave <buffer> :py ORGMODE.plugins[u'TagsProperties'].realign_tags()".encode(u'utf-8'))
# this is for the current file
vim.command(u"au InsertLeave <buffer> :py ORGMODE.plugins[u'TagsProperties'].realign_tags()".encode(u'utf-8'))
| kratob/dotfiles | vim/.vim.symlink/ftplugin/orgmode/plugins/TagsProperties.py | Python | mit | 4,531 |
"""Administration form for search settings."""
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import (ugettext,
ugettext_lazy as _)
from djblets.siteconfig.forms import SiteSettingsForm
from reviewboard.admin.siteconfig import load_site_config
from reviewboard.search import search_backend_registry
class SearchSettingsForm(SiteSettingsForm):
"""Form for search settings.
This form manages the main search settings (enabled, how many results, and
what backend to use), as well as displaying per-search backend forms so
that they may be configured.
For example, Elasticsearch requires a URL and index name, while Whoosh
requires a file path to store its index. These fields (and fields for any
other added search backend) will only be shown to the user when the
appropriate search backend is selected.
"""
search_enable = forms.BooleanField(
label=_('Enable search'),
help_text=_('If enabled, provides a search field for quickly '
'searching through review requests, diffs, and users.'),
required=False)
search_results_per_page = forms.IntegerField(
label=_('Search results per page'),
min_value=1,
required=False)
search_backend_id = forms.ChoiceField(
label=_('Search backend'),
required=False,
widget=forms.Select(attrs={
'data-subform-group': 'search-backend',
}))
search_on_the_fly_indexing = forms.BooleanField(
label=_('On-the-fly indexing'),
required=False,
help_text=('If enabled, the search index will be updated dynamically '
'when review requests or users change.<br>'
'<strong>Note:</strong> This is not recommended for use '
'with the Whoosh engine for large or multi-server '
'installs.'))
def __init__(self, siteconfig, data=None, *args, **kwargs):
"""Initialize the search engine settings form.
This will also initialize the settings forms for each search engine
backend.
Args:
siteconfig (djblets.siteconfig.models.SiteConfiguration):
The site configuration handling the server's settings.
data (dict, optional):
The form data.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
"""
super(SearchSettingsForm, self).__init__(siteconfig, data, *args,
**kwargs)
form_kwargs = {
'files': kwargs.get('files'),
'request': kwargs.get('request'),
}
self.search_backend_forms = {
backend.search_backend_id: backend.get_config_form(data,
**form_kwargs)
for backend in search_backend_registry
}
self.fields['search_backend_id'].choices = [
(backend.search_backend_id, backend.name)
for backend in search_backend_registry
]
def is_valid(self):
"""Return whether the form is valid.
This will check the validity of the fields on this form and on
the selected search backend's settings form.
Returns:
bool:
``True`` if the main settings form and the search backend's settings
form are valid. ``False`` if either form is invalid.
"""
if not super(SearchSettingsForm, self).is_valid():
return False
backend_id = self.cleaned_data['search_backend_id']
backend_form = self.search_backend_forms[backend_id]
return backend_form.is_valid()
def clean_search_backend_id(self):
"""Clean the ``search_backend_id`` field.
This will ensure the chosen search backend is valid (i.e., it is
available in the registry) and that its dependencies have been
installed.
Returns:
unicode:
The search backend ID.
Raises:
django.core.exceptions.ValidationError:
Raised if the search engine ID chosen cannot be used.
"""
search_backend_id = self.cleaned_data['search_backend_id']
search_backend = search_backend_registry.get_search_backend(
search_backend_id)
if not search_backend:
raise ValidationError(
ugettext('The search engine "%s" could not be found. '
'If this is provided by an extension, you will have '
'to make sure that extension is enabled.')
% search_backend_id
)
search_backend.validate()
return search_backend_id
def clean(self):
"""Clean the form and the sub-form for the selected search backend.
Returns:
dict:
The cleaned data.
"""
if self.cleaned_data['search_enable']:
search_backend_id = self.cleaned_data.get('search_backend_id')
# The search_backend_id field is only available if the backend
# passed validation.
if search_backend_id:
backend_form = self.search_backend_forms[search_backend_id]
if not backend_form.is_valid():
self._errors.update(backend_form.errors)
return self.cleaned_data
def save(self):
"""Save the form and sub-form for the selected search backend.
This forces a site configuration reload.
"""
search_backend_id = self.cleaned_data['search_backend_id']
if self.cleaned_data['search_enable']:
# We only need to update the backend settings when search is
# enabled.
backend_form = self.search_backend_forms[search_backend_id]
backend = search_backend_registry.get_search_backend(
search_backend_id)
backend.configuration = backend.get_configuration_from_form_data(
backend_form.cleaned_data)
super(SearchSettingsForm, self).save()
# Reload the site configuration so any settings changes take effect.
load_site_config()
class Meta:
title = _('Search Settings')
subforms = (
{
'subforms_attr': 'search_backend_forms',
'controller_field': 'search_backend_id',
},
)
| chipx86/reviewboard | reviewboard/admin/forms/search_settings.py | Python | mit | 6,654 |
'''
Created on Feb 8, 2011
@author: vgapeyev
'''
import csv, re
__nonspace_whitespace = re.compile(r"[\t\n\r\f\v]")
__long_whitespace = re.compile(r"[ ]{2,}")
def normalize_whitespace(str):
str = re.sub(__nonspace_whitespace, " ", str)
str = re.sub(__long_whitespace, " ", str)
return str
def empty_line(line):
if line.isspace():
return True
else:
return False
def at_beginning(matchobj, str):
if not matchobj:
return False
prefix = str[:matchobj.start()]
return prefix == "" or prefix.isspace()
def likely_chimp_name(prov_time, prov_rest):
return (prov_time == "PM" or prov_time == "AM") \
and prov_rest[0] == " " \
and prov_rest[1].isalpha()
def pad_zero(time):
if time.isdigit() and len(time) == 3:
return "0" + time
else:
return time
def pick_time(line):
# timepat_spec = r"(?P<time>\d\d\d\d)"
# timepat_spec = r"(?P<time>AM|PM|(\d{4}(\s*(-|until)\s*\d{4})?(\s*(AM|PM))?))"
timepat_spec = r"(?P<time>AM|PM|(\d{3,4}(\s*(-|until)\s*\d{3,4})?(\s*(AM|PM))?))"
timepat = re.compile(timepat_spec)
time_match = re.search(timepat, line)
if time_match and at_beginning(time_match, line):
time = time_match.group("time")
rest = line[time_match.end("time"):]
if not likely_chimp_name(time, rest):
return (pad_zero(time), rest.lstrip())
else: return ("", line)
else:
return ("", line)
def pick_recnum(line):
# pat_spec = r"N-(?P<animal>[a-zA-Z]+)-(?P<num>\d+)"
# pat_spec = r"N-(?P<animal>[a-zA-Z]+)-(?P<num>\d+\w*)"
pat_spec = r"[Nn]\s*(-|_|=)=?\s*(?P<animal>[a-zA-Z]+)\s*(-|_)?\s*(?P<num>\d+\w*)"
pat = re.compile(pat_spec)
match = re.search(pat, line)
if match and at_beginning(match, line):
equip = "N"
animal = match.group("animal").upper()
num = match.group("num")
rest = line[match.end():]
return ((equip, animal, num), rest.lstrip())
else:
return (("", "", ""), line)
def parse_line(line):
(time, line) = pick_time(line)
(recnum, line) = pick_recnum(line)
text = normalize_whitespace(line.strip())
return (time, recnum[0], recnum[1], recnum[2], text)
def parse_one_file(src_file, dest_file):
#print "Parsing %s" % src_file
#print "Output to %s" % dest_file
fin = open(src_file)
fout = open(dest_file, "w")
csv_writer = csv.writer(fout)
count = 0
for line in fin:
count = count + 1
if not empty_line(line):
(time, equip, animal, num, text) = parse_line(line)
csv_writer.writerow([count, time, equip, animal, num, text])
fin.close()
fout.close()
__txt_fmt = "%-60.60s"
__csv_fmt = "%3.3s %5s %1.1s %3.3s %3s |%-120.120s|"
def display_parse(txt_fname, csv_fname):
txt_file = open(txt_fname)
csv_file = open(csv_fname)
csv_reader = csv.reader(csv_file)
txt_num = 1
for csv_line in csv_reader:
csv_num = int(csv_line[0])
while txt_num < csv_num:
txt_num = txt_num + 1
print __csv_fmt % ("", "", "", "", "", "",),
print ("#"+__txt_fmt+"#") % txt_file.readline().rstrip()
txt_num = txt_num + 1
print __csv_fmt % tuple(csv_line),
print ("$"+__txt_fmt+"$") % txt_file.readline().rstrip()
txt_file.close()
csv_file.close()
def compare_parses(old_fname, new_fname):
old_file, new_file = open(old_fname), open(new_fname)
old_reader, new_reader = csv.reader(old_file), csv.reader(new_file)
for (old_line, new_line) in zip(old_reader, new_reader):
if old_line != new_line:
print ("o"+__csv_fmt) % tuple(old_line)
print ("n"+__csv_fmt) % tuple(new_line)
print ""
old_file.close(), new_file.close()
def main():
import optparse, sys
p = optparse.OptionParser()
p.set_usage("%prog source_file dest_file")
p.set_description("TODO description")
opt, args = p.parse_args()
if len(args) != 2:
sys.stderr.write(p.get_usage())
raise SystemExit(1)
src_file = args[0]
dest_file = args[1]
parse_one_file(src_file, dest_file)
def main_hardwired(base):
infile = "test_data/inputs/%s.txt" % base
outfile = "test_data/outputs/%s.csv" % base
parse_one_file(infile, outfile)
def display_hardwired(base):
infile = "test_data/inputs/%s.txt" % base
outfile = "test_data/work/%s.csv" % base
display_parse(infile, outfile)
def compare_hardwired(base):
workfile = "test_data/work/%s.csv" % base
outfile = "test_data/outputs/%s.csv" % base
compare_parses(workfile, outfile)
if __name__ == '__main__':
#main()
#main_hardwired("1971-07-15")
#display_hardwired("1971-07-14")
compare_hardwired("1971-07-14") | NESCent/Chimp-Recs-FieldObservations | ObservationParsing/src/obsparser/app.py | Python | cc0-1.0 | 4,869 |
#!/usr/bin/env python
# coding=utf-8
"""316. Numbers in decimal expansions
https://projecteuler.net/problem=316
Let p = p1 p2 p3 ... be an infinite sequence of random digits, selected from
{0,1,2,3,4,5,6,7,8,9} with equal probability.
It can be seen that p corresponds to the real number 0.p1 p2 p3 ....
It can also be seen that choosing a random real number from the interval [0,1)
is equivalent to choosing an infinite sequence of random digits selected from
{0,1,2,3,4,5,6,7,8,9} with equal probability.
For any positive integer n with d decimal digits, let k be the smallest index
such that
pk, pk+1, ...pk+d-1 are the decimal digits of n, in the same order.
Also, let g(n) be the expected value of k; it can be proven that g(n) is
always finite and, interestingly, always an integer number.
For example, if n = 535, then
for p = 31415926 **535** 897...., we get k = 9
for p = 35528714365004956000049084876408468 **535** 4..., we get k = 36
etc and we find that g(535) = 1008.
Given that $\sum \limits_{n = 2}^{999} {g \left ( \left \lfloor
\dfrac{10^6}{n} \right \rfloor \right )} = 27280188$, find $\sum \limits_{n =
2}^{999999} {g \left ( \left \lfloor \dfrac{10^{16}}{n} \right \rfloor \right
)}$.
__Note__
: $\lfloor x \rfloor$ represents the floor function.
"""
| openqt/algorithms | projecteuler/pe316-numbers-in-decimal-expansions.py | Python | gpl-3.0 | 1,292 |
#!/usr/bin/python2.7
import os, ast, time, sys, re, inspect, traceback, time, subprocess, json, requests
from threading import Thread
from subprocess import Popen, PIPE
import multiprocessing as mp
from multiprocessing import Pool
# magic:
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
proj_path = "/github/pandorabox/"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pandorabox.settings")
sys.path.append(proj_path)
os.chdir(proj_path)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Import Django modules
from django.core.management.base import BaseCommand, CommandError
from ceamon.models import sapnode, CommandModel
from django.core import serializers
from django.db import connection, transaction
#from .scripts.ck_timeout import timeout
from requests.auth import HTTPBasicAuth
ruta_scripts = "/commands/scripts/"
""" # TEMPORAL
from django.contrib.auth.models import User
user = User.objects.create_user(username='service',
email='[email protected]',
password='initial')
"""
username = "service"
password = "initial"
url = "http://localhost:9988/status/"
class mydict(dict):
def __str__(self):
return json.dumps(self)
def f(*names):
r = {}
for n in names:
r[[ name for name in globals() if globals()[name] is n ][0]] = n
for x in r:
x = "'" + x + "'"
return r
def get_script_dir(follow_symlinks=True):
if getattr(sys, 'frozen', False):
path = os.path.abspath(sys.executable)
else:
path = inspect.getabsfile(get_script_dir)
if follow_symlinks:
path = os.path.realpath(path)
return os.path.dirname(path)
def worker(host,active_moni,sid,product,id):
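# Run every check script configured for this SAP node against the host and push the parsed alert result to the status REST endpoint.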
try:
if active_moni == "Yes":
e = sapnode.objects.get(id=id)
project = e.project.all().filter().order_by().values()
nod = e.command.all().filter().order_by().values()
for command in nod:
command=command['check']
result = subprocess.Popen(BASE_DIR + ruta_scripts + command + ' ' + host + ' ' + sid + ' ' + product, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
update = result.communicate()[0] # first item of the list
update = str(update) # convert to str
update=update.strip()
#del_simbolo = [ "['" , "']" ] # does not work with special symbols
#update = update.translate(None, ''.join(del_simbolo)) # the join
#update = update.replace("\\n", "") # replace the special symbol with nothing
if "alerts" in command:
json_out = update.replace("'", "\"") # Mod str
d = json.loads(json_out) # convert to dict
status = d['STATUS']
status_id = d['STATUS_ID']
comment = d['COMMENT']
host = d['HOST']
print status, status_id, comment, host
#{"status": "DANGER", "status_id": "ck_ping.py", "comment": "ssssssssssss", "system": 2}
requests.put(url, json={"status": status, "status_id": status_id, "comment": comment, "system": id}, auth=HTTPBasicAuth(username, password))
else:
print(" Monitoring is disabled for"+ ' ' + sid + ' ' + "in" + ' ' + host)
except KeyboardInterrupt:
print "Caught KeyboardInterrupt, terminating workers"
pool.terminate()
pool.join()
def handler():
machines = sapnode.objects.filter().order_by().values()
pool = mp.Pool(4) # maximum number of processes created by the script
for x in machines:
sid=x['sid']
id=x['id']
host=x['hostname']
product=x['product']
active_moni=x['active_moni'] # (No=disabled), (Yes=enabled)
pool.apply_async(worker, args=(host,active_moni,sid,product,id,)) # run the worker asynchronously for each host, passing its arguments
pool.close()
pool.join()
if __name__ == '__main__':
handler()
| limbail/ceamon | pandorabox/ceamon/management/commands/m_ceamon_alerts_worker.py | Python | mit | 4,124 |
import pickle
import pytest
from praw.models import Subreddit, WikiPage
from ... import UnitTest
class TestSubreddit(UnitTest):
def test_equality(self):
subreddit1 = Subreddit(
self.reddit, _data={"display_name": "dummy1", "n": 1}
)
subreddit2 = Subreddit(
self.reddit, _data={"display_name": "Dummy1", "n": 2}
)
subreddit3 = Subreddit(
self.reddit, _data={"display_name": "dummy3", "n": 2}
)
assert subreddit1 == subreddit1
assert subreddit2 == subreddit2
assert subreddit3 == subreddit3
assert subreddit1 == subreddit2
assert subreddit2 != subreddit3
assert subreddit1 != subreddit3
assert "dummy1" == subreddit1
assert subreddit2 == "dummy1"
def test_construct_failure(self):
message = "Either `display_name` or `_data` must be provided."
with pytest.raises(TypeError) as excinfo:
Subreddit(self.reddit)
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
Subreddit(self.reddit, "dummy", {"id": "dummy"})
assert str(excinfo.value) == message
def test_fullname(self):
subreddit = Subreddit(
self.reddit, _data={"display_name": "name", "id": "dummy"}
)
assert subreddit.fullname == "t5_dummy"
def test_hash(self):
subreddit1 = Subreddit(
self.reddit, _data={"display_name": "dummy1", "n": 1}
)
subreddit2 = Subreddit(
self.reddit, _data={"display_name": "Dummy1", "n": 2}
)
subreddit3 = Subreddit(
self.reddit, _data={"display_name": "dummy3", "n": 2}
)
assert hash(subreddit1) == hash(subreddit1)
assert hash(subreddit2) == hash(subreddit2)
assert hash(subreddit3) == hash(subreddit3)
assert hash(subreddit1) == hash(subreddit2)
assert hash(subreddit2) != hash(subreddit3)
assert hash(subreddit1) != hash(subreddit3)
def test_pickle(self):
subreddit = Subreddit(
self.reddit, _data={"display_name": "name", "id": "dummy"}
)
for level in range(pickle.HIGHEST_PROTOCOL + 1):
other = pickle.loads(pickle.dumps(subreddit, protocol=level))
assert subreddit == other
def test_repr(self):
subreddit = Subreddit(self.reddit, display_name="name")
assert repr(subreddit) == "Subreddit(display_name='name')"
def test_search__params_not_modified(self):
params = {"dummy": "value"}
subreddit = Subreddit(self.reddit, display_name="name")
generator = subreddit.search(None, params=params)
assert generator.params["dummy"] == "value"
assert params == {"dummy": "value"}
def test_str(self):
subreddit = Subreddit(
self.reddit, _data={"display_name": "name", "id": "dummy"}
)
assert str(subreddit) == "name"
def test_submit_failure(self):
message = "Either `selftext` or `url` must be provided."
subreddit = Subreddit(self.reddit, display_name="name")
with pytest.raises(TypeError) as excinfo:
subreddit.submit("Cool title")
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
subreddit.submit("Cool title", selftext="a", url="b")
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
subreddit.submit("Cool title", selftext="", url="b")
assert str(excinfo.value) == message
def test_upload_banner_additional_image(self):
subreddit = Subreddit(self.reddit, display_name="name")
with pytest.raises(ValueError):
subreddit.stylesheet.upload_banner_additional_image(
"dummy_path", align="asdf"
)
class TestSubredditFlair(UnitTest):
def test_set(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
with pytest.raises(TypeError):
subreddit.flair.set(
"a_redditor", css_class="myCSS", flair_template_id="gibberish"
)
class TestSubredditFlairTemplates(UnitTest):
def test_bad_add(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
with pytest.raises(TypeError):
subreddit.flair.templates.add(
"impossible", css_class="conflict", background_color="#ABCDEF"
)
with pytest.raises(TypeError):
subreddit.flair.templates.add(
"impossible", css_class="conflict", mod_only=False
)
with pytest.raises(TypeError):
subreddit.flair.templates.add(
"impossible", css_class="conflict", text_color="dark"
)
with pytest.raises(TypeError):
subreddit.flair.templates.add(
"impossible",
css_class="conflict",
background_color="#ABCDEF",
mod_only=False,
text_color="dark",
)
class TestSubredditLinkFlairTemplates(UnitTest):
def test_bad_add(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
with pytest.raises(TypeError):
subreddit.flair.link_templates.add(
"impossible", css_class="conflict", background_color="#ABCDEF"
)
with pytest.raises(TypeError):
subreddit.flair.link_templates.add(
"impossible", css_class="conflict", mod_only=False
)
with pytest.raises(TypeError):
subreddit.flair.link_templates.add(
"impossible", css_class="conflict", text_color="dark"
)
with pytest.raises(TypeError):
subreddit.flair.link_templates.add(
"impossible",
css_class="conflict",
background_color="#ABCDEF",
mod_only=False,
text_color="dark",
)
class TestSubredditWiki(UnitTest):
def test__getitem(self):
subreddit = Subreddit(self.reddit, display_name="name")
wikipage = subreddit.wiki["Foo"]
assert isinstance(wikipage, WikiPage)
assert "foo" == wikipage.name
| leviroth/praw | tests/unit/models/reddit/test_subreddit.py | Python | bsd-2-clause | 6,349 |
# -*- encoding: utf-8 -*-
# Module iaendpoints
from numpy import *
from string import upper
def iaendpoints(OPTION="LOOP"):
from iase2hmt import iase2hmt
from iabinary import iabinary
Iab = None
OPTION = upper(OPTION)
if OPTION == 'LOOP':
Iab = iase2hmt( iabinary([[0,0,0],
[0,1,0],
[0,0,0]]),
iabinary([[0,0,0],
[1,0,1],
[1,1,1]]))
elif OPTION == 'HOMOTOPIC':
Iab = iase2hmt( iabinary([[0,1,0],
[0,1,0],
[0,0,0]]),
iabinary([[0,0,0],
[1,0,1],
[1,1,1]]))
return Iab
| mariecpereira/IA369Z | deliver/ia870/iaendpoints.py | Python | mit | 837 |
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import unicode_literals
from .__about__ import __version__, __description__, __author__, __url__
from .benchmark import Benchmark, RunResult, DEFAULT_TIMES
from .report import BaseReporter, JsonReporter, CsvReporter, MarkdownReporter, RstReporter, FileReporter, FixedWidth
from .runner import BenchmarkRunner
| noirbizarre/minibench | minibench/__init__.py | Python | mit | 365 |
#!/usr/bin/env python
import codecs, os, shutil, subprocess, sys, tempfile
mteval_pl = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'mt-diff', 'files', 'mteval-v13m.pl')
def main(argv):
if len(argv[1:]) < 2:
print 'Score with NIST BLEU'
print ''
print 'usage: {0} <hyp> <ref> [opts]'.format(argv[0])
print ''
print '-------------------'
print 'Options for scoring'
print '-------------------'
print ''
subprocess.call(['perl', mteval_pl, '-h'])
sys.exit(1)
hyp = argv[1]
ref = argv[2]
opts = argv[3:]
src_sgm = tempfile.mktemp(suffix='.sgm')
tst_sgm = tempfile.mktemp(suffix='.sgm')
ref_sgm = tempfile.mktemp(suffix='.sgm')
sgm(ref, src_sgm, 'srcset')
sgm(hyp, tst_sgm, 'tstset')
sgm(ref, ref_sgm, 'refset')
cmd = ['perl', mteval_pl, '-s', src_sgm, '-t', tst_sgm, '-r', ref_sgm]
for opt in opts:
cmd.append(opt)
subprocess.call(cmd)
os.remove(src_sgm)
os.remove(tst_sgm)
os.remove(ref_sgm)
def sgm(f_in, f_out, f_type):
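# Wrap a plain-text file (one segment per line) in the minimal SGML envelope (srcset/tstset/refset) expected by mteval-v13m.pl.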
i = open(f_in)
o = open(f_out, 'w')
s = 0
print >> o, '<{0} trglang="trg" setid="set" srclang="src">'.format(f_type)
print >> o, '<doc docid="doc" sysid="sys">'
for line in i:
s += 1
print >> o, '<seg id="{0}"> {1} </seg>'.format(s, line.strip())
print >> o, '</doc>'
print >> o, '</{0}>'.format(f_type)
i.close()
o.close()
if __name__ == '__main__' : main(sys.argv)
| qingsongma/blend | tools/meteor-1.4/scripts/bleu.py | Python | gpl-3.0 | 1,540 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table(u'ecg_balancing_userprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name=u'profile', unique=True, to=orm['auth.User'])),
))
db.send_create_signal(u'ecg_balancing', ['UserProfile'])
# Adding M2M table for field companies on 'UserProfile'
m2m_table_name = db.shorten_name(u'ecg_balancing_userprofile_companies')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userprofile', models.ForeignKey(orm[u'ecg_balancing.userprofile'], null=False)),
('company', models.ForeignKey(orm[u'ecg_balancing.company'], null=False))
))
db.create_unique(m2m_table_name, ['userprofile_id', 'company_id'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table(u'ecg_balancing_userprofile')
# Removing M2M table for field companies on 'UserProfile'
db.delete_table(db.shorten_name(u'ecg_balancing_userprofile_companies'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ecg_balancing.company': {
'Meta': {'object_name': 'Company'},
'activities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employees_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foundation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'managing_directors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model_creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owners': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'ecg_balancing.companybalance': {
'Meta': {'object_name': 'CompanyBalance'},
'auditor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_good': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'balance'", 'to': u"orm['ecg_balancing.Company']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balances'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'peer_companies': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '255', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'process_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prospect': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'ecg_balancing.companybalanceindicator': {
'Meta': {'object_name': 'CompanyBalanceIndicator'},
'company_balance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.CompanyBalance']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'evaluation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.Indicator']"})
},
u'ecg_balancing.ecgmatrix': {
'Meta': {'object_name': 'ECGMatrix'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "u'4.1'", 'max_length': '6'})
},
u'ecg_balancing.indicator': {
'Meta': {'object_name': 'Indicator'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'ecg_value': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'indicators'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'max_evaluation': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'parent_indicator'", 'null': 'True', 'to': u"orm['ecg_balancing.Indicator']"}),
'stakeholder': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'subindicator_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'companies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'ecg_balancing.userrole': {
'Meta': {'object_name': 'UserRole'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ecg_balancing.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['ecg_balancing'] | sinnwerkstatt/ecg-balancing | ecg_balancing/migrations/0007_auto__add_userprofile.py | Python | mit | 11,373 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Core.IdentifiedObject import IdentifiedObject
class GeographicalRegion(IdentifiedObject):
"""A geographical region of a power system network model.
"""
def __init__(self, Regions=None, *args, **kw_args):
"""Initialises a new 'GeographicalRegion' instance.
@param Regions: The association is used in the naming hierarchy.
"""
self._Regions = []
self.Regions = [] if Regions is None else Regions
super(GeographicalRegion, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["Regions"]
_many_refs = ["Regions"]
def getRegions(self):
"""The association is used in the naming hierarchy.
"""
return self._Regions
def setRegions(self, value):
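# Reassigning Regions detaches the current sub-regions and re-parents the new ones on both sides of the association.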
for x in self._Regions:
x.Region = None
for y in value:
y._Region = self
self._Regions = value
Regions = property(getRegions, setRegions)
def addRegions(self, *Regions):
for obj in Regions:
obj.Region = self
def removeRegions(self, *Regions):
for obj in Regions:
obj.Region = None
| rwl/PyCIM | CIM14/CPSM/Equipment/Core/GeographicalRegion.py | Python | mit | 2,316 |
"""
Read rules from all cubes and sort the cubes by various metrics (number of rule statements, number of feeder statements, ...)
"""
import configparser
from TM1py.Services import TM1Service
config = configparser.ConfigParser()
# Storing the credentials in a file is not recommended for purposes other than testing.
# It's better to set up CAM with SSO or use keyring to store credentials in the Windows Credential Manager. Sample:
# Samples/credentials_best_practice.py
config.read(r'..\config.ini')
# Connect to TM1
with TM1Service(**config['tm1srv01']) as tm1:
cubes = tm1.cubes.get_all()
# cubes with SKIPCHECK
cubes_with_skipcheck = [cube.name for cube in cubes if cube.skipcheck]
print("Cubes with SKIPCHECK:")
print(cubes_with_skipcheck)
# cubes with UNDEFVALS
cubes_with_undefvals = [cube.name for cube in cubes if cube.undefvals]
print("Cubes with UNDEFVALS:")
print(cubes_with_undefvals)
# cubes ordered by the number of rule statements
cubes.sort(key=lambda cube: len(cube.rules.rule_statements) if cube.has_rules else 0, reverse=True)
print("Cubes sorted by number of Rule Statements:")
print([cube.name for cube in cubes])
# cubes ordered by the number of feeder statements
cubes.sort(key=lambda cube: len(cube.rules.feeder_statements) if cube.has_rules else 0, reverse=True)
print("Cubes sorted by number of Feeder Statements:")
print([cube.name for cube in cubes])
| cubewise-code/TM1py-samples | Administration/cube_rules_stats.py | Python | mit | 1,421 |
import os
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.cortex as s_cortex
import synapse.tests.utils as s_tests
import synapse.lib.modelrev as s_modelrev
def nope(*args, **kwargs):
raise Exception('nope was called')
class ModelRevTest(s_tests.SynTest):
async def test_cortex_modelrev_init(self):
with self.getTestDir(mirror='testcore') as dirn:
async with await s_cortex.Cortex.anit(dirn) as core:
layr = core.getLayer()
self.true(layr.fresh)
self.eq(s_modelrev.maxvers, await layr.getModelVers())
# no longer "fresh", but lets mark a layer as read only
# and test the bail condition for layers which we cant update
async with await s_cortex.Cortex.anit(dirn) as core:
layr = core.getLayer()
layr.canrev = False
mrev = s_modelrev.ModelRev(core)
mrev.revs = mrev.revs + (((9999, 9999, 9999), nope),)
with self.raises(s_exc.CantRevLayer):
await mrev.revCoreLayers()
# no longer "fresh"
async with await s_cortex.Cortex.anit(dirn) as core:
layr = core.getLayer()
self.false(layr.fresh)
self.eq(s_modelrev.maxvers, await layr.getModelVers())
mrev = s_modelrev.ModelRev(core)
layr.woot = False
async def woot(layers):
layr.woot = True
mrev.revs = mrev.revs + (((9999, 9999, 9999), woot),)
await mrev.revCoreLayers()
self.true(layr.woot)
self.eq((9999, 9999, 9999), await layr.getModelVers())
async def test_modelrev_2_0_1(self):
async with self.getRegrCore('model-2.0.1') as core:
nodes = await core.nodes('ou:org=b084f448ee7f95a7e0bc1fd7d3d7fd3b')
self.len(1, nodes)
self.len(3, nodes[0].get('industries'))
nodes = await core.nodes('ou:org=57c2dd4feee21204b1a989b9a796a89d')
self.len(1, nodes)
self.len(1, nodes[0].get('industries'))
async def test_modelrev_0_2_2(self):
async with self.getRegrCore('model-0.2.2') as core:
nodes = await core.nodes('inet:web:acct:signup:client:ipv6="::ffff:1.2.3.4"')
self.len(2001, nodes)
async def test_modelrev_0_2_3(self):
async with self.getRegrCore('model-0.2.3') as core:
nodes = await core.nodes('it:exec:proc:cmd=rar.exe')
self.len(2001, nodes)
nodes = await core.nodes('it:cmd')
self.len(1, nodes)
self.eq(nodes[0].ndef, ('it:cmd', 'rar.exe'))
async def test_modelrev_0_2_4(self):
async with self.getRegrCore('model-0.2.4') as core:
nodes = await core.nodes('ps:person=1828dca605977725540bb74f728d9d81')
self.len(1, nodes)
self.len(1, nodes[0].get('names'))
nodes = await core.nodes('ps:person=d26a988f732371e51e36fea0f16ff382')
self.len(1, nodes)
self.len(3, nodes[0].get('names'))
nodes = await core.nodes('ps:person=c92e49791022c88396fa69d9f94281cb')
self.len(1, nodes)
self.len(3, nodes[0].get('names'))
nodes = await core.nodes('ps:person:name=coverage')
self.len(1003, nodes)
for node in nodes:
self.len(1, nodes[0].get('names'))
async def test_modelrev_0_2_6(self):
async with self.getRegrCore('model-0.2.6') as core:
acct = '90b3d80f8bdf9e33b4aeb46c720d3289'
nodes = await core.nodes(f'it:account={acct}')
self.len(1, nodes)
self.len(2, nodes[0].get('groups'))
g00 = 'd0d235109162501db9d4014a4c2cc4d9'
g01 = 'bf1999e8c45523bc64803e28b19a34c6'
nodes = await core.nodes(f'it:account={acct} [:groups=({g00}, {g01}, {g00})]')
self.len(1, nodes)
self.len(2, nodes[0].get('groups'))
url0 = "https://charlie.com/woot"
url1 = "https://bravo.com/woot"
url2 = "https://delta.com/woot"
url3 = "https://alpha.com/woot"
# created via: f'[it:sec:cve=CVE-2013-9999 :desc="some words" :references=({url0}, {url1}, {url2}, {url3})]'
nodes = await core.nodes(f'it:sec:cve=CVE-2013-9999')
self.eq(nodes[0].ndef[1], 'cve-2013-9999')
self.eq(nodes[0].get('desc'), 'some words')
self.eq(nodes[0].get('references'), (url3, url1, url0, url2))
async def test_modelrev_0_2_7_mirror(self):
vers = '2.85.1-hugenum-indx'
with self.getRegrDir('cortexes', vers) as regrdir00:
with self.getRegrDir('cortexes', vers) as regrdir01:
conf00 = {'nexslog:en': True}
async with await s_cortex.Cortex.anit(regrdir00, conf=conf00) as core00:
self.eq(await core00.getLayer().getModelVers(), (0, 2, 7))
conf01 = {'nexslog:en': True, 'mirror': core00.getLocalUrl()}
async with await s_cortex.Cortex.anit(regrdir01, conf=conf01) as core01:
self.eq(await core01.getLayer().getModelVers(), (0, 2, 6))
nodes = await core01.nodes('inet:fqdn=baz.com')
self.len(1, nodes)
node = nodes[0]
self.eq(node.props.get('_huge'), '10E-21')
self.eq(node.props.get('._univhuge'), '10E-21')
self.eq(node.props.get('_hugearray'), ('3.45', '10E-21'))
self.eq(node.props.get('._hugearray'), ('3.45', '10E-21'))
async with await s_cortex.Cortex.anit(regrdir00, conf=conf00) as core00:
async with await s_cortex.Cortex.anit(regrdir01, conf=conf01) as core01:
await core01.sync()
self.eq(await core01.getLayer().getModelVers(), (0, 2, 7))
nodes = await core01.nodes('inet:fqdn=baz.com')
self.len(1, nodes)
node = nodes[0]
self.eq(node.props.get('_huge'), '0.00000000000000000001')
self.eq(node.props.get('._univhuge'), '0.00000000000000000001')
self.eq(node.props.get('_hugearray'), ('3.45', '0.00000000000000000001'))
self.eq(node.props.get('._hugearray'), ('3.45', '0.00000000000000000001'))
| vertexproject/synapse | synapse/tests/test_lib_modelrev.py | Python | apache-2.0 | 6,599 |
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
SyncRandomSizeHook is deprecated; please use the Resize pipeline to achieve
similar functionality, such as `dict(type='Resize', img_scale=[(448, 448),
(832, 832)], multiscale_mode='range', keep_ratio=True)`.
Note: Due to the multi-process dataloader, its behavior is different
from YOLOX's official implementation, the official is to change the
size every fixed iteration interval and what we achieved is a fixed
epoch interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32, and then change the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
'Please use Resize pipeline to achieve similar '
'functions. Due to the multi-process dataloader, '
'its behavior is different from YOLOX\'s official '
'implementation, the official is to change the size '
'every fixed iteration interval and what we achieved '
'is a fixed epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
# Due to DDP and DP get the device behavior inconsistent,
# so we did not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
| open-mmlab/mmdetection | mmdet/core/hook/sync_random_size_hook.py | Python | apache-2.0 | 3,061 |
from .base import *
import logging
SITE_NAME = "Development"
DEBUG = True
DEBUG_TOOLBAR = False
LIVE_GO_CARDLESS = False
LIVE_MAIL = False
env_path = os.path.join(BASE_DIR, ".env")
environ.Env.read_env(env_path)
INSTALLED_APPS += ["coverage", "django_waitress"]
if DEBUG_TOOLBAR:
INSTALLED_APPS += ["debug_toolbar"]
MIDDLEWARE.insert(1, "debug_toolbar.middleware.DebugToolbarMiddleware")
INTERNAL_IPS = ["127.0.0.1"]
DATABASES = {"default": env.db_url("DATABASE_URL_DEV")}
ALLOWED_HOSTS = ["localhost"]
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
AUTH_PASSWORD_VALIDATORS = []
SECRET_KEY = env.str("SECRET_KEY")
BEE_FREE_ID = env.str("BEE_FREE_ID")
BEE_FREE_SECRET = env.str("BEE_FREE_SECRET")
POS_COOKIE = env.str("POS_COOKIE")
TEST_USER = env.str("TEST_USER")
TEST_PASSWORD = env.str("TEST_PASSWORD")
if LIVE_MAIL:
print("Warning - Live mail")
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = env.dict("ANYMAIL")
else:
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "test_emails")
if LIVE_GO_CARDLESS:
CARDLESS_ACCESS_TOKEN = env.str("CARDLESS_PRODUCTION_TOKEN")
CARDLESS_ENVIRONMENT = "live"
CARDLESS_WEBHOOK_SECRET = env.str("CARDLESS_WEBHOOK_SECRET")
print("WARNING - LIVE Go Cardless site")
else:
CARDLESS_ACCESS_TOKEN = env.str("CARDLESS_SANDBOX_TOKEN")
CARDLESS_ENVIRONMENT = "sandbox"
CARDLESS_WEBHOOK_SECRET = env.str("CARDLESS_WEBHOOK_SECRET")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"require_debug_true": {"()": "django.utils.log.RequireDebugTrue"},
},
"formatters": {
"simple": {"format": "[%(asctime)s] %(levelname)s %(message)s", "datefmt": "%Y-%m-%d %H:%M:%S"},
"verbose": {
"format": "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
"handlers": {
"console": {
"level": "DEBUG",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
"formatter": "simple",
}
},
"root": {"level": "DEBUG", "handlers": ["console"]},
"loggers": {
"members": {"handlers": ["console"]},
"django": {"handlers": ["console"], "propagate": True},
# stop sentry logging disallowed host
"django.security.DisallowedHost": {"handlers": ["console"], "propagate": False},
"django.request": { # debug logging of things that break requests
"handlers": ["console"],
"level": "DEBUG",
"propagate": True,
},
},
"py.warnings": {"handlers": ["console"]},
}
WAGTAIL_CACHE = True
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"LOCATION": os.path.join(BASE_DIR, "cache"),
"KEY_PREFIX": "coderedcms",
"TIMEOUT": 14400, # in seconds
}
}
| ianastewart/cwltc-admin | mysite/settings/dev.py | Python | mit | 3,231 |
__author__ = 'benji'
| oldm/OldMan | oldman/validation/__init__.py | Python | bsd-3-clause | 21 |
import random
from unittest import TestCase
from hamcrest import *
from chapter14.exercise14_1_4 import os_key_rank
from tree_util import get_random_os_tree
class TestExercise14_1_4(TestCase):
def test_os_key_rank(self):
tree, nodes, keys = get_random_os_tree()
key_to_find = random.choice(keys)
actual_rank = os_key_rank(tree.root, key_to_find)
sorted_keys = sorted(keys)
expected_ranks = [i + 1 for i, key in enumerate(sorted_keys) if key == key_to_find]
assert_that(actual_rank, is_in(expected_ranks))
| wojtask/CormenPy | test/test_chapter14/test_exercise14_1_4.py | Python | gpl-3.0 | 564 |
#!/usr/bin/env python
from pprint import pprint
import pyeapi
pynet_sw = pyeapi.connect_to("pynet-sw2")
show_version = pynet_sw.enable("show version")
pprint(show_version)
| ktbyers/pynet-ons-nov16 | arista_pyeapi_example/arista_pyeapi.py | Python | apache-2.0 | 174 |
from __future__ import absolute_import
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
import os
import logging
log = logging.getLogger("main")
from ete3.tools.phylobuild_lib.master_task import ModelTesterTask
from ete3.tools.phylobuild_lib.master_job import Job
from ete3.tools.phylobuild_lib.utils import basename, PhyloTree, GLOBALS, JMODELTEST_CITE
__all__ = ["JModeltest"]
class JModeltest(ModelTesterTask):
def __init__(self, nodeid, alg_fasta_file, alg_phylip_file, conf):
GLOBALS["citator"].add(JMODELTEST_CITE)
self.conf = conf
base_args = {
'-d': alg_fasta_file,
}
args = self.conf["jmodeltest"]
if args.get("-t", "ML") == "ML":
task_type = "tree"
else:
task_type = "mchooser"
ModelTesterTask.__init__(self, nodeid, task_type, "Jmodeltest",
base_args, args)
# set app arguments and options
self.alg_fasta_file = alg_fasta_file
self.alg_phylip_file = alg_phylip_file
self.seqtype = "nt"
self.models = "see jmodeltest params"
self.init()
self.best_model_file = os.path.join(self.taskdir, "best_model.txt")
if task_type == "tree":
self.tree_file = os.path.join(self.taskdir, "final_tree.nw")
else:
self.tree_file = None
def load_jobs(self):
tree_job = Job(self.conf["app"]["jmodeltest"], self.args, parent_ids=[self.nodeid])
self.jobs.append(tree_job)
def finish(self):
# first job is the raxml tree
best_model = None
best_model_in_next_line = False
t = None
for line in open(self.jobs[-1].stdout_file, "rU"):
line = line.strip()
if best_model_in_next_line and line.startswith("Model"):
pass#best_model = line.split("=")[1].strip()
elif best_model_in_next_line and line.startswith("partition"):
best_model = line.split("=")[1].strip()
best_model_in_next_line = False
elif line.startswith("Model selected:"):
best_model_in_next_line = True
elif line.startswith("ML tree (NNI) for the best AIC model ="):
nw = line.replace("ML tree (NNI) for the best AIC model =", "")
t = PhyloTree(nw)
open(self.best_model_file, "w").write(best_model)
log.log(26, "Best model: %s" %best_model)
if self.ttype == "tree":
tree_job = self.jobs[-1]
tree_file = os.path.join(tree_job.jobdir,
"jModelTest_tree."+self.nodeid)
t.write(outfile=self.tree_file)
self.model = best_model
ModelTesterTask.finish(self)
| fmaguire/ete | ete3/tools/phylobuild_lib/task/jmodeltest.py | Python | gpl-3.0 | 4,185 |
# Copyright (C) 2017 Szymon Nieznański ([email protected])
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, Pango, GObject
class SimpleLabel(Gtk.Label):
def __init__(self, text, color, align, font_description):
super(SimpleLabel, self).__init__()
self.set_text(text)
self.set_xalign(align)
self.modify_font(Pango.FontDescription(font_description))
self.set_text_color(color)
def set_text_color(self, color):
self.modify_fg(Gtk.StateFlags.NORMAL, Gdk.color_parse(color))
| s-nez/service-monitor | SimpleLabel.py | Python | gpl-3.0 | 1,275 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals;
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os;
import sys;
sys.path.append(os.curdir);
from pelicanconf import *;
## Publish Config --------------------------------------------------------------
# turn off develop mode
DEVELOP_FLAG = False;
# site settings
SITEURL = 'https://benrbray.com';
RELATIVE_URLS = False;
# URL Settings (different from pelicanconf.py)
ARTICLE_URL = "posts/{date:%Y}/{slug}";
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{slug}.html';
PAGE_URL = '{slug}/';
PAGE_SAVE_AS = '{slug}.html';
## Plugins ---------------------------------------------------------------------
# Disqus Comments
DISQUS_SITENAME = "benrbray";
# Feeds
#FEED_ALL_ATOM = 'feeds/all.atom.xml';
#CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml';
| benrbray/benrbray.github.io-source | publishconf.py | Python | mit | 882 |
#!/usr/bin/env python2
import os
import subprocess
import argparse
import sys
from lib import builder
from lib import promote
current_directory = os.path.realpath(os.path.dirname(__file__))
parser = argparse.ArgumentParser()
description = "Used to promote from one branch to another. For example, from 2.6-dev to " \
"2.6-testing. If 2.6-dev is on version 2.6.1-0.2.alpha and 2.6-testing is on " \
"2.6.0-1 then the end results would be 2.6-dev on 2.6.2-0.1.alpha and " \
"2.6-testing on 2.2.6.1-0.3.beta"
parser.description = description
parser.add_argument("project_directory",
help="The directory containing the project that you want to promote")
parser.add_argument("source_branch", help="The branch that you want to promote from")
parser.add_argument("target_branch", help="The branch that you want to promote to")
parser.add_argument("--remote-name", help="The remote name for the git upstream to use",
default='origin')
opts = parser.parse_args()
git_directory = opts.project_directory
source_branch = opts.source_branch
target_branch = opts.target_branch
remote_name = opts.remote_name
print "DANGER ARE YOU SURE YOU WANT TO DO THIS??"
print "All the branches between %s and master will be checked out and pulled." \
"The results of the promotion will be merged forward to master using '-s ours'."
print "%s and %s will be merged, and updated. You will be responsible to push to github" % \
(source_branch, target_branch)
confirmation = raw_input("Are you sure (y/n): ")
if confirmation != 'y':
    print "you thought better, probably a smart move"
    sys.exit(0)
print "Checking that we can merge cleanly to master"
# Checkout all the branches required to update & merge forward
promotion_chain = promote.get_promotion_chain(git_directory, target_branch,
upstream_name=remote_name)
promote.check_merge_forward(git_directory, promotion_chain)
print "Getting all the branches required, and pulling the latest version"
for git_branch in promotion_chain:
promote.checkout_branch(git_directory, git_branch, remote_name=remote_name)
# Update the version on source and merge up
print "Bumping the stage on source before merging to target"
promote.checkout_branch(git_directory, source_branch, remote_name=remote_name)
subprocess.check_call(['./update-version.py', '--update-type', 'stage', git_directory],
cwd=current_directory)
new_version = builder.get_nvr_from_spec_file_in_directory(git_directory)
msg = "bumped version to %s" % new_version
subprocess.check_call(['git', 'commit', '-a', '-m', msg], cwd=git_directory)
promote.merge_forward(git_directory)
print "Merging %s into %s " % (source_branch, target_branch)
promote.checkout_branch(git_directory, target_branch, remote_name=remote_name)
subprocess.check_call(['git', 'merge', source_branch], cwd=opts.project_directory)
print "Bumping the patch level on the source branch"
# merge the source into the target branch
promote.checkout_branch(git_directory, source_branch, remote_name=remote_name)
subprocess.check_call(['./update-version.py', '--update-type', 'patch', git_directory],
cwd=current_directory)
msg = "bumped version to %s" % new_version
subprocess.check_call(['git', 'commit', '-a', '-m', msg], cwd=git_directory)
promote.merge_forward(git_directory)
print "Don't forget the following branches need to be pushed to github:"
print promotion_chain
| rbarlow/pulp_packaging | ci/promote-brach.py | Python | gpl-2.0 | 3,501 |
from pyxb_114.bundles.wssplat.raw.wscoor import *
| msherry/PyXB-1.1.4 | pyxb_114/bundles/wssplat/wscoor.py | Python | apache-2.0 | 50 |
from extra.utils import strToBool
class Machine(object):
"""
Provides the implementation of a Machine in a Plant.
"""
def __init__(self, name, quantity = 1, canUnhook = False,
precedence = False, breaks = []):
"""
name is the unique Machine name.
precedence is whether the quantity should be dealt with as capacity or
as parallel different Machine instances.
canUnhook is whether a crane can leave an Order at this Machine or not.
quantity is the number of available machines of this type (name) in
the Plant.
"""
assert name != None
assert name != ""
assert breaks != None
assert quantity >= 1
self.quantity = quantity
self.canUnhook = canUnhook
self.precedence = precedence
self.name = name
self.breaks = breaks
def __repr__(self):
return str(self.name)
def setOfBreaks(self):
res = []
for b in self.breaks:
res.extend(range(b[0], b[0] + b[1]))
return res
@staticmethod
def fromXml(element):
"""
Creates a Machine instance from XML node tree element and returns it.
"""
breaks = []
for e in element.getElementsByTagName("break"):
breaks.append((int(e.getAttribute("start")),
int(e.getAttribute("duration"))))
return Machine(
name = element.getAttribute("name").lower(),
quantity = int(element.getAttribute("quantity")),
precedence = strToBool(element.getAttribute("precedence")),
canUnhook = strToBool(element.getAttribute("canUnhook")),
breaks = breaks
)
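# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of building a Machine from an XML fragment with
# xml.dom.minidom. The attribute values are made up for demonstration and
# assume extra.utils.strToBool accepts the strings "true"/"false".
if __name__ == '__main__':
    from xml.dom.minidom import parseString
    fragment = ('<machine name="Press" quantity="2" precedence="false" canUnhook="true">'
                '<break start="10" duration="5"/></machine>')
    machine = Machine.fromXml(parseString(fragment).documentElement)
    # e.g. "press x2, breaks occupy [10, 11, 12, 13, 14]"
    print("%s x%d, breaks occupy %s" % (machine, machine.quantity, machine.setOfBreaks()))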
| fredmorcos/attic | projects/plantmaker/archive/20100531/src/plant/machine.py | Python | isc | 1,470 |
# Copyright 2013 IBM Corp.
from powervc.common import config
from powervc.common import netutils
CONF = config.CONF
# http client opts from config file normalized
# to keystone client form
OS_OPTS = None
PVC_OPTS = None
def _build_base_http_opts(config_section, opt_map):
configuration = CONF[config_section]
opt_map['tenant_name'] = configuration['admin_tenant_name']
opt_map['username'] = configuration['admin_user']
opt_map['password'] = configuration['admin_password']
opt_map['cacert'] = configuration['connection_cacert']
opt_map['insecure'] = configuration['http_insecure']
if opt_map['insecure'] is False:
opt_map['auth_url'] = netutils.hostname_url(configuration['auth_url'])
else:
opt_map['auth_url'] = configuration['auth_url']
return opt_map
# init client opts for powervc and openstack only once
if OS_OPTS is None:
OS_OPTS = _build_base_http_opts('openstack', {})
# support multiple region on local openstack
OS_OPTS['region_name'] = CONF['openstack']['region_name']
if PVC_OPTS is None:
PVC_OPTS = _build_base_http_opts('powervc', {})
| openstack/powervc-driver | common-powervc/powervc/common/client/config.py | Python | apache-2.0 | 1,125 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
from py.test import mark
from translate.tools import pretranslate
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
from translate.storage import xliff
class TestPretranslate:
xliff_skeleton = '''<?xml version="1.0" encoding="utf-8"?>
<xliff version="1.1" xmlns="urn:oasis:names:tc:xliff:document:1.1">
<file original="doc.txt" source-language="en-US">
<body>
%s
</body>
</file>
</xliff>'''
def setup_method(self, method):
warnings.resetwarnings()
def teardown_method(self, method):
warnings.resetwarnings()
def pretranslatepo(self, input_source, template_source=None):
"""helper that converts strings to po source without requiring files"""
input_file = wStringIO.StringIO(input_source)
if template_source:
template_file = wStringIO.StringIO(template_source)
else:
template_file = None
output_file = wStringIO.StringIO()
pretranslate.pretranslate_file(input_file, output_file, template_file)
output_file.seek(0)
return po.pofile(output_file.read())
def pretranslatexliff(self, input_source, template_source=None):
"""helper that converts strings to po source without requiring files"""
input_file = wStringIO.StringIO(input_source)
if template_source:
template_file = wStringIO.StringIO(template_source)
else:
template_file = None
output_file = wStringIO.StringIO()
pretranslate.pretranslate_file(input_file, output_file, template_file)
output_file.seek(0)
return xliff.xlifffile(output_file.read())
def singleunit(self, pofile):
"""checks that the pofile contains a single non-header unit, and
returns it"""
if len(pofile.units) == 2 and pofile.units[0].isheader():
print pofile.units[1]
return pofile.units[1]
else:
print pofile.units[0]
return pofile.units[0]
def test_pretranslatepo_blank(self):
"""checks that the pretranslatepo function is working for a simple file
initialisation"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
newpo = self.pretranslatepo(input_source)
assert str(self.singleunit(newpo)) == input_source
def test_merging_simple(self):
"""checks that the pretranslatepo function is working for a simple
merge"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == template_source
def test_merging_messages_marked_fuzzy(self):
"""test that when we merge PO files with a fuzzy message that it
remains fuzzy"""
input_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\n#, fuzzy\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == template_source
def test_merging_plurals_with_fuzzy_matching(self):
"""test that when we merge PO files with a fuzzy message that it
remains fuzzy"""
input_source = r'''#: file.cpp:2
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] ""
msgstr[1] ""
'''
template_source = r'''#: file.cpp:3
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
# The #: comment and msgid's are different between the pot and the po
poexpected = r'''#: file.cpp:2
#, fuzzy
msgid "%d manual"
msgid_plural "%d manuals"
msgstr[0] "%d handleiding."
msgstr[1] "%d handleidings."
'''
newpo = self.pretranslatepo(input_source, template_source)
assert str(self.singleunit(newpo)) == poexpected
@mark.xfail(reason="Not Implemented")
def test_merging_msgid_change(self):
"""tests that if the msgid changes but the location stays the same that
we merge"""
input_source = '''#: simple.label\n#: simple.accesskey\nmsgid "Its &hard coding a newline.\\n"\nmsgstr ""\n'''
template_source = '''#: simple.label\n#: simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
poexpected = '''#: simple.label\n#: simple.accesskey\n#, fuzzy\nmsgid "Its &hard coding a newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
def test_merging_location_change(self):
"""tests that if the location changes but the msgid stays the same that
we merge"""
input_source = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr ""\n''' % po.lsep
template_source = '''#: simple.label%ssimple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
poexpected = '''#: new_simple.label%snew_simple.accesskey\nmsgid "A &hard coded newline.\\n"\nmsgstr "&Hart gekoeerde nuwe lyne\\n"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
def test_merging_location_and_whitespace_change(self):
"""test that even if the location changes that if the msgid only has
whitespace changes we can still merge"""
input_source = '''#: singlespace.label%ssinglespace.accesskey\nmsgid "&We have spaces"\nmsgstr ""\n''' % po.lsep
template_source = '''#: doublespace.label%sdoublespace.accesskey\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
poexpected = '''#: singlespace.label%ssinglespace.accesskey\n#, fuzzy\nmsgid "&We have spaces"\nmsgstr "&One het spasies"\n''' % po.lsep
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
@mark.xfail(reason="Not Implemented")
def test_merging_accelerator_changes(self):
"""test that a change in the accelerator localtion still allows
merging"""
input_source = '''#: someline.c\nmsgid "A&bout"\nmsgstr ""\n'''
template_source = '''#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
poexpected = '''#: someline.c\nmsgid "A&bout"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == poexpected
@mark.xfail(reason="Not Implemented")
def test_lines_cut_differently(self):
"""Checks that the correct formatting is preserved when pot an po lines
differ."""
input_source = '''#: simple.label\nmsgid "Line split "\n"differently"\nmsgstr ""\n'''
template_source = '''#: simple.label\nmsgid "Line"\n" split differently"\nmsgstr "Lyne verskillend gesny"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_automatic_comments_dont_duplicate(self):
"""ensure that we can merge #. comments correctly"""
input_source = '''#. Row 35\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#. Row 35\nmsgid "&About"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_automatic_comments_new_overides_old(self):
"""ensure that new #. comments override the old comments"""
input_source = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#. old comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
poexpected = '''#. new comment\n#: someline.c\nmsgid "&About"\nmsgstr "&Info"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_comments_with_blank_comment_lines(self):
"""test that when we merge a comment that has a blank line we keep the
blank line"""
input_source = '''#: someline.c\nmsgid "About"\nmsgstr ""\n'''
template_source = '''# comment1\n#\n# comment2\n#: someline.c\nmsgid "About"\nmsgstr "Omtrent"\n'''
poexpected = template_source
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_empty_commentlines(self):
input_source = '''#: paneSecurity.title
msgid "Security"
msgstr ""
'''
template_source = '''# - Contributor(s):
# -
# - Alternatively, the
# -
#: paneSecurity.title
msgid "Security"
msgstr "Sekuriteit"
'''
poexpected = template_source
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
print "expected"
print poexpected
print "got:"
print str(newpounit)
assert str(newpounit) == poexpected
def test_merging_msgidcomments(self):
"""ensure that we can merge msgidcomments messages"""
input_source = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr ""
'''
template_source = r'''#: window.width
msgid ""
"_: Do not translate this.\n"
"36em"
msgstr "36em"
'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_plurals(self):
"""ensure that we can merge plural messages"""
input_source = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] ""\nmsgstr[1] ""\n'''
template_source = '''msgid "One"\nmsgid_plural "Two"\nmsgstr[0] "Een"\nmsgstr[1] "Twee"\nmsgstr[2] "Drie"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == template_source
def test_merging_resurect_obsolete_messages(self):
"""check that we can reuse old obsolete messages if the message comes
back"""
input_source = '''#: resurect.c\nmsgid "&About"\nmsgstr ""\n'''
template_source = '''#~ msgid "&About"\n#~ msgstr "&Omtrent"\n'''
expected = '''#: resurect.c\nmsgid "&About"\nmsgstr "&Omtrent"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
assert str(newpo) == expected
def test_merging_comments(self):
"""Test that we can merge comments correctly"""
input_source = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr ""\n'''
template_source = '''#. Don't do it!\n#: file.py:2\nmsgid "One"\nmsgstr "Een"\n'''
poexpected = '''#. Don't do it!\n#: file.py:1\nmsgid "One"\nmsgstr "Een"\n'''
newpo = self.pretranslatepo(input_source, template_source)
print newpo
newpounit = self.singleunit(newpo)
assert str(newpounit) == poexpected
def test_merging_typecomments(self):
"""Test that we can merge with typecomments"""
input_source = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr ""\n'''
template_source = '''#: file.c:2\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
poexpected = '''#: file.c:1\n#, c-format\nmsgid "%d pipes"\nmsgstr "%d pype"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
print newpounit
assert str(newpounit) == poexpected
input_source = '''#: file.c:1\n#, c-format\nmsgid "%d computers"\nmsgstr ""\n'''
template_source = '''#: file.c:2\n#, c-format\nmsgid "%s computers "\nmsgstr "%s-rekenaars"\n'''
poexpected = '''#: file.c:1\n#, fuzzy, c-format\nmsgid "%d computers"\nmsgstr "%s-rekenaars"\n'''
newpo = self.pretranslatepo(input_source, template_source)
newpounit = self.singleunit(newpo)
assert newpounit.isfuzzy()
assert newpounit.hastypecomment("c-format")
def test_xliff_states(self):
"""Test correct maintenance of XLIFF states."""
xlf_template = self.xliff_skeleton \
% '''<trans-unit id="1" xml:space="preserve">
<source> File 1 </source>
</trans-unit>'''
xlf_old = self.xliff_skeleton \
% '''<trans-unit id="1" xml:space="preserve" approved="yes">
<source> File 1 </source>
<target> Lêer 1 </target>
</trans-unit>'''
template = xliff.xlifffile.parsestring(xlf_template)
old = xliff.xlifffile.parsestring(xlf_old)
new = self.pretranslatexliff(template, old)
print str(old)
print '---'
print str(new)
assert new.units[0].isapproved()
# Layout might have changed, so we won't compare the serialised versions
class TestPretranslateCommand(test_convert.TestConvertCommand, TestPretranslate):
"""Tests running actual pretranslate commands on files"""
convertmodule = pretranslate
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--tm")
options = self.help_check(options, "-s MIN_SIMILARITY, --similarity=MIN_SIMILARITY")
options = self.help_check(options, "--nofuzzymatching", last=True)
| jagg81/translate-toolkit | translate/tools/test_pretranslate.py | Python | gpl-2.0 | 14,227 |
from base import StreetAddressValidation, AddressValidation
UPS_XAV_CONNECTION = 'https://onlinetools.ups.com/ups.app/xml/XAV'
UPS_XAV_CONNECTION_TEST = 'https://wwwcie.ups.com/ups.app/xml/XAV'
UPS_AV_CONNECTION = 'https://onlinetools.ups.com/ups.app/xml/AV'
UPS_AV_CONNECTION_TEST = 'https://wwwcie.ups.com/ups.app/xml/AV'
| cuker/python-ups | ups/addressvalidation/__init__.py | Python | bsd-3-clause | 326 |
"""This module provides the BET calculations on isotherm data.
See: http://micro.edu/calculations/bet.html for details.
"""
import numpy as np
import math
from . import constants as const
from . import util
def Isotherm2BET(Qads, Prel) :
"""Calculate the BET Transform
Arguments:
    Qads: Quantity of gas adsorbed (cm^3/g STP) (numpy array)
Prel: Relative Pressure (numpy array)
Returns: BET transform of the data: 1/(Qads*(1/Prel-1))"""
return 1/(Qads*(1/Prel-1))
def Isotherm2RoquerolBET(Qads, Prel) :
"""Calculate the Rouquerol BET Transform Qads (1-Prel)
Arguments:
    Qads: Quantity of gas adsorbed (cm^3/g STP) (numpy array)
Prel: Relative Pressure (numpy array)
Returns: BET Rouquerol transform of the data"""
return Qads*(1-Prel)
def bet(Qads, Prel, csa):
"""Run the BET surface area calulation.
Arguments:
Qads: Quauntity of gas adsorbed (cm^3/g STP) (numpy array)
Prel: Relative Pressure (numpy array)
csa: Molecular cross sectional area. (nm^2)
Returns a namedtouple with the following fields:
transform: BET transform of the data: 1/(Qads*(1/Prel-1))
C: BET C value
sa: BET Surface area (m^2/g)
sa_err: Uncertainty in the BET surface area.
q_m: Monolayer capacity (cm^3/g STP)
line_fit: The line fit statistics from transform vs. Prel.
"""
transform = Isotherm2BET(Qads, Prel)
lf = util.linefit(Prel, transform)
C = (lf.slope + lf.y_intercept)/lf.y_intercept
q_m = 1/(lf.slope + lf.y_intercept)
sa = const.AVOGADRO*csa/(const.VOLGASTP*const.NM2_M2*(lf.slope + lf.y_intercept))
sa_err = sa*(math.sqrt((lf.slope_err**2)+(lf.y_intercept_err**2))/(lf.slope+lf.y_intercept))
return util.make_touple(
"BETResults",
transform = transform,
C = C,
q_m = q_m,
sa = sa,
sa_err = sa_err,
line_fit=lf,
)
def CalcBETArea(Qads, Prel, csa):
"""Calculates the BET Surface Area.
Arguments:
    Qads: Quantity of gas adsorbed (cm^3/g STP) (numpy array)
Prel: Relative Pressure (numpy array)
csa: Molecular cross sectional area. (nm^2)
Returns: BET Surface area (m^2/g)
"""
return bet(Qads, Prel, csa).sa
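# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the bet() call signature on a small synthetic isotherm.
# The Prel/Qads values and the 0.162 nm^2 cross-sectional area are made-up
# illustrative numbers, not reference data. Because this module uses relative
# imports, the snippet documents the intended call rather than serving as a
# standalone script.
if __name__ == '__main__':
    example_Prel = np.array([0.05, 0.10, 0.15, 0.20, 0.25, 0.30])
    example_Qads = np.array([35.0, 39.0, 42.0, 45.0, 48.0, 51.0])
    results = bet(example_Qads, example_Prel, csa=0.162)
    print("BET area: %.2f +/- %.2f m^2/g (C = %.1f, q_m = %.2f cm^3/g STP)"
          % (results.sa, results.sa_err, results.C, results.q_m))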
| lowks/micromeritics | micromeritics/bet.py | Python | gpl-3.0 | 2,286 |
#!/usr/bin/env python
# encoding: utf-8
"""
update/disease.py
Update the disease terms in database
Created by Måns Magnusson on 2017-04-03.
Copyright (c) 2017 __MoonsoInc__. All rights reserved.
"""
import logging
import os
import click
from flask.cli import current_app, with_appcontext
from scout.constants import UPDATE_DISEASES_RESOURCES
from scout.load.hpo import load_disease_terms
from scout.server.extensions import store
from scout.utils.handle import get_file_handle
from scout.utils.scout_requests import (
fetch_hpo_terms,
fetch_hpo_to_genes_to_disease,
fetch_mim_files,
)
LOG = logging.getLogger(__name__)
def _check_resources(resources):
"""Check that resource lines file contain valid data
Args:
resources(dict): resource names as keys and resource file lines as values
"""
for resname, lines in resources.items():
if not lines or lines[0].startswith("#") is False:
LOG.error(f"Resource file '{resname}' doesn't contain valid data.")
raise click.Abort()
def _fetch_downloaded_resources(resources, downloads_folder):
"""Populate resource lines if a resource exists in downloads folder
Args:
resources(dict):
downloads_folder(str): path to downloaded files or demo version of these files
"""
for resname, filenames in UPDATE_DISEASES_RESOURCES.items():
for filename in filenames:
resource_path = os.path.join(downloads_folder, filename)
resource_exists = os.path.isfile(resource_path)
if resource_exists:
resources[resname] = get_file_handle(resource_path).readlines()
if resname not in resources:
LOG.error(f"Resource file '{resname}' was not found in provided downloads folder.")
raise click.Abort()
@click.command("diseases", short_help="Update disease terms")
@click.option(
"-f",
"--downloads-folder",
type=click.Path(exists=True, dir_okay=True, readable=True),
help="specify path to folder where files necessary to update diseases are pre-downloaded",
)
@click.option(
"--api-key",
help="Download resources using an OMIM api key (required only if downloads folder is NOT specified)",
)
@with_appcontext
def diseases(downloads_folder, api_key):
"""
Update disease terms in mongo database. Use pre-downloaded resource files (phenotype_to_genes and genemap2) or download them from OMIM.
    Downloading the resources directly from OMIM requires a valid OMIM API key.
"""
adapter = store
api_key = api_key or current_app.config.get("OMIM_API_KEY")
resources = {}
if downloads_folder:
api_key = None
# Fetch required resource lines after making sure that are present in downloads folder and that contain valid data
_fetch_downloaded_resources(resources, downloads_folder)
else:
# Download resources
if not api_key:
LOG.warning("Please provide a omim api key to load the omim gene panel")
raise click.Abort()
try:
mim_files = fetch_mim_files(api_key, genemap2=True)
resources["genemap_lines"] = mim_files["genemap2"]
resources["hpo_gene_lines"] = fetch_hpo_to_genes_to_disease()
except Exception as err:
LOG.warning(err)
raise click.Abort()
_check_resources(resources)
LOG.info("Dropping DiseaseTerms")
adapter.disease_term_collection.delete_many({})
LOG.debug("DiseaseTerms dropped")
load_disease_terms(
adapter=adapter,
genemap_lines=resources["genemap_lines"],
hpo_disease_lines=resources["hpo_gene_lines"],
)
LOG.info("Successfully loaded all disease terms")
| Clinical-Genomics/scout | scout/commands/update/disease.py | Python | bsd-3-clause | 3,712 |
##
# Copyright 2002-2012 Ilja Livenson, PDC KTH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Process blob-specific CDMI request.
"""
from twisted.python import log
from twisted.web.server import NOT_DONE_YET
from twisted.web.static import NoRangeStaticProducer
from vcdm import blob
from vcdm import c
from vcdm.server.cdmi.cdmi_content_types import CDMI_OBJECT
from vcdm.server.cdmi.generic import set_common_headers, parse_path,\
get_common_body
from vcdm.server.cdmi.cdmiresource import StorageResource
from httplib import OK, CREATED, FOUND
from StringIO import StringIO
import base64
try:
import json
except ImportError:
import simplejson as json
class Blob(StorageResource):
isLeaf = True # data items cannot be nested
allowedMethods = ('PUT', 'GET', 'DELETE', 'HEAD')
def render_GET(self, request, bodyless=False):
"""GET operation corresponds to reading of the blob object"""
# process path and extract potential containers/fnm
_, __, fullpath = parse_path(request.path)
tre_header = request.getHeader('tre-enabled')
tre_request = tre_header is not None and tre_header.lower() == 'true'
log.msg("Request for TRE-enabled download received.")
# perform operation on ADT
status, vals = blob.read(self.avatar, fullpath, tre_request)
# construct response
request.setResponseCode(status)
request.setHeader('Content-Type', CDMI_OBJECT)
if tre_request and status == FOUND:
request.setHeader('Location', "/".join([c('general', 'tre_server'),
str(vals['uid'])]))
request.setLastModified(float(vals['mtime']))
set_common_headers(request)
if status == OK:
request.setLastModified(float(vals['mtime']))
if not bodyless:
valueencoding = vals['valuetransferencoding']
# for content we want to read in the full object into memory
content = (vals['content'].read() if valueencoding == 'utf-8' else
base64.b64encode(vals['content'].read()))
# construct body
response_body = {
'completionStatus': 'Complete',
'mimetype': vals['mimetype'],
'metadata': vals['metadata'],
'valuetransferencoding': valueencoding,
'value': content,
'actual_uri': vals.get('actual_uri'),
'capabilitiesURI': '/cdmi_capabilities/dataobject'
}
response_body.update(get_common_body(request, str(vals['uid']),
fullpath))
return json.dumps(response_body)
return ''
def render_PUT(self, request):
"""PUT corresponds to a create/update operation on a blob"""
# process path and extract potential containers/fnm
name, container_path, fullpath = parse_path(request.path)
length = int(request.getHeader('Content-Length'))
request.content.seek(0, 0)
# process json encoded request body
body = json.loads(request.content.read(length))
# default values of mimetype and metadata
mimetype = body.get('mimetype', 'text/plain')
metadata = body.get('metadata', {})
desired_backend = (metadata.get('desired_backend') or
request.getHeader('desired_backend'))
valueencoding = body.get('valuetransferencoding', 'utf-8')
body_value = body.get('value', '')
value = (body_value if valueencoding == 'utf-8' else
base64.b64decode(body_value))
content = (StringIO(value), len(value))
status, uid = blob.write(self.avatar, name, container_path, fullpath,
mimetype, metadata, content, valueencoding,
desired_backend=desired_backend)
request.setResponseCode(status)
request.setHeader('Content-Type', CDMI_OBJECT)
set_common_headers(request)
if status == OK or status == CREATED:
response_body = {
'completionStatus': 'Complete',
'mimetype': mimetype,
'metadata': metadata,
}
# add common elements
response_body.update(get_common_body(request, uid, fullpath))
return json.dumps(response_body)
else:
# error state
return ''
def render_DELETE(self, request):
"""DELETE operations corresponds to the blob deletion operation"""
_, __, fullpath = parse_path(request.path)
status = blob.delete(self.avatar, fullpath)
request.setResponseCode(status)
set_common_headers(request)
return ''
def render_HEAD(self, request):
"""Custom HEAD operation - Twisted's automatic body swallowing is failing on certain requests"""
return self.render_GET(request, bodyless=True)
class NonCDMIBlob(StorageResource):
isLeaf = True
allowedMethods = ('PUT', 'GET', 'DELETE', 'HEAD')
def makeProducer(self, request, content_object):
request.setResponseCode(OK)
# TODO: add full support for multi-part download and upload
# TODO: twisted.web.static.File is a nice example for streaming
# TODO: For non-local backends twisted.web.Proxy approach should be reused.
return NoRangeStaticProducer(request, content_object)
def render_GET(self, request, bodyless=False):
"""GET returns contents of a blob"""
# process path and extract potential containers/fnm
_, __, fullpath = parse_path(request.path)
log.msg("Getting blob (non-cdmi) %s" % fullpath)
tre_header = request.getHeader('tre-enabled')
tre_request = tre_header is not None and tre_header.lower() == 'true'
# perform operation on ADT
status, vals = blob.read(self.avatar, fullpath, tre_request)
# construct response
request.setResponseCode(status)
set_common_headers(request, False)
if tre_request and status == FOUND:
request.setHeader('Location', "/".join([c('general', 'tre_server'),
str(vals['uid'])]))
if status is OK:
            # XXX: hack - for some reason the response hangs if we simply pass
            # mimetype through as the content type
mimetype = vals['mimetype']
actual_type = 'text/plain' if mimetype == 'text/plain' else str(mimetype)
request.setHeader('Content-Type', actual_type)
request.setLastModified(float(vals['mtime']))
if not bodyless:
request.setHeader('Content-Length', str(vals['size']))
producer = self.makeProducer(request, vals['content'])
producer.start()
return NOT_DONE_YET
else:
return ''
return ''
def render_PUT(self, request):
"""PUT corresponds to a create/update operation on a blob"""
# process path and extract potential containers/fnm
name, container_path, fullpath = parse_path(request.path)
length = request.getHeader('Content-Length')
if length is None:
request.setResponseCode(411)
return ''
content = (request.content, int(length))
# default values of mimetype and metadata
mimetype = request.getHeader('Content-Type') if request.getHeader('Content-Type') is not None else 'text/plain'
desired_backend = request.getHeader('desired_backend')
if ((len(mimetype) == 2 and mimetype[1] == 'utf-8')):
valueencoding = 'utf-8'
else:
valueencoding = 'base64'
status, _ = blob.write(self.avatar, name, container_path, fullpath,
mimetype, {}, content, valueencoding,
desired_backend)
request.setResponseCode(status)
set_common_headers(request, False)
return ''
def render_DELETE(self, request):
"""DELETE operations corresponds to the blob deletion operation"""
_, __, fullpath = parse_path(request.path)
status = blob.delete(self.avatar, fullpath)
request.setResponseCode(status)
set_common_headers(request, False)
return ''
def render_HEAD(self, request):
"""Custom HEAD operation - Twisted's automatic body swallowing is failing on certain requests"""
return self.render_GET(request, bodyless=True)
| livenson/vcdm | src/vcdm/server/cdmi/blob.py | Python | bsd-3-clause | 9,373 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from RBMlib import RBM
from tqdm import tqdm
import matplotlib.pylab as plt
nvis = 8
nhid = 2
nsmpl = 5000
k = 10
batchsz = 100
epochs = 100
hbias = np.zeros( nhid )
vbias = np.zeros( nvis )
W = np.random.uniform( low=-1, high=+1, size=(nhid, nvis) )
showweight = 2**( np.arange(nvis) )
# Create True distribution from an RBM and sample from it
truerbm = RBM( nvis=nvis, nhid=nhid, hbias=hbias, vbias=vbias, W=W )
v0 = np.random.uniform( size=(nvis, batchsz) )
vsamp0 = truerbm.Sampling( v0, nsmpl )
valhist0 = showweight.dot( vsamp0 )
# Create Test (student) RBM for training
testrbm = RBM( nvis=nvis, nhid=nhid )
# sample from the RBM before training
v1 = np.random.uniform( size=(nvis, batchsz) )
vsamp1 = testrbm.Sampling( v1, nsmpl )
valhist1 = showweight.dot( vsamp1 )
# Train the Test RBM
Fdev = np.zeros( epochs )
mondev = np.zeros( epochs )
print 'Start training'
for ep in tqdm( range( epochs ) ):
for n in range( nsmpl/batchsz ):
beg = n * batchsz
end = beg + batchsz
v0 = vsamp0[:, beg:end]
mon, Fnorm = testrbm.UpdateParams( v0, k=k ) # train with CD-k
mondev[ep] += mon
Fdev[ep] += Fnorm
print 'End training'
# sample from the trained RBM
v2 = np.random.uniform( size=(nvis, batchsz) )
vsamp2 = testrbm.Sampling( v2, nsmpl )
valhist2 = showweight.dot( vsamp2 )
# Show the result
plt.figure()
nbins = 2**nvis
plt.subplot( 3, 1, 1 )
plt.hist( valhist0, bins=nbins, normed=True )
plt.grid()
plt.title( 'True Distribution from a RBM sampling' )
plt.xlim( (0,nbins) )
plt.subplot( 3, 1, 2 )
plt.hist( valhist1, bins=nbins, normed=True )
plt.grid()
plt.title( 'Test RBM (untrained) distribution' )
plt.xlim( (0,nbins) )
plt.subplot( 3, 1, 3 )
plt.hist( valhist2, bins=nbins, normed=True )
plt.grid()
plt.title( 'Test RBM (trained) distribution' )
plt.xlim( (0,nbins) )
plt.show()
plt.figure()
plt.subplot( 2, 1, 1 )
plt.plot( mondev, 'b-' )
plt.title( 'Monitor value (self Cross Ent.)' )
plt.grid()
plt.subplot( 2, 1, 2 )
plt.plot( Fdev, 'r-' )
plt.title( 'Norm of update vectors' )
plt.grid()
plt.show()
| shouno/RBMsample | rbmtest.py | Python | bsd-3-clause | 2,152 |
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .local_gateway_operations import LocalGatewayOperations
__all__ = [
'LocalGatewayOperations',
]
| BurtBiel/azure-cli | src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_local_gateway/lib/operations/__init__.py | Python | mit | 794 |
import matplotlib.pyplot as plt
from scipy import io
from Tkinter import Tk
from tkFileDialog import askopenfilename
from tkFileDialog import asksaveasfilename
if __name__ == '__main__':
root=Tk()
in_file = askopenfilename()
out_file = asksaveasfilename()
root.destroy()
print in_file
a = io.loadmat(in_file)
g = a["Grid"]
plt.figure()
# plt.imshow(g, vmin=-12, vmax=20)
plt.imshow(g)
#c = plt.colorbar(orientation="horizontal", aspect=30)
c = plt.colorbar()
c.set_label("Number of Slabs")
plt.xlabel('Downwind Distance')
plt.savefig(out_file, bbox_inches='tight')
| robintw/WernerModel | plot_figure.py | Python | bsd-3-clause | 649 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Calculates the Wigner-Seitz projection of cube file data of periodic data.
.. autofunction:: main
Command Line Interface
----------------------
.. program:: es_fitting.py
.. option:: filename
The cubefile to read from. Both Bohr and Angstrom units are supported.
.. option:: --periodic
Whether to treat input file in periodic boundary conditions. In the current implementation, this requires copying all atom positions for the 26 direct neighbouring cubes and therefore carries substantial cost in terms of memory requirements. The runtime scales O(log n) with the number of atoms.
.. option:: --leafsize
Number of points at which brute-force nearest neighbour search is employed. Increasing this number introduces higher memory requirements, but may speed up processing. Default: 5000.
Implementation
--------------
"""
# system modules
import argparse
# third-party modules
from scipy.spatial import KDTree
import numpy as np
# custom modules
import euston.io as io
import euston.geometry as geom
parser = argparse.ArgumentParser(
    description='Calculates the Wigner-Seitz projection of periodic cube file data.')
parser.add_argument('filename', type=str, help='The cube file.')
parser.add_argument('--periodic', action='store_true', help='Treats cube data periodically.')
parser.add_argument('--leafsize', type=int,
help='Number of points at which brute-force nearest neighbour search is employed.', default=5000)
def main(parser):
"""
Main routine wrapper.
:param argparse.ArgumentParser parser: Argument parser
"""
args = parser.parse_args()
print 'Reading cubefile... ',
cube = io.CubeFile(args.filename)
print 'Completed, %d atoms %d voxels.' % (cube.count_atoms(), cube.count_voxels())
print 'Calculating cell dimensions... ',
h_mat = cube.get_h_matrix()
print 'Completed.'
print 'Adding image atoms... ',
coord = cube.get_coordinates()
images = np.zeros(((len(coord) * 27), 3))
counter = 0
for x in range(-1, 2):
for y in range(-1, 2):
for z in range(-1, 2):
                if x == 0 and y == 0 and z == 0:
                    # skip the original cell; its untranslated coordinates are appended after the loop
                    continue
shift = (h_mat.transpose() * np.array([x, y, z])).sum(axis=0)
images[counter * cube.count_atoms():(counter + 1) * cube.count_atoms(), :] = coord + shift
counter += 1
print 'Completed, %d atoms added.' % (cube.count_atoms() * 26)
images[-cube.count_atoms():] = coord
if args.periodic:
kdt = KDTree(images, leafsize=args.leafsize)
else:
kdt = KDTree(coord, leafsize=args.leafsize)
vals = np.zeros(cube.count_atoms())
for x in range(cube.get_xlen()):
print x
for y in range(cube.get_ylen()):
for z in range(cube.get_zlen()):
d, i = kdt.query(cube.get_voxel_pos(x, y, z, centered=True))
i = i % cube.count_atoms()
vals[i] += cube.get_val(x, y, z)
print 'Results (atom - value)'
vals *= cube.get_voxel_volume()
for idx, val in enumerate(vals):
print idx, val
if __name__ == '__main__':
main(parser)
| ferchault/euston | src/tools/es_fitting.py | Python | lgpl-3.0 | 3,000 |
# coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.log.log import IaasLogger
from iaasgw.utils.iaasSelecter import iaasSelect
import os
import sys
import traceback
if __name__ == '__main__':
param = sys.argv
logger = IaasLogger()
    # Parameters for logging
logparam = ["DescribeKeyPairs",os.getpid(), "無し"]
logger.start(logparam)
    # Execute
try:
        # Parameter description
        # 0. File name
        # 1. User name
        # 2. Platform No
        #
        # Example: param = [None, "1", "6", "1"]
iaasController = iaasSelect(param[1], param[2])
if iaasController == None:
sys.exit()
keypairs = iaasController.describeKeyPairs()
        # Return the result via standard output
print keypairs
except:
logger.error(traceback.format_exc())
raise
logger.end(logparam)
| aigamo/primecloud-controller | iaas-gw/src/DescribeKeyPairs.py | Python | gpl-2.0 | 1,656 |
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "NNO"
addresses_name = "2021-03-25T13:58:54.180597/Democracy_Club__06May2021.CSV"
stations_name = "2021-03-25T13:58:54.180597/Democracy_Club__06May2021.CSV"
elections = ["2021-05-06"]
csv_delimiter = ","
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"10034791449", # THE CEDARS, GRESHAM, NORWICH
"10023665747", # CHALET 4 MILL FARM AYLSHAM ROAD, FELMINGHAM
"10034812867", # BURROW COTTAGE AT WARREN BARN BREWERY ROAD, TRUNCH
"10034807115", # 6 SEAWARD CREST, LINKS ROAD, MUNDESLEY, NORWICH
"10034818211", # GARDEN COTTAGE, HOVETON HALL ESTATE, HOVETON, NORWICH
"10034819670", # THE OLD GATEHOUSE, WALSINGHAM ROAD, EAST BARSHAM, FAKENHAM
"100091325096", # FLAT 6 7 NORWICH ROAD, CROMER
]:
return None
if record.addressline6 in ["NR12 0RX", "NR11 7PE", "NR12 8AH", "NR12 0UD"]:
return None
return super().address_record_to_dict(record)
def station_record_to_dict(self, record):
if record.polling_place_id in [
"17025", # Walsingham Village Hall Wells Road Walsingham NR23 1RX
"17003", # Great Snoring Social Club Walsingham Road Great Snoring Fakenham NR21 0AP
"16607", # The Preston Room Neatishead Road Ashmanhaugh Wroxham NR12 8LB
]:
record = record._replace(polling_place_postcode="")
# Walcott Village Hall Coast Road Walcott NR12 ONG - O => 0
if record.polling_place_id == "16728":
record = record._replace(polling_place_postcode="NR12 0NG")
return super().station_record_to_dict(record)
| DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/management/commands/import_north_norfolk.py | Python | bsd-3-clause | 1,890 |
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None
if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('RequestId', None)
if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException):
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(SQSError, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return super(SQSError, self).endElement(name, value, connection)
def _cleanupParsedProperties(self):
super(SQSError, self)._cleanupParsedProperties()
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
super(SQSDecodeError, self).__init__(reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
super(StorageResponseError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(StorageResponseError, self).startElement(
name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return super(StorageResponseError, self).endElement(
name, value, connection)
def _cleanupParsedProperties(self):
super(StorageResponseError, self)._cleanupParsedProperties()
        for p in ('resource',):
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
super(EC2ResponseError, self).__init__(status, reason, body)
self.errors = [
(e.error_code, e.error_message) for e in self._errorResultSet]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
super(EC2ResponseError, self)._cleanupParsedProperties()
self._errorResultSet = []
        for p in ('errors',):
setattr(self, p, None)
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
super(InvalidAclError, self).__init__(message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
super(InvalidCorsError, self).__init__(message)
self.message = message
class InvalidEncryptionConfigError(Exception):
"""Exception raised when GCS encryption configuration XML is invalid."""
def __init__(self, message):
super(InvalidEncryptionConfigError, self).__init__(message)
self.message = message
class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
super(InvalidLifecycleConfigError, self).__init__(message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableUploadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableDownloadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
super(TooManyRecordsException, self).__init__(message)
self.message = message
class PleaseRetryException(Exception):
"""
Indicates a request should be retried.
"""
def __init__(self, message, response=None):
self.message = message
self.response = response
def __repr__(self):
return 'PleaseRetryException("%s", %s)' % (
self.message,
self.response
)
class InvalidInstanceMetadataError(Exception):
MSG = (
"You can set the 'metadata_service_num_attempts' "
"in your boto config file to increase the number "
"of times boto will attempt to retrieve "
"credentials from the instance metadata service."
)
def __init__(self, msg):
final_msg = msg + '\n' + self.MSG
super(InvalidInstanceMetadataError, self).__init__(final_msg)
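# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how callers commonly inspect these exceptions. The
# bucket name is made up, and boto.connect_s3() needs valid credentials, so
# this helper is illustrative only and is never invoked on import.
def _example_handle_s3_error():
    try:
        conn = boto.connect_s3()
        conn.get_bucket('a-bucket-that-may-not-exist')
    except S3ResponseError as err:
        # status/reason come from the HTTP response; error_code and message
        # are filled in by BotoServerError while parsing the XML error body.
        print('S3 call failed: %s %s (%s)' % (err.status, err.reason, err.error_code))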
| endlessm/chromium-browser | third_party/catapult/third_party/gsutil/gslib/vendored/boto/boto/exception.py | Python | bsd-3-clause | 17,799 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Diploma.hours'
db.alter_column('nmadb_students_diploma', 'hours', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=6, decimal_places=2))
# Changing field 'Diploma.tasks_solved'
db.alter_column('nmadb_students_diploma', 'tasks_solved', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Diploma.hours'
raise RuntimeError("Cannot reverse this migration. 'Diploma.hours' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Diploma.tasks_solved'
raise RuntimeError("Cannot reverse this migration. 'Diploma.tasks_solved' and its values cannot be restored.")
models = {
'nmadb_contacts.address': {
'Meta': {'ordering': "[u'municipality']", 'object_name': 'Address'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'human': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_contacts.Human']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_contacts.Municipality']", 'null': 'True', 'blank': 'True'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'nmadb_contacts.human': {
'Meta': {'ordering': "[u'last_name', u'first_name']", 'object_name': 'Human'},
'academic_degree': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django_db_utils.models.FirstNameField', [], {'max_length': '45'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_code': ('django_db_utils.models.IdentityCodeField', [], {'max_length': '11', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'last_name': ('django_db_utils.models.LastNameField', [], {'max_length': '45'}),
'main_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['nmadb_contacts.Address']"}),
'old_last_name': ('django_db_utils.models.LastNameField', [], {'max_length': '45', 'blank': 'True'})
},
'nmadb_contacts.municipality': {
'Meta': {'ordering': "[u'town', u'municipality_type']", 'object_name': 'Municipality'},
'code': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'nmadb_students.alumni': {
'Meta': {'object_name': 'Alumni'},
'abilities': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'activity_fields': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info_change_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'information_received_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'interest_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['nmadb_students.Student']", 'unique': 'True'}),
'study_field': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'university': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'nmadb_students.diploma': {
'Meta': {'object_name': 'Diploma'},
'diploma_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'hours': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'student': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['nmadb_students.Student']", 'unique': 'True'}),
'tasks_solved': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'nmadb_students.disabilitymark': {
'Meta': {'object_name': 'DisabilityMark'},
'disability': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateField', [], {}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_students.Student']"})
},
'nmadb_students.parentrelation': {
'Meta': {'object_name': 'ParentRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['nmadb_students.Student']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_contacts.Human']"}),
'relation_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'nmadb_students.school': {
'Meta': {'ordering': "[u'title']", 'object_name': 'School'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_contacts.Municipality']", 'null': 'True', 'blank': 'True'}),
'school_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
'nmadb_students.socialdisadvantagemark': {
'Meta': {'object_name': 'SocialDisadvantageMark'},
'end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateField', [], {}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_students.Student']"})
},
'nmadb_students.student': {
'Meta': {'ordering': "[u'last_name', u'first_name']", 'object_name': 'Student', '_ormbases': ['nmadb_contacts.Human']},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'human_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['nmadb_contacts.Human']", 'unique': 'True', 'primary_key': 'True'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'children'", 'symmetrical': 'False', 'through': "orm['nmadb_students.ParentRelation']", 'to': "orm['nmadb_contacts.Human']"}),
'school_class': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'school_year': ('django.db.models.fields.IntegerField', [], {}),
'schools': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['nmadb_students.School']", 'through': "orm['nmadb_students.StudyRelation']", 'symmetrical': 'False'})
},
'nmadb_students.studyrelation': {
'Meta': {'ordering': "[u'student', u'entered']", 'object_name': 'StudyRelation'},
'entered': ('django.db.models.fields.DateField', [], {}),
'finished': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_students.School']"}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['nmadb_students.Student']"})
}
}
complete_apps = ['nmadb_students'] | vakaras/nmadb-students | src/nmadb_students/migrations/0003_auto__chg_field_diploma_hours__chg_field_diploma_tasks_solved.py | Python | lgpl-3.0 | 9,381 |
# -*- coding: utf-8 -*-
#
# Flask-Micropub documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 16 17:53:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Micropub'
copyright = u'2015, Kyle Mahan'
author = u'Kyle Mahan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.2.4'
# The full version, including alpha/beta/rc tags.
release = u'0.2.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Napoleon docstrings
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Micropubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-Micropub.tex', u'Flask-Micropub Documentation',
u'Kyle Mahan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-micropub', u'Flask-Micropub Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-Micropub', u'Flask-Micropub Documentation',
author, 'Flask-Micropub', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| kylewm/flask-micropub | docs/conf.py | Python | bsd-2-clause | 9,728 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pika
import time
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rpc_queue')
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def on_request(ch, method, props, body):
n = int(body)
print(" [.] fib(%s)" % n)
response = fib(n)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id = \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='rpc_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming() | dianshen/python_day | s12day10/rabbitMQ_rpc_serverl.py | Python | apache-2.0 | 951 |
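# Hedged companion sketch (not part of the repo above): a minimal client for
# the RPC server in rabbitMQ_rpc_serverl.py, written against the same
# pre-1.0 pika API. It declares an exclusive reply queue, publishes the
# request with reply_to and correlation_id set, then waits for the matching
# response. Only the queue name 'rpc_queue' is taken from the server; the
# rest is an illustrative assumption.
import pika
import uuid

class FibonacciRpcClient(object):
    def __init__(self):
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(
            host='localhost'))
        self.channel = self.connection.channel()
        # exclusive, auto-named reply queue for this client
        result = self.channel.queue_declare(exclusive=True)
        self.callback_queue = result.method.queue
        self.channel.basic_consume(self.on_response, no_ack=True,
                                   queue=self.callback_queue)
        self.response = None
        self.corr_id = None

    def on_response(self, ch, method, props, body):
        # accept only the reply that matches our correlation id
        if self.corr_id == props.correlation_id:
            self.response = body

    def call(self, n):
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.channel.basic_publish(exchange='',
                                   routing_key='rpc_queue',
                                   properties=pika.BasicProperties(
                                       reply_to=self.callback_queue,
                                       correlation_id=self.corr_id),
                                   body=str(n))
        while self.response is None:
            self.connection.process_data_events()
        return int(self.response)

fibonacci_rpc = FibonacciRpcClient()
print(" [x] Requesting fib(30)")
print(" [.] Got %r" % fibonacci_rpc.call(30))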
__all__ = ['ewsum', 'ewsum_back', 'softmax_back', 'rectify_back']
import os
import math
import numpy
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
from pycuda.compiler import SourceModule
from .matrix import matrix_addition
from .utils import gpu_func
from .enums import MAX_BLOCK_SIZE, CUR_DIR, CACHE_DIR
mod = SourceModule(open(os.path.join(CUR_DIR, 'kernel/ewsum.cu')).read(), cache_dir=CACHE_DIR)
ewsum_kernel = mod.get_function('ewsum_kernel')
ewsum_sum_kernel = mod.get_function('ewsum_sum_kernel')
ewsum_back_kernel = mod.get_function('ewsum_back_kernel')
mod2 = SourceModule(open(os.path.join(CUR_DIR, 'kernel/softmax.cu')).read(), cache_dir=CACHE_DIR)
softmax_back_kernel = mod2.get_function('softmax_back_kernel')
mod3 = SourceModule(open(os.path.join(CUR_DIR, 'kernel/rectify.cu')).read(), cache_dir=CACHE_DIR)
rectify_back_kernel = mod3.get_function('rectify_back_kernel')
@gpu_func
def ewsum(d_a, d_w):
"""
YORI NOTES
This method is faster than CPU if num_w is large, and non_width is small:
When num_w is large, the for loop is small
When non_width is large, there are more threads necessary
"""
width = d_a.shape[0]
total_dim = d_a.size
num_w = d_w.shape[0]
d_tmp_out = gpuarray.zeros_like(d_a)
thread_size = min(d_a.size, MAX_BLOCK_SIZE)
block_size = max(int(math.ceil(d_a.size / float(thread_size))), 1)
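    # Worked sizing example (MAX_BLOCK_SIZE's real value comes from enums.py;
    # 1024 here is an assumption): with d_a.size == 3000 this yields
    # thread_size == 1024 and block_size == 3; total_dim is presumably what
    # lets the kernel ignore the surplus threads in the last block.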
ewsum_kernel(d_a, d_w, d_tmp_out,
numpy.int32(num_w), numpy.int32(width), numpy.int32(total_dim),
block=(thread_size,1,1), grid=(block_size,1,1))
# TODO: There HAS to be a better way to do this
x = width / num_w
d_out = gpuarray.zeros((x,) + d_a.shape[1:], numpy.float32)
thread_size = min(d_out.size, MAX_BLOCK_SIZE)
block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
ewsum_sum_kernel(d_tmp_out, d_out,
numpy.int32(num_w), numpy.int32(width), numpy.int32(total_dim),
block=(thread_size,1,1), grid=(block_size,1,1))
return d_out
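# Hedged usage sketch (shape contract read off the code above; the actual
# weighted-sum indexing lives in kernel/ewsum.cu and is not restated here).
# Assuming the @gpu_func decorator passes GPUArray arguments through:
#
#     import numpy
#     import pycuda.gpuarray as gpuarray
#     d_a = gpuarray.to_gpu(numpy.ones((6, 4), dtype=numpy.float32))
#     d_w = gpuarray.to_gpu(numpy.ones(3, dtype=numpy.float32))
#     d_out = ewsum(d_a, d_w)   # first dim collapses by num_w: shape (2, 4)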
@gpu_func
def ewsum_back(d_error, d_w):
d_out = gpuarray.zeros((d_w.shape[0]*d_error.shape[0],) + d_error.shape[1:], dtype=d_error.dtype)
err_width = d_error.shape[0]
width = d_out.shape[0]
total_dim = d_out.size
num_w = d_w.shape[0]
thread_size = min(d_out.size, MAX_BLOCK_SIZE)
block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
ewsum_back_kernel(d_error, d_w, d_out,
numpy.int32(num_w), numpy.int32(err_width), numpy.int32(width), numpy.int32(total_dim),
block=(thread_size,1,1), grid=(block_size,1,1))
return d_out
@gpu_func
def softmax_back(d_a, d_error, s):
d_out = gpuarray.zeros_like(d_a)
thread_size = min(d_out.size, MAX_BLOCK_SIZE)
block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
softmax_back_kernel(d_a, d_error, d_out, numpy.float32(s), numpy.int32(d_out.size),
block=(thread_size,1,1), grid=(block_size,1,1))
return d_out
@gpu_func
def rectify_back(d_a, d_error, inplace=False):
if inplace:
d_out = d_a
else:
d_out = gpuarray.zeros_like(d_a)
thread_size = min(d_out.size, MAX_BLOCK_SIZE)
block_size = max(int(math.ceil(d_out.size / float(thread_size))), 1)
rectify_back_kernel(d_a, d_error, d_out, numpy.int32(d_out.size),
block=(thread_size,1,1), grid=(block_size,1,1))
return d_out
| Captricity/sciguppy | sciguppy/misc.py | Python | mit | 3,415 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import paddle
from test_dist_base import TestDistBase
def download_files():
url_prefix = 'http://paddle-unittest-data.bj.bcebos.com/dist_transformer/'
vocab_url = url_prefix + 'vocab.bpe.32000'
vocab_md5 = 'a86d345ca6e27f6591d0dccb1b9be853'
paddle.dataset.common.download(vocab_url, 'test_dist_transformer',
vocab_md5)
local_train_url = url_prefix + 'train.tok.clean.bpe.32000.en-de'
local_train_md5 = '033eb02b9449e6dd823f050782ac8914'
paddle.dataset.common.download(local_train_url, 'test_dist_transformer',
local_train_md5)
train0_url = url_prefix + 'train.tok.clean.bpe.32000.en-de.train_0'
train0_md5 = 'ddce7f602f352a0405267285379a38b1'
paddle.dataset.common.download(train0_url, 'test_dist_transformer',
train0_md5)
train1_url = url_prefix + 'train.tok.clean.bpe.32000.en-de.train_1'
train1_md5 = '8757798200180285b1a619cd7f408747'
paddle.dataset.common.download(train1_url, 'test_dist_transformer',
train1_md5)
test_url = url_prefix + 'newstest2013.tok.bpe.32000.en-de'
test_md5 = '9dd74a266dbdb25314183899f269b4a2'
paddle.dataset.common.download(test_url, 'test_dist_transformer', test_md5)
# cut test data for faster CI
orig_path = os.path.join(paddle.dataset.common.DATA_HOME,
"test_dist_transformer",
"newstest2013.tok.bpe.32000.en-de")
head_path = os.path.join(paddle.dataset.common.DATA_HOME,
"test_dist_transformer",
"newstest2013.tok.bpe.32000.en-de.cut")
os.system("head -n10 %s > %s" % (orig_path, head_path))
class TestDistTransformer2x2Sync(TestDistBase):
def _setup_config(self):
self._sync_mode = True
def test_dist_train(self):
download_files()
self.check_with_place(
"dist_transformer.py", delta=1e-5, check_error_log=False)
class TestDistTransformer2x2Async(TestDistBase):
def _setup_config(self):
self._sync_mode = False
def test_dist_train(self):
download_files()
self.check_with_place(
"dist_transformer.py", delta=1.0, check_error_log=False)
if __name__ == "__main__":
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_dist_transformer.py | Python | apache-2.0 | 3,034 |
import sys
import os
import struct
import leb128
from exception import UnknownMagic, UnsupportBindOpcode
from macho import MACH_O_CPU_TYPE
from fat_header import FatHeader
from mach_header import *
from objc import *
class MachOAnalyzer:
def __init__(self, file, cpu_type=MACH_O_CPU_TYPE.ARM64):
self.__fd = file
try:
self.__fat_header = FatHeader(self.__fd)
print 'FAT Mach-O detected'
fat_arch = self.__fat_header.get_arch(cpu_type)
if fat_arch == None:
print 'No arch for', cpu_type, ' in FAT Header'
fat_arch = self.__fat_header.get_arch()
                print 'Using the first available arch:', fat_arch.get_cpu_type()
self.__mh_offset = fat_arch.get_file_offset()
except UnknownMagic:
print 'Mach-O detected'
self.__mh_offset = 0
self.__mach_header = MachHeader(self.__fd, self.__mh_offset)
if True == self.__mach_header.is_big_endian():
self.__endian_str = '>'
else:
self.__endian_str = '<'
if True == self.__mach_header.is_64bit_cpu():
self.__is_64bit_cpu = True
else:
self.__is_64bit_cpu = False
self.__segments = self.__build_segments()
self.__dylibs = self.__build_load_dylib()
dyld_info = self.get_dyld_info()
#pass1: build the import table
if dyld_info.bind_size != 0:
#print 'bind pass1'
self.__build_bind_info(dyld_info.bind_off, dyld_info.bind_size, self.__bind_pass1)
if dyld_info.weak_bind_size != 0:
#print 'weak bind pass1'
self.__build_bind_info(dyld_info.weak_bind_off, dyld_info.weak_bind_size, self.__bind_pass1)
if dyld_info.lazy_bind_size != 0:
#print 'lazy bind pass1'
self.__build_bind_info(dyld_info.lazy_bind_off, dyld_info.lazy_bind_size, self.__bind_pass1)
#build virtual section from the import table
#TODO revise the data structure for addend
self.__virtual_section = self.__build_virtual_section()
#pass2: bind address to virtual section
if dyld_info.bind_size != 0:
#print 'bind pass2'
self.__build_bind_info(dyld_info.bind_off, dyld_info.bind_size, self.__bind_pass2)
if dyld_info.weak_bind_size != 0:
#print 'weak bind pass2'
self.__build_bind_info(dyld_info.weak_bind_off, dyld_info.weak_bind_size, self.__bind_pass2)
if dyld_info.lazy_bind_size != 0:
#print 'lazy bind pass2'
self.__build_bind_info(dyld_info.lazy_bind_off, dyld_info.lazy_bind_size, self.__bind_pass2)
self.__objc2_cls_stack = []
self.__resloved_objc2_cls_list = []
self.__objc_classlist = self.__build_objc2_clslist()
self.__objc_nlclslist = self.__build_objc2_nlclslist()
'''
struct segment_command
{
unsigned long type;
unsigned long len;
char segment_name[16];
unsigned long vmaddr;
unsigned long vmsize;
unsigned long fileoff;
unsigned long filesize;
unsigned long maxprot;
unsigned long initprot;
unsigned long nsects;
unsigned long flags;
}
struct segment_command_64
{
unsigned long type;
unsigned long len;
char segment_name[16];
unsigned long long vmaddr;
unsigned long long vmsize;
unsigned long long fileoff;
unsigned long long filesize;
unsigned long maxprot;
unsigned long initprot;
unsigned long nsects;
unsigned long flags;
}
struct section
{
char section_name[16];
char segment_name[16];
unsigned long addr;
unsigned long size;
unsigned long offset;
unsigned long alignment;
unsigned long reloff;
unsigned long nreloc
unsigned long flags;
unsigned long reserved1;
unsigned long reserved2;
}
struct section_64
{
char section_name[16];
char segment_name[16];
unsigned long long addr;
unsigned long long size;
unsigned long offset;
unsigned long alignment;
unsigned long reloff;
unsigned long nreloc
unsigned long flags;
unsigned long reserved1;
unsigned long reserved2;
unsigned long reserved3;
}
'''
#Build Segment and section list in memory
def __build_segments(self):
n_lcmds = self.__mach_header.get_number_cmds()
self.__fd.seek(self.__mh_offset + self.__mach_header.get_hdr_len())
segments = []
for i in range(n_lcmds):
type, len = struct.unpack(self.__endian_str + 'LL', self.__fd.read(8))
if self.__is_64bit_cpu == True and MACH_O_LOAD_COMMAND_TYPE(type) == MACH_O_LOAD_COMMAND_TYPE.SEGMENT_64:
name, vmaddr, vmsize, offset, filesize, maxprot, initprot, nsects, flags = \
struct.unpack(self.__endian_str + '16sQQQQLLLL', self.__fd.read(64))
name = name.strip('\x00')
segment = Segment(name, vmaddr, vmsize, offset, filesize, maxprot, initprot, nsects, flags)
segments.append(segment)
for j in range(nsects):
sec_name, name, vmaddr, vmsize, offset, alignment, reloff, nreloc, flags, reserved1, reserved2, reserved3 = \
struct.unpack(self.__endian_str + '16s16sQQLLLLLLLL', self.__fd.read(80))
sec_name = sec_name.strip('\x00')
section = Section(sec_name, vmaddr, vmsize, offset, alignment, reloff, nreloc, flags, reserved1, reserved2, reserved3)
segment.append_section(section)
elif self.__is_64bit_cpu == False and MACH_O_LOAD_COMMAND_TYPE(type) == MACH_O_LOAD_COMMAND_TYPE.SEGMENT:
name, vmaddr, vmsize, offset, filesize, maxprot, initprot, nsects, flags = \
struct.unpack(self.__endian_str + '16sLLLLLLLL', self.__fd.read(48))
name = name.strip('\x00')
segment = Segment(name, vmaddr, vmsize, offset, filesize, maxprot, initprot, nsects, flags)
segments.append(segment)
for j in range(nsects):
sec_name, name, vmaddr, vmsize, offset, alignment, reloff, nreloc, flags, reserved1, reserved2 = \
struct.unpack(self.__endian_str + '16s16sLLLLLLLLL', self.__fd.read(68))
sec_name = sec_name.strip('\x00')
section = Section(sec_name, vmaddr, vmsize, offset, alignment, reloff, nreloc, flags, reserved1, reserved2)
segment.append_section(section)
else:
self.__fd.seek(len - 8, os.SEEK_CUR)
for segment in segments:
for section in segment.sections:
self.__fd.seek(self.__mh_offset + section.offset)
section.buf_data(self.__fd.read(section.vmsize))
return segments
'''
struct load_dylib
{
unsigned long type;
unsigned long len;
unsigned long name_off;
unsigned long timestamp;
unsigned long current_ver;
unsigned long compat_ver;
char lib_name[];
}
'''
#Build dylib name list from LC_LOAD_DYLIB
def __build_load_dylib(self):
#skip mach header to load commands
offset = self.__mach_header.get_hdr_len()
n_cmds = self.__mach_header.get_number_cmds()
self.__fd.seek(self.__mh_offset + offset)
dylibs = []
dylib_cmd_count = 0
for i in range(n_cmds):
type, lc_len = struct.unpack(self.__endian_str + 'LL', self.__fd.read(8))
if (MACH_O_LOAD_COMMAND_TYPE(type) == MACH_O_LOAD_COMMAND_TYPE.LOAD_DYLIB) or \
(MACH_O_LOAD_COMMAND_TYPE(type) == MACH_O_LOAD_COMMAND_TYPE.WEAK_DYLIB):
off, ts, cur_ver, compat_ver = struct.unpack(self.__endian_str + 'LLLL', self.__fd.read(16))
c_str = ''
while True:
c = self.__fd.read(1)
if ord(c) == 0:
break
c_str = c_str + c
self.__fd.seek(lc_len - 8 - 16 - len(c_str) - 1, os.SEEK_CUR)
dylib = DYLib(ts, cur_ver, compat_ver, c_str)
dylibs.append(dylib)
else:
self.__fd.seek(lc_len - 8, os.SEEK_CUR)
return dylibs
    def get_dylib(self, lib_idx):
        return self.__dylibs[lib_idx]
'''
struct dyld_info
{
unsigned long type;
unsigned long len;
unsigned long rebase_off;
unsigned long rebase_size;
unsigned long bind_off;
unsigned long bind_size;
        unsigned long weak_bind_off;
        unsigned long weak_bind_size;
unsigned long lazy_bind_off;
unsigned long lazy_bind_size;
unsigned long export_off;
unsigned long export_size;
}
'''
#Search for segment LOAD_COMMAND_DYLD_INFO and return its fields
def get_dyld_info(self):
offset = self.__mach_header.get_hdr_len()
n_cmds = self.__mach_header.get_number_cmds()
self.__fd.seek(self.__mh_offset + offset)
rebase_off = 0
rebase_size = 0
bind_off = 0
bind_size = 0
weak_bind_off = 0
weak_bind_size = 0
lazy_bind_off = 0
lazy_bind_size = 0
export_off = 0
export_size = 0
for i in range(n_cmds):
type, len = struct.unpack(self.__endian_str + 'LL', self.__fd.read(8))
if MACH_O_LOAD_COMMAND_TYPE(type) == MACH_O_LOAD_COMMAND_TYPE.DYLD_INFO:
rebase_off, rebase_size, bind_off, bind_size, weak_bind_off, weak_bind_size, lazy_bind_off, lazy_bind_size, export_off, export_size \
= struct.unpack(self.__endian_str + 'LLLLLLLLLL', self.__fd.read(40))
break
else:
self.__fd.seek(len - 8, os.SEEK_CUR)
dyld_info = DYLDInfo(rebase_off, rebase_size, bind_off, bind_size, weak_bind_off, weak_bind_size, lazy_bind_off, lazy_bind_size, export_off, export_size)
return dyld_info
#Load binding information and build up a binding table which is a list of vmaddr to imported symbol mapping
#This is a simple implementation
#Many Object-C data structure are fixed up based on binding information. The binding table must be loaded before analyzing and dumping Object-C data.
def __build_bind_info(self, bind_off, bind_size, bind_function):
self.__fd.seek(self.__mh_offset + bind_off)
bind_data = self.__fd.read(bind_size)
library = None
bind_item = []
bind_list = []
i = 0
        #Default for bind commands that never set the dylib ordinal explicitly
lib_ordinal = 1
value = None
len = None
symbol = None
type = None
addend = 0
seg_idx = None
seg_off = None
addr = None
count = None
skip = None
while i < bind_size:
byte = bind_data[i]
opcode = ord(byte) & DYLD_INFO_BIND_OPCODE.OPCODE_MASK.value
opcode = DYLD_INFO_BIND_OPCODE(opcode)
imm = ord(byte) & DYLD_INFO_BIND_OPCODE.IMMEDIATE_MASK.value
debug_str = '[0x{:x}] 0x{:x}:'.format(i, ord(byte))
i = i + 1
if opcode == DYLD_INFO_BIND_OPCODE.DONE:
debug_str = debug_str + 'bind done'
#print debug_str
return
elif opcode == DYLD_INFO_BIND_OPCODE.SET_DYLIB_ORDINAL_IMM:
lib_ordinal = imm
debug_str = debug_str + 'set library oridinal imm: {:d}'.format(lib_ordinal)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.SET_DYLIB_ORDINAL_ULEB:
value, len = leb128.decode_uleb128(bind_data[i:bind_size:], bind_size - i)
lib_ordinal = value
i = i + len
debug_str = debug_str + 'set library oridinal uleb: 0x{:x}'.format(lib_ordinal)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.SET_DYLIB_SPECIAL_IMM:
#Have no idea about how to handle negative or zero library ordinal
#So print and raise an exception here
if imm != 0:
                    lib_ordinal = imm | DYLD_INFO_BIND_OPCODE.OPCODE_MASK.value
else:
lib_ordinal = imm
debug_str = debug_str + 'set library oridinal special imm: 0x{:x}'.format(lib_ordinal)
#print debug_str
raise UnsupportBindOpcode(byte)
elif opcode == DYLD_INFO_BIND_OPCODE.SET_SYMBOL_TRAILING_FLAGS_IMM:
symbol = ''
while ord(bind_data[i]) != 0:
symbol = symbol + bind_data[i]
i = i + 1
i = i + 1
debug_str = debug_str + 'set symbol imm: 0x{:x}, {:s}'.format(imm, symbol)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.SET_TYPE_IMM:
type = imm
debug_str = debug_str + 'set type imm: 0x{:x}'.format(type)
#print debug_str
if DYLD_INFO_BIND_TYPE(type) != DYLD_INFO_BIND_TYPE.POINTER:
raise UnsupportBindOpcode(byte)
elif opcode == DYLD_INFO_BIND_OPCODE.SET_ADDEND_SLEB:
#TODO: Add support for non zero addend
#The virtual section data structure need to be revised
addend, len = leb128.decode_sleb128(bind_data[i:bind_size:], bind_size - i)
i = i + len
debug_str = debug_str + 'set addend sleb: 0x{:x}'.format(addend)
#print debug_str
#raise UnsupportBindOpcode(byte)
elif opcode == DYLD_INFO_BIND_OPCODE.SET_SEGMENT_AND_OFFSET_ULEB:
seg_idx = imm
seg_off, len = leb128.decode_uleb128(bind_data[i:bind_size:], bind_size - i)
i = i + len
debug_str = debug_str + 'set segment: {:d} and offset 0x{:x}'.format(seg_idx, seg_off)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.ADD_ADDR_ULEB:
addr, len = leb128.decode_uleb128(bind_data[i:bind_size:], bind_size - i)
i = i + len
#it's actually signed long long
if addr & 0x8000000000000000:
addr = -((addr - 1) ^ 0xFFFFFFFFFFFFFFFF)
seg_off = seg_off + addr
debug_str = debug_str + 'add addr uleb: 0x{:x}'.format(seg_off)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.DO_BIND:
bind_function(seg_idx, seg_off, type, lib_ordinal, addend, symbol)
if self.__is_64bit_cpu == True:
seg_off = seg_off + 8
else:
seg_off = seg_off + 4
debug_str = debug_str + 'do bind, offset is now: 0x{:x}'.format(seg_off)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.DO_BIND_ADD_ADDR_ULEB:
bind_function(seg_idx, seg_off, type, lib_ordinal, addend, symbol)
if self.__is_64bit_cpu == True:
seg_off = seg_off + 8
else:
seg_off = seg_off + 4
addr, len = leb128.decode_uleb128(bind_data[i:bind_size:], bind_size - i)
if addr & 0x8000000000000000:
addr = -((addr - 1) ^ 0xFFFFFFFFFFFFFFFF)
i = i + len
seg_off = seg_off + addr
debug_str = debug_str + 'do bind add addr uleb, offset is now: 0x{:x}'.format(seg_off)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.DO_BIND_ADD_ADDR_IMM_SCALED:
bind_function(seg_idx, seg_off, type, lib_ordinal, addend, symbol)
if self.__is_64bit_cpu == True:
seg_off = seg_off + (imm + 1)* 8
else:
seg_off = seg_off + (imm + 1) * 4
debug_str = debug_str + 'do bind add addr imm scaled, offset is now: 0x{:x}'.format(seg_off)
#print debug_str
elif opcode == DYLD_INFO_BIND_OPCODE.DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
count, len = leb128.decode_uleb128(bind_data[i:bind_size:], bind_size - i)
i = i + len
skip, len = leb128.decode_uleb128(bind_data[i:bind_size:], bind_size - i)
i = i + len
for j in range(count):
bind_function(seg_idx, seg_off, type, lib_ordinal, addend, symbol)
if self.__is_64bit_cpu == True:
seg_off = seg_off + skip + 8
else:
seg_off = seg_off + skip + 4
debug_str = debug_str + 'do bind ulbe times ({:d}) skipping uleb ({:d}), offset is now: 0x{:x}'.format(count, skip, seg_off)
#print debug_str
else:
raise UnsupportBindOpcode(byte)
#bind commands without end
print 'bind commands without end'
return
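    #ULEB128 refresher for the opcodes above (informational only): each byte
    #contributes its low 7 bits, least-significant group first, and a set
    #high bit means more bytes follow. For example the byte pair 0xE8 0x07
    #decodes to 0x68 + (0x07 << 7) = 1000, consuming 2 bytes.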
#Search for segment LOAD_COMMAND_SEGMENT or LOAD_COMMAND_SEGMENT64 with segment index
    #Generally __PAGEZERO is indexed by 0, __TEXT by 1, __DATA by 2 and __LINKEDIT by 3
def get_segment(self, seg_idx):
return self.__segments[seg_idx]
def get_section_by_addr(self, seg_idx, seg_off):
size = 0
for section in self.__segments[seg_idx].sections:
size = size + section.vmsize
if seg_off < size:
return section
def __bind_pass1(self, seg_idx, seg_off, type, lib_ordinal, addend, symbol):
dylib = self.get_dylib(lib_ordinal - 1)
dylib.append_symbol(symbol)
def __bind_pass2(self, seg_idx, seg_off, type, lib_ordinal, addend, symbol):
segment = self.get_segment(seg_idx)
section = self.get_section_by_addr(seg_idx, seg_off)
symbol_addr = self.get_virtual_map_addr(symbol)
position = seg_off - (section.vmaddr - segment.vmaddr)
if self.__is_64bit_cpu == True:
length = 8
addr_str = struct.pack(self.__endian_str + 'Q', symbol_addr)
else:
length = 4
addr_str = struct.pack(self.__endian_str + 'L', symbol_addr)
#TODO: addend
data = section.data[0 : position] + addr_str + section.data[position + length:]
section.data = data
#print '0x{:X} binding to 0x{:X}:{:s}+{:d}'.format(segment.vmaddr + seg_off, symbol_addr, symbol, addend)
def dump_import_table(self):
for dylib in self.__dylibs:
print dylib.name
for symbol in dylib.symbols:
print ' ', symbol
def __build_virtual_section(self):
segment = self.__segments[-1]
addr = segment.vmaddr + segment.vmsize
vsec = []
for dylib in self.__dylibs:
for symbol in dylib.symbols:
vmap = VirtualMap(addr, symbol)
vsec.append(vmap)
if self.__is_64bit_cpu == True:
addr = addr + 8
else:
addr = addr + 4
return vsec
def is_virtual_section_addr(self, addr):
return addr >= self.__virtual_section[0].addr
def get_virtual_map_addr(self, symbol):
for vmap in self.__virtual_section:
if symbol == vmap.symbol:
return vmap.addr
def get_virtual_map_symbol(self, addr):
if self.is_virtual_section_addr(addr):
if self.__is_64bit_cpu == True:
idx = (addr - self.__virtual_section[0].addr) / 8
else:
idx = (addr - self.__virtual_section[0].addr) / 4
return self.__virtual_section[idx].symbol
return None
def dump_virtual_section(self):
for vmap in self.__virtual_section:
print '0x{:X}:{:s}'.format(vmap.addr, vmap.symbol)
#Search a section with segment name and section name
def get_section_by_name(self, seg_name, sec_name):
for segment in self.__segments:
if seg_name == segment.name:
for section in segment.sections:
if sec_name == section.name:
return section
return None
def get_objc2_ivar_layout(self, vmaddr):
section = self.get_section_by_name('__TEXT', '__objc_classname')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
ivar_layout = ord(section.data[position])
return ivar_layout
def get_objc2_cls_name(self, vmaddr):
section = self.get_section_by_name('__TEXT', '__objc_classname')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
c_str = ''
while True:
c = section.data[position]
position = position + 1
if ord(c) == 0:
break
c_str = c_str + c
return c_str
def get_objc2_method_name(self, vmaddr):
section = self.get_section_by_name('__TEXT', '__objc_methname')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
i = 0
c_str = ''
while True:
c = section.data[position]
position = position + 1
if ord(c) == 0:
break
c_str = c_str + c
return c_str
def get_objc2_method_type(self, vmaddr):
section = self.get_section_by_name('__TEXT', '__objc_methtype')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
i = 0
c_str = ''
while True:
c = section.data[position]
position = position + 1
if ord(c) == 0:
break
c_str = c_str + c
return c_str
'''
struct objc2_meth
{
vmaddr sel_ptr;
vmaddr type_ptr;
vmaddr imp_ptr;
}
struct objc2_meth_list
{
unsigned long entry_size;
unsigned long entry_count;
struct objc2_meth first;
}
'''
def get_objc2_methods(self, vmaddr):
section = self.get_section_by_name('__DATA', '__objc_const')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
entry_size, entry_count = struct.unpack(self.__endian_str + 'LL', section.data[position: position + 8:])
position = position + 8
method_list = []
for i in range(entry_count):
if self.__is_64bit_cpu == True:
sel_ptr, type_ptr, imp_ptr = struct.unpack(self.__endian_str + 'QQQ', section.data[position: position + entry_size:])
else:
sel_ptr, type_ptr, imp_ptr = struct.unpack(self.__endian_str + 'LLL', section.data[position: position + entry_size:])
position = position + entry_size
meth_name = self.get_objc2_method_name(sel_ptr)
meth_type = self.get_objc2_method_type(type_ptr)
imp_addr = '0x{:X}'.format(imp_ptr)
objc2_meth = ObjC2Method(meth_name, meth_type, imp_addr)
method_list.append(objc2_meth)
return method_list
def get_objc2_protocols(self, vmaddr):
pass
def get_objc2_ivar_offset(self, vmaddr):
section = self.get_section_by_name('__DATA', '__objc_ivar')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
offset, = struct.unpack(self.__endian_str + 'L', section.data[position: position + 4])
return offset
'''
struct objc2_ivar
{
vmaddr offset_ptr;
vmaddr name_ptr;
vmaddr type_ptr;
unsigned long alignment;
unsigned long size;
}
struct objc2_ivar_list
{
unsigned long entry_size;
unsigned long entry_count;
struct objc2_ivar first;
}
'''
def get_objc2_ivars(self, vmaddr):
section = self.get_section_by_name('__DATA', '__objc_const')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
entry_size, entry_count = struct.unpack(self.__endian_str + 'LL', section.data[position : position + 8])
position = position + 8
ivar_list = []
for i in range(entry_count):
if self.__is_64bit_cpu == True:
offset_ptr, name_ptr, type_ptr, alignment, size = struct.unpack(self.__endian_str + 'QQQLL', section.data[position : position + entry_size])
else:
offset_ptr, name_ptr, type_ptr, alignment, size = struct.unpack(self.__endian_str + 'LLLLL', section.data[position : position + entry_size])
position = position + entry_size
offset = self.get_objc2_ivar_offset(offset_ptr)
meth_name = self.get_objc2_method_name(name_ptr)
meth_type = self.get_objc2_method_type(type_ptr)
objc2_ivar = ObjC2IVar(offset, meth_name, meth_type, alignment, size)
ivar_list.append(objc2_ivar)
return ivar_list
def get_cstring(self, vmaddr):
section = self.get_section_by_name('__TEXT', '__cstring')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
c_str = ''
while True:
c = section.data[position]
position = position + 1
if ord(c) == 0:
break
c_str = c_str + c
return c_str
'''
struct objc2_property
{
vmaddr name_ptr;
vmaddr attr_ptr;
};
struct objc2_prop_list
{
unsigned long entry_size;
unsigned long entry_count;
struct objc2_prop first;
};
'''
def get_objc2_properties(self, vmaddr):
section = self.get_section_by_name('__DATA', '__objc_const')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
entry_size, entry_count = struct.unpack(self.__endian_str + 'LL', section.data[position : position + 8])
position = position + 8
prop_list = []
for i in range(entry_count):
if self.__is_64bit_cpu == True:
name_ptr, attr_ptr = struct.unpack(self.__endian_str + 'QQ', section.data[position : position + entry_size])
else:
name_ptr, attr_ptr = struct.unpack(self.__endian_str + 'LL', section.data[position : position + entry_size])
position = position + entry_size
name = self.get_cstring(name_ptr)
attr = self.get_cstring(attr_ptr)
objc2_property = ObjC2Property(name, attr)
prop_list.append(objc2_property)
return prop_list
'''
struct objc2_class_ro {
uint32_t flags;
uint32_t instanceStart;
uint32_t instanceSize;
uint32_t reserved; // *** this field does not exist in the 32-bit version ***
vmaddr ivar_layout_ptr;
vmaddr cls_name_ptr;
vmaddr base_methods_ptr;
vmaddr base_protocols_ptr;
vmaddr ivars_ptr;
vmaddr weak_ivar_layout_ptr;
vmaddr base_properties_ptr;
};
'''
def __build_objc2_cls_ro(self, vmaddr):
section = self.get_section_by_name('__DATA', '__objc_const')
assert vmaddr < (section.vmaddr + section.vmsize)
position = vmaddr - section.vmaddr
flags, inst_start, inst_size = struct.unpack(self.__endian_str + 'LLL', section.data[position : position + 12])
position = position + 12
if self.__is_64bit_cpu == True:
reserved, ivar_layout_ptr, cls_name_ptr, base_methods_ptr, base_protocols_ptr, ivars_ptr, weak_ivar_layout_ptr, base_properties_ptr = \
struct.unpack(self.__endian_str + 'LQQQQQQQ', section.data[position : position + 60])
else:
ivar_layout_ptr, cls_name_ptr, base_methods_ptr, base_protocols_ptr, ivars_ptr, weak_ivar_layout_ptr, base_properties_ptr = \
struct.unpack(self.__endian_str + 'LLLLLLL', section.data[position : position + 28])
reserved = None
if ivar_layout_ptr != 0:
ivar_layout = self.get_objc2_ivar_layout(ivar_layout_ptr)
else:
ivar_layout = None
if cls_name_ptr != 0:
cls_name = self.get_objc2_cls_name(cls_name_ptr)
else:
cls_name = None
if base_methods_ptr != 0:
methods = self.get_objc2_methods(base_methods_ptr)
else:
methods = None
if base_protocols_ptr != 0:
self.get_objc2_protocols(base_protocols_ptr)
if ivars_ptr != 0:
ivars = self.get_objc2_ivars(ivars_ptr)
else:
ivars = None
if weak_ivar_layout_ptr != 0:
weak_ivar_layout = self.get_objc2_ivar_layout(weak_ivar_layout_ptr)
else:
weak_ivar_layout = None
if base_properties_ptr != 0:
properties = self.get_objc2_properties(base_properties_ptr)
else:
properties = None
return ObjC2ClassRO(flags, inst_start, inst_size, ivar_layout, cls_name, methods, ivars, weak_ivar_layout, properties, reserved)
'''
struct objc2_class {
vmaddr isa_ptr;
vmaddr superclass_ptr;
vmaddr cache_ptr;
vmaddr vtable_ptr;
vmaddr data_ptr; //objc2_class_ro
}
'''
def __build_objc2_cls(self, vmaddr):
section = self.get_section_by_name('__DATA', '__objc_data')
assert vmaddr < (section.vmaddr + section.vmsize)
if vmaddr in self.__objc2_cls_stack:
return None
else:
self.__objc2_cls_stack.append(vmaddr)
position = vmaddr - section.vmaddr
if self.__is_64bit_cpu == True:
isa_ptr, superclass_ptr, cache_ptr, vtable_ptr, data_ptr = \
struct.unpack(self.__endian_str + 'QQQQQ', section.data[position : position + 40])
else:
isa_ptr, superclass_ptr, cache_ptr, vtable_ptr, data_ptr = \
struct.unpack(self.__endian_str + 'LLLLL', section.data[position : position + 20])
objc2_cls_ro = self.__build_objc2_cls_ro(data_ptr)
objc2_class = ObjC2Class(vmaddr, objc2_cls_ro)
self.__resloved_objc2_cls_list.append(objc2_class)
isa = None
if self.is_virtual_section_addr(isa_ptr):
isa_name = self.get_virtual_map_symbol(isa_ptr)
elif isa_ptr != 0:
isa = self.__build_objc2_cls(isa_ptr)
if isa == None:
for resloved_cls in self.__resloved_objc2_cls_list:
if isa_ptr == resloved_cls.vmaddr:
isa_name = resloved_cls.name
break
else:
isa_name = isa.name
else:
isa_name = '{:d}'.format(isa_ptr)
objc2_class.isa = isa
objc2_class.isa_name = isa_name
superclass = None
if self.is_virtual_section_addr(superclass_ptr):
superclass_name = self.get_virtual_map_symbol(superclass_ptr)
elif superclass_ptr != 0:
superclass = self.__build_objc2_cls(superclass_ptr)
if superclass == None:
for resloved_cls in self.__resloved_objc2_cls_list:
if superclass_ptr == resloved_cls.vmaddr:
superclass_name = resloved_cls.name
break
else:
superclass_name = superclass.name
else:
superclass_name = '{:d}'.format(superclass_ptr)
objc2_class.superclass = superclass
objc2_class.superclass_name = superclass_name
if self.is_virtual_section_addr(cache_ptr):
cache_name = self.get_virtual_map_symbol(cache_ptr)
else:
cache_name = '{:d}'.format(cache_ptr)
objc2_class.cache_name = cache_name
if self.is_virtual_section_addr(vtable_ptr):
vtable_name = self.get_virtual_map_symbol(vtable_ptr)
else:
vtable_name = '{:d}'.format(vtable_ptr)
objc2_class.vtable_name = vtable_name
self.__objc2_cls_stack.pop()
return objc2_class
def __build_objc2_clslist(self):
section = self.get_section_by_name('__DATA', '__objc_classlist')
if self.__is_64bit_cpu == True:
n_cls = section.vmsize / 8
else:
n_cls = section.vmsize / 4
cls_list = []
position = 0
for i in range(n_cls):
if self.__is_64bit_cpu == True:
objc2_cls_ptr, = struct.unpack(self.__endian_str + 'Q', section.data[position : position + 8 :])
position = position + 8
else:
objc2_cls_ptr, = struct.unpack(self.__endian_str + 'L', section.data[position : position + 4 :])
position = position + 4
objc2_cls = self.__build_objc2_cls(objc2_cls_ptr)
cls_list.append(objc2_cls)
return cls_list
def __build_objc2_nlclslist(self):
section = self.get_section_by_name('__DATA', '__objc_nlclslist')
if self.__is_64bit_cpu == True:
n_cls = section.vmsize / 8
else:
n_cls = section.vmsize / 4
cls_list = []
position = 0
for i in range(n_cls):
if self.__is_64bit_cpu == True:
objc2_cls_ptr, = struct.unpack(self.__endian_str + 'Q', section.data[position : position + 8 :])
position = position + 8
else:
objc2_cls_ptr, = struct.unpack(self.__endian_str + 'L', section.data[position : position + 4 :])
position = position + 4
objc2_cls = self.__build_objc2_cls(objc2_cls_ptr)
cls_list.append(objc2_cls)
return cls_list
def __build_objc2_protolist(self):
section = self.get_section_by_name('__DATA', '__objc_protolist')
if self.__is_64bit_cpu == True:
n_proto = section.vmsize / 8
else:
n_proto = section.vmsize / 4
proto_list = []
#TODO
def dump_objc_classlist(self):
for objc_class in self.__objc_classlist:
objc_class.dump()
def dump_objc_nlclslist(self):
for objc_class in self.__objc_nlclslist:
objc_class.dump()
def dump_section_objc_selrefs(self):
section = self.get_section_by_name('__DATA', '__objc_selrefs')
if self.__is_64bit_cpu == True:
ref_size = 8
else:
ref_size = 4
nrefs = section.vmsize/ref_size
address = section.vmaddr
for i in range(nrefs):
position = address - section.vmaddr
if self.__is_64bit_cpu == True:
ref, = struct.unpack(self.__endian_str + 'Q', section.data[position : position + ref_size])
else:
ref, = struct.unpack(self.__endian_str + 'L', section.data[position : position + ref_size])
method_name = self.get_objc2_method_name(ref)
print '0x{:X}: __objc_methname(\'{:s}\')'.format(address, method_name)
address = address + ref_size
def get_objc_class_ref(self, vmaddr):
for objc_class in self.__objc_classlist:
if vmaddr == objc_class.vmaddr:
return objc_class
for objc_class in self.__objc_nlclslist:
if vmaddr == objc_class.vmaddr:
return objc_class
return None
def dump_section_objc_classrefs(self):
section = self.get_section_by_name('__DATA', '__objc_classrefs')
if self.__is_64bit_cpu == True:
ref_size = 8
else:
ref_size = 4
nrefs = section.vmsize/ref_size
address = section.vmaddr
for i in range(nrefs):
position = address - section.vmaddr
if self.__is_64bit_cpu == True:
ref, = struct.unpack(self.__endian_str + 'Q', section.data[position : position + ref_size])
else:
ref, = struct.unpack(self.__endian_str + 'L', section.data[position : position + ref_size])
if self.is_virtual_section_addr(ref):
class_name = self.get_virtual_map_symbol(ref)
else:
objc_class = self.get_objc_class_ref(ref)
class_name = objc_class.name
print '0x{:X}: {:s}'.format(address, class_name)
address = address + ref_size
def dump_section_objc_superrefs(self):
section = self.get_section_by_name('__DATA', '__objc_superrefs')
if self.__is_64bit_cpu == True:
ref_size = 8
else:
ref_size = 4
nrefs = section.vmsize/ref_size
address = section.vmaddr
for i in range(nrefs):
position = address - section.vmaddr
if self.__is_64bit_cpu == True:
ref, = struct.unpack(self.__endian_str + 'Q', section.data[position : position + ref_size])
else:
ref, = struct.unpack(self.__endian_str + 'L', section.data[position : position + ref_size])
if self.is_virtual_section_addr(ref):
class_name = self.get_virtual_map_symbol(ref)
else:
objc_class = self.get_objc_class_ref(ref)
class_name = objc_class.name
print '0x{:X}: {:s}'.format(address, class_name)
address = address + ref_size
def dump_section_objc_ivar(self):
section = self.get_section_by_name('__DATA', '__objc_ivar')
ivar_size = 4
nivar = section.vmsize/ivar_size
address = section.vmaddr
for i in range(nivar):
position = address - section.vmaddr
ivar, = struct.unpack(self.__endian_str + 'L', section.data[position : position + ivar_size])
print '0x{:X}: 0x{:X}'.format(address, ivar)
address = address + ivar_size
'''
struct __NSConstantStringImpl
{
vmaddr isa;
unsigned long flags;
vmaddr str;
unsigned long length;
}
struct __NSConstantStringImpl64
{
vmaddr isa;
unsigned long long flags;
vmaddr str;
unsigned long long length;
}
'''
def dump_section_cfstring(self):
section = self.get_section_by_name('__DATA', '__cfstring')
if self.__is_64bit_cpu == True:
cfstring_size = 32
else:
cfstring_size = 16
ncfstring = section.vmsize/cfstring_size
address = section.vmaddr
for i in range(ncfstring):
position = address - section.vmaddr
if self.__is_64bit_cpu == True:
isa, flags, str, length = struct.unpack(self.__endian_str + 'QQQQ', section.data[position : position + cfstring_size])
else:
isa, flags, str, length = struct.unpack(self.__endian_str + 'LLLL', section.data[position : position + cfstring_size])
if self.is_virtual_section_addr(isa):
isa_name = self.get_virtual_map_symbol(isa)
else:
                isa_name = '0x{:X}'.format(isa)
c_str = self.get_cstring(str)
print '0x{:X}: __CFString<{:s}, 0x{:X}, \'{:s}\', {:d}>'.format(address, isa_name, flags, c_str, length)
address = address + cfstring_size
def main():
from optparse import OptionParser
parser = OptionParser(usage='usage: %prog [options] file', version='%prog 0.01')
parser.add_option('-a', '--arch', action='store', dest='arch', \
type='choice', choices=['arm', 'aarch64', 'i386', 'x86_64'], default='aarch64', help='specify an arch to dump, aarch64 is specified by default, applicable only for FAT Mach-O file')
parser.add_option('-l', '--all', action='store_true', dest='dump_all', default=False, help='dump all')
parser.add_option('-c', '--classlist', action='store_true', dest='dump_clslist', default=False, help='dump section __objc_classlist')
parser.add_option('-n', '--nlclslist', action='store_true', dest='dump_nlclslist', default=False, help='dump section __objc_nlclslist')
parser.add_option('-s', '--selrefs', action='store_true', dest='dump_selrefs', default=False, help='dump section __objc_selrefs')
parser.add_option('-r', '--classrefs', action='store_true', dest='dump_classrefs', default=False, help='dump section __objc_classrefs')
parser.add_option('-u', '--superrefs', action='store_true', dest='dump_superrefs', default=False, help='dump section __objc_superrefs')
parser.add_option('-i', '--ivar', action='store_true', dest='dump_ivar', default=False, help='dump section __objc_ivar')
parser.add_option('-f', '--cfstring', action='store_true', dest='dump_cfstring', default=False, help='dump section __cfstring')
parser.add_option('-m', '--import_table', action='store_true', dest='dump_import_table', default=False, help='dump all imported symbols')
parser.add_option('-v', '--virtual_section', action='store_true', dest='dump_vsection', default=False, help='dump virtual section for dynamic binding')
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(0)
file = args[0]
arch = None
if options.arch == 'aarch64':
arch = MACH_O_CPU_TYPE.ARM64
elif options.arch == 'arm':
arch = MACH_O_CPU_TYPE.ARM
elif options.arch == 'i386':
arch = MACH_O_CPU_TYPE.I386
elif options.arch == 'x86_64':
arch = MACH_O_CPU_TYPE.X86_64
else:
print 'Unknown arch selected, fallback to aarch64'
arch = MACH_O_CPU_TYPE.ARM64
if options.dump_all == True:
options.dump_clslist = True
options.dump_nlclslist = True
options.dump_selrefs = True
options.dump_classrefs = True
options.dump_superrefs = True
options.dump_ivar = True
options.dump_cfstring = True
options.dump_import_table = True
options.dump_vsection = True
fd = open(file, 'rb')
try:
mach_o_anylyzer = MachOAnalyzer(fd, arch)
except UnknownMagic as e:
        print 'Unknown magic: ' + e.value
fd.close()
sys.exit(0)
if options.dump_clslist:
print '--------------__objc_classlist--------------'
mach_o_anylyzer.dump_objc_classlist()
if options.dump_nlclslist:
print '--------------__objc_nlclslist--------------'
mach_o_anylyzer.dump_objc_nlclslist()
if options.dump_selrefs:
print '---------------__objc_selrefs---------------'
mach_o_anylyzer.dump_section_objc_selrefs()
if options.dump_classrefs:
print '--------------__objc_classrefs--------------'
mach_o_anylyzer.dump_section_objc_classrefs()
if options.dump_superrefs:
print '--------------__objc_superrefs--------------'
mach_o_anylyzer.dump_section_objc_superrefs()
if options.dump_ivar:
print '----------------__objc_ivar-----------------'
mach_o_anylyzer.dump_section_objc_ivar()
if options.dump_cfstring:
print '-----------------__cfstring-----------------'
mach_o_anylyzer.dump_section_cfstring()
if options.dump_import_table:
print '----------------import_table----------------'
mach_o_anylyzer.dump_import_table()
if options.dump_vsection:
print '---------------virtual_section--------------'
mach_o_anylyzer.dump_virtual_section()
fd.close()
if __name__ == '__main__':
main()
| py-ir0nf1st/objc_class_dump | objc_class_dump.py | Python | mit | 44,790 |
#!/usr/bin/python
import re
import probe_config as conf
import socket
import os
import tempfile
import time
class Cassandra:
def __init__(self, myname):
self.myname = myname
self.allnodes = conf.cassandra_nodes
self.idx = self.allnodes.index(myname)
self.base_dir = '/mnt/%s1' % conf.data_disk
def _configure_limits(self):
s = """
* soft nofile 999999
* hard nofile 999999
"""
with open('/etc/security/limits.conf','a') as outfile:
outfile.write(s)
def _configure_sysctl(self):
s = """
# disable TIME_WAIT.. wait..
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_tw_reuse=1
# disable syn cookies
net.ipv4.tcp_syncookies = 0
# double amount of allowed conntrack
net.ipv4.netfilter.ip_conntrack_max = 262144
net.core.rmem_max = 8388608
net.core.wmem_max = 8388608
net.core.rmem_default = 65536
net.core.wmem_default = 65536
net.ipv4.tcp_rmem = 4096 87380 8388608
net.ipv4.tcp_wmem = 4096 65536 8388608
net.ipv4.tcp_mem = 8388608 8388608 8388608
net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 10240
net.core.netdev_max_backlog = 10240
fs.file-max = 999999
"""
with open('/etc/sysctl.conf','w') as outfile:
outfile.write(s)
os.system('sysctl -p')
def configure(self):
os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
self._configure_sysctl()
self._configure_limits()
num_cass_nodes = len(self.allnodes)
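        # Spread initial tokens evenly across the signed 64-bit Murmur3 token range:
        # node i gets i * (2**64 / N) - 2**63 (Python 2 integer division).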
token = str(((2**64 / num_cass_nodes) * self.idx) - 2**63)
seed_ip = socket.gethostbyname(self.allnodes[0])
my_ip = socket.gethostbyname(self.myname)
# read in template
with open("cassandra.yaml", "r") as infile:
inlines = infile.readlines()
with open("/opt/apache-cassandra-1.2.5/conf/cassandra.yaml", "w") as outfile:
for line in inlines:
line = re.sub(r'__BASE_DIR__', self.base_dir, line)
line = re.sub(r'__INITIAL_TOKEN__', token, line)
line = re.sub(r'__MY_IP__', my_ip, line)
line = re.sub(r'__SEED_IP__', seed_ip, line)
outfile.write(line)
# UNCOMMENT IF YOU MAKE CHANGES TO THE DEFAULT 'cassandra-env.sh'
#with open("cassandra-env.sh", "r") as infile:
# inlines = infile.readlines()
#with open("/opt/apache-cassandra-1.2.5/conf/cassandra-env.sh", "w") as outfile:
# for line in inlines:
# outfile.write(line)
def _initialize_db(self):
print "initializing db"
time.sleep(20)
print "finished sleeping..."
init="""\
create KEYSPACE simbastore WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
create table simbastore.subscriptions (key uuid PRIMARY KEY, subscriptions list<blob>);
create table simbastore.metadata (key text PRIMARY KEY, consistency text);
create KEYSPACE test_3replicas WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3};
"""
fh, path = tempfile.mkstemp()
with open(path,'w') as outfile:
outfile.write(init)
os.close(fh)
os.system("/opt/apache-cassandra-1.2.5/bin/cqlsh -f %s" % path)
os.unlink(path)
def start(self):
os.system('/opt/apache-cassandra-1.2.5/bin/cassandra')
if self.myname == self.allnodes[-1]:
self._initialize_db()
def stop(self):
os.system('pkill -f cassandra')
| SimbaService/Simba | server/scripts/probe/cassandra.py | Python | apache-2.0 | 3,363 |
import unittest
from nose.tools import raises
from vmf_converter.core.dynamic_converter import DynamicConverter
class DynamicConverterTest(unittest.TestCase):
"""Test Class for DynamicConverter module"""
def test_velocity_to_vmf_001(self):
"""
Tests the Velocity to VMF conversion for pppp dynamic.
"""
assert DynamicConverter.velocity_to_vmf(5) == -5
def test_velocity_to_vmf_002(self):
"""
Tests the Velocity to VMF conversion for ppp dynamic.
"""
assert DynamicConverter.velocity_to_vmf(15) == -4
def test_velocity_to_vmf_003(self):
"""
Tests the Velocity to VMF conversion for pp dynamic.
"""
assert DynamicConverter.velocity_to_vmf(30) == -3
def test_velocity_to_vmf_004(self):
"""
Tests the Velocity to VMF conversion for p dynamic.
"""
assert DynamicConverter.velocity_to_vmf(45) == -2
def test_velocity_to_vmf_005(self):
"""
Tests the Velocity to VMF conversion for mp dynamic.
"""
assert DynamicConverter.velocity_to_vmf(55) == -1
def test_velocity_to_vmf_006(self):
"""
Tests the Velocity to VMF conversion for mf dynamic.
"""
assert DynamicConverter.velocity_to_vmf(66) == 1
def test_velocity_to_vmf_007(self):
"""
Tests the Velocity to VMF conversion for f dynamic.
"""
assert DynamicConverter.velocity_to_vmf(80) == 2
def test_velocity_to_vmf_008(self):
"""
Tests the Velocity to VMF conversion for ff dynamic.
"""
assert DynamicConverter.velocity_to_vmf(95) == 3
def test_velocity_to_vmf_009(self):
"""
Tests the Velocity to VMF conversion for fff dynamic.
"""
assert DynamicConverter.velocity_to_vmf(105) == 4
def test_velocity_to_vmf_010(self):
"""
Tests the Velocity to VMF conversion for ffff dynamic.
"""
assert DynamicConverter.velocity_to_vmf(120) == 5
@raises(ValueError)
def test_velocity_to_vmf_011(self):
"""
Tests the Velocity to VMF conversion for a lower extreme bound.
"""
DynamicConverter.velocity_to_vmf(-5)
@raises(ValueError)
def test_velocity_to_vmf_012(self):
"""
        Tests the Velocity to VMF conversion for an upper extreme bound.
"""
DynamicConverter.velocity_to_vmf(150)
def test_vmf_to_velocity_001(self):
"""
Tests the VMF to Velocity conversion for a pppp dynamic.
"""
assert DynamicConverter.vmf_to_velocity(-5) == 10
def test_vmf_to_velocity_002(self):
"""
Tests the VMF to Velocity conversion for a ppp dynamic.
"""
assert DynamicConverter.vmf_to_velocity(-4) == 23
def test_vmf_to_velocity_003(self):
"""
Tests the VMF to Velocity conversion for a pp dynamic.
"""
assert DynamicConverter.vmf_to_velocity(-3) == 36
def test_vmf_to_velocity_004(self):
"""
Tests the VMF to Velocity conversion for a p dynamic.
"""
assert DynamicConverter.vmf_to_velocity(-2) == 49
def test_vmf_to_velocity_005(self):
"""
Tests the VMF to Velocity conversion for a mp dynamic.
"""
assert DynamicConverter.vmf_to_velocity(-1) == 62
def test_vmf_to_velocity_006(self):
"""
Tests the VMF to Velocity conversion for a mf dynamic.
"""
assert DynamicConverter.vmf_to_velocity(1) == 75
def test_vmf_to_velocity_007(self):
"""
Tests the VMF to Velocity conversion for a f dynamic.
"""
assert DynamicConverter.vmf_to_velocity(2) == 88
def test_vmf_to_velocity_008(self):
"""
Tests the VMF to Velocity conversion for a ff dynamic.
"""
assert DynamicConverter.vmf_to_velocity(3) == 101
def test_vmf_to_velocity_009(self):
"""
Tests the VMF to Velocity conversion for a fff dynamic.
"""
assert DynamicConverter.vmf_to_velocity(4) == 114
def test_vmf_to_velocity_010(self):
"""
Tests the VMF to Velocity conversion for a ffff dynamic.
"""
assert DynamicConverter.vmf_to_velocity(5) == 127
@raises(ValueError)
def test_vmf_to_velocity_011(self):
"""
Tests the VMF to Velocity conversion for a 0 dynamic (invalid).
"""
DynamicConverter.vmf_to_velocity(0) | project-schumann/vmf-converter | tests/dynamic_converter_test.py | Python | mit | 4,528 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.sshClient import SshClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class Services:
"""Test Network Services
"""
def __init__(self):
self.services = {
"ostype": "CentOS 5.3 (64-bit)",
# Cent OS 5.3 (64 bit)
"lb_switch_wait": 10,
# Time interval after which LB switches the requests
"sleep": 60,
"timeout":10,
"network_offering": {
"name": 'Test Network offering',
"displaytext": 'Test Network offering',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList" : {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
},
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
# in MHz
"memory": 256,
# In MBs
},
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
"server":
{
"displayname": "Small Instance",
"username": "root",
"password": "password",
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"ssh_port": 22,
"protocol": 'TCP',
},
"natrule":
{
"privateport": 22,
"publicport": 2222,
"protocol": "TCP"
},
"lbrule":
{
"name": "SSH",
"alg": "roundrobin",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"protocol": 'TCP'
}
}
class TestLoadBalance(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestLoadBalance, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["server"]["zoneid"] = cls.zone.id
#Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.vm_1 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_2 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_3 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.non_src_nat_ip = PublicIPAddress.create(
cls.api_client,
cls.account.name,
cls.zone.id,
cls.account.domainid,
cls.services["server"]
)
# Open up firewall port for SSH
cls.fw_rule = FireWallRule.create(
cls.api_client,
ipaddressid=cls.non_src_nat_ip.ipaddress.id,
protocol=cls.services["lbrule"]["protocol"],
cidrlist=['0.0.0.0/0'],
startport=cls.services["lbrule"]["publicport"],
endport=cls.services["lbrule"]["publicport"]
)
cls._cleanup = [
cls.account,
cls.service_offering
]
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
cleanup_resources(self.apiclient, self.cleanup)
return
@classmethod
def tearDownClass(cls):
cleanup_resources(cls.api_client, cls._cleanup)
return
def try_ssh(self, ip_addr, hostnames):
try:
self.debug(
"SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
(self.vm_1.ipaddress, ip_addr)
)
# If Round Robin Algorithm is chosen,
# each ssh command should alternate between VMs
ssh_1 = SshClient(
ip_addr,
self.services['lbrule']["publicport"],
self.vm_1.username,
self.vm_1.password
)
hostnames.append(ssh_1.execute("hostname")[0])
self.debug(hostnames)
except Exception as e:
self.fail("%s: SSH failed for VM with IP Address: %s" %
(e, ip_addr))
time.sleep(self.services["lb_switch_wait"])
return
@attr(tags = ["advanced", "advancedns", "smoke"])
def test_01_create_lb_rule_src_nat(self):
"""Test to create Load balancing rule with source NAT"""
# Validate the Following:
#1. listLoadBalancerRules should return the added rule
#2. attempt to ssh twice on the load balanced IP
#3. verify using the hostname of the VM
# that round robin is indeed happening as expected
src_nat_ip_addrs = PublicIPAddress.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(src_nat_ip_addrs, list),
True,
"Check list response returns a valid list"
)
src_nat_ip_addr = src_nat_ip_addrs[0]
# Check if VM is in Running state before creating LB rule
vm_response = VirtualMachine.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check list VM returns a valid list"
)
self.assertNotEqual(
len(vm_response),
0,
"Check Port Forwarding Rule is created"
)
for vm in vm_response:
self.assertEqual(
vm.state,
'Running',
"VM state should be Running before creating a NAT rule."
)
#Create Load Balancer rule and assign VMs to rule
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
src_nat_ip_addr.id,
accountid=self.account.name
)
self.cleanup.append(lb_rule)
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
lb_rules = list_lb_rules(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_rules, list),
True,
"Check list response returns a valid list"
)
#verify listLoadBalancerRules lists the added load balancing rule
self.assertNotEqual(
len(lb_rules),
0,
"Check Load Balancer Rule in its List"
)
self.assertEqual(
lb_rules[0].id,
lb_rule.id,
"Check List Load Balancer Rules returns valid Rule"
)
# listLoadBalancerRuleInstances should list all
# instances associated with that LB rule
lb_instance_rules = list_lb_instances(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_instance_rules, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(lb_instance_rules),
0,
"Check Load Balancer instances Rule in its List"
)
self.debug("lb_instance_rules Ids: %s, %s" % (
lb_instance_rules[0].id,
lb_instance_rules[1].id
))
self.debug("VM ids: %s, %s" % (self.vm_1.id, self.vm_2.id))
self.assertIn(
lb_instance_rules[0].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
self.assertIn(
lb_instance_rules[1].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
hostnames = []
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_2.name,
hostnames,
"Check if ssh succeeded for server2"
)
#SSH should pass till there is a last VM associated with LB rule
lb_rule.remove(self.apiclient, [self.vm_2])
# making hostnames list empty
hostnames[:] = []
try:
self.debug("SSHing into IP address: %s after removing VM (ID: %s)" %
(
src_nat_ip_addr.ipaddress,
self.vm_2.id
))
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
except Exception as e:
self.fail("%s: SSH failed for VM with IP Address: %s" %
(e, src_nat_ip_addr.ipaddress))
lb_rule.remove(self.apiclient, [self.vm_1])
with self.assertRaises(Exception):
self.debug("Removed all VMs, trying to SSH")
self.try_ssh(src_nat_ip_addr.ipaddress, hostnames)
return
@attr(tags = ["advanced", "advancedns", "smoke"])
def test_02_create_lb_rule_non_nat(self):
"""Test to create Load balancing rule with non source NAT"""
# Validate the Following:
#1. listLoadBalancerRules should return the added rule
#2. attempt to ssh twice on the load balanced IP
#3. verify using the hostname of the VM that
# round robin is indeed happening as expected
#Create Load Balancer rule and assign VMs to rule
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
self.non_src_nat_ip.ipaddress.id,
accountid=self.account.name
)
self.cleanup.append(lb_rule)
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
lb_rules = list_lb_rules(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_rules, list),
True,
"Check list response returns a valid list"
)
#verify listLoadBalancerRules lists the added load balancing rule
self.assertNotEqual(
len(lb_rules),
0,
"Check Load Balancer Rule in its List"
)
self.assertEqual(
lb_rules[0].id,
lb_rule.id,
"Check List Load Balancer Rules returns valid Rule"
)
# listLoadBalancerRuleInstances should list
# all instances associated with that LB rule
lb_instance_rules = list_lb_instances(
self.apiclient,
id=lb_rule.id
)
self.assertEqual(
isinstance(lb_instance_rules, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(lb_instance_rules),
0,
"Check Load Balancer instances Rule in its List"
)
self.assertIn(
lb_instance_rules[0].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
self.assertIn(
lb_instance_rules[1].id,
[self.vm_1.id, self.vm_2.id],
"Check List Load Balancer instances Rules returns valid VM ID"
)
try:
hostnames = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_2.name,
hostnames,
"Check if ssh succeeded for server2"
)
#SSH should pass till there is a last VM associated with LB rule
lb_rule.remove(self.apiclient, [self.vm_2])
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
(
self.non_src_nat_ip.ipaddress.ipaddress,
self.vm_2.id
))
# Making host list empty
hostnames[:] = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.debug("Hostnames after removing VM2: %s" % str(hostnames))
except Exception as e:
self.fail("%s: SSH failed for VM with IP Address: %s" %
(e, self.non_src_nat_ip.ipaddress.ipaddress))
lb_rule.remove(self.apiclient, [self.vm_1])
with self.assertRaises(Exception):
self.debug("SSHing into IP address: %s after removing VM (ID: %s) from LB rule" %
(
self.non_src_nat_ip.ipaddress.ipaddress,
self.vm_1.id
))
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
return
@attr(tags = ["advanced", "advancedns", "smoke"])
def test_assign_and_removal_lb(self):
"""Test for assign & removing load balancing rule"""
# Validate:
#1. Verify list API - listLoadBalancerRules lists
# all the rules with the relevant ports
#2. listLoadBalancerInstances will list
# the instances associated with the corresponding rule.
#3. verify ssh attempts should pass as long as there
# is at least one instance associated with the rule
# Check if VM is in Running state before creating LB rule
vm_response = VirtualMachine.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vm_response, list),
True,
"Check list VM returns a valid list"
)
self.assertNotEqual(
len(vm_response),
0,
"Check Port Forwarding Rule is created"
)
for vm in vm_response:
self.assertEqual(
vm.state,
'Running',
"VM state should be Running before creating a NAT rule."
)
lb_rule = LoadBalancerRule.create(
self.apiclient,
self.services["lbrule"],
self.non_src_nat_ip.ipaddress.id,
self.account.name
)
lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
hostnames = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_2.name,
hostnames,
"Check if ssh succeeded for server2"
)
#Removing VM and assigning another VM to LB rule
lb_rule.remove(self.apiclient, [self.vm_2])
# making hostnames list empty
hostnames[:] = []
try:
self.debug("SSHing again into IP address: %s with VM (ID: %s) added to LB rule" %
(
self.non_src_nat_ip.ipaddress.ipaddress,
self.vm_1.id,
))
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.non_src_nat_ip.ipaddress.ipaddress)
lb_rule.assign(self.apiclient, [self.vm_3])
        # Making hostnames list empty
hostnames[:] = []
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.try_ssh(self.non_src_nat_ip.ipaddress.ipaddress, hostnames)
self.debug("Hostnames: %s" % str(hostnames))
self.assertIn(
self.vm_1.name,
hostnames,
"Check if ssh succeeded for server1"
)
self.assertIn(
self.vm_3.name,
hostnames,
"Check if ssh succeeded for server3"
)
return
| mufaddalq/cloudstack-datera-driver | test/integration/smoke/test_loadbalance.py | Python | apache-2.0 | 25,268 |
from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE
from twisted.internet._sslverify import OpenSSLCertificateOptions, ClientTLSOptions
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.web.client import _requireSSL
from twisted.web.iweb import IPolicyForHTTPS
from zope.interface import implementer
@implementer(IOpenSSLClientConnectionCreator)
class DisabledVerificationClientTLSOptions(ClientTLSOptions):
"""
    ClientTLSOptions replacement that does not validate the server certificate at all,
    i.e. it neither checks that the certificate matches the hostname nor verifies the
    certificate's trust chain.
"""
def _identityVerifyingInfoCallback(self, connection, where, ret):
"""
In case *where* indicates that the SSL handshake has been done,
this does nothing (as opposed to ClientTLSOptions._identityVerifyingInfoCallback,
which would validate the certificate). In all other cases,
the superclass method is called.
"""
if where & SSL_CB_HANDSHAKE_DONE:
# ClientTLSOptions._identityVerifyingInfoCallback would validate the certificate
# in that case. Instead, we just do nothing.
pass
else:
return ClientTLSOptions._identityVerifyingInfoCallback(self, connection, where, ret)
@implementer(IPolicyForHTTPS)
class DisabledVerificationPolicyForHTTPS(object):
""" HTTPS policy that does not check the certificate hostname """
@_requireSSL
def creatorForNetloc(self, hostname, port):
hostname = hostname.decode("ascii")
certificate_options = OpenSSLCertificateOptions(
trustRoot=None,
acceptableProtocols=None,
)
return DisabledVerificationClientTLSOptions(hostname, certificate_options.getContext())
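# Illustrative usage (assumed; not part of this module): hand the policy to
# twisted.web.client.Agent so outgoing HTTPS requests skip certificate verification:
#   from twisted.internet import reactor
#   from twisted.web.client import Agent
#   agent = Agent(reactor, contextFactory=DisabledVerificationPolicyForHTTPS())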
| NetKnights-GmbH/privacyidea-ldap-proxy | pi_ldapproxy/util.py | Python | agpl-3.0 | 1,817 |
import _minqlx
import re as _re
__version__ = _minqlx.__version__
temp = _re.search("([0-9]+)\.([0-9]+)\.([0-9]+)", __version__)
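# Derive __version_info__ from the matched digits, e.g. "1.2.3" -> (1, 2, 3);
# fall back to the (999, 999, 999) sentinel if the version string cannot be parsed.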
try:
__version_info__ = tuple(map(lambda i: int(temp.group(i)), [1, 2, 3]))
except:
__version_info__ = (999, 999, 999)
del temp
# Put everything into a single module.
from _minqlx import *
from ._core import *
from ._plugin import *
from ._game import *
from ._events import *
from ._commands import *
from ._handlers import *
from ._player import *
from ._zmq import *
| MinoMino/minqlx | python/minqlx/__init__.py | Python | gpl-3.0 | 510 |
"""
An example of how to send and receive arbitrary python objects, such as dictionaries.
"""
import pickle
import mpi
somedata = ["hello","world","!"]
somedict = {}
i = 0
for item in somedata:
somedict[i] = item
i += 1
def main():
rank,size = mpi.init()
serial_dict = pickle.dumps(somedict)
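    # Ship the pickled bytes as MPI_CHAR data: post a non-blocking send to rank 0,
    # then receive the matching buffer (a self round-trip when run as a single process).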
mpi.isend( serial_dict, len(serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD )
new_serial_dict = mpi.recv( len( serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD )
print new_serial_dict
mpi.finalize()
newdict = pickle.loads( new_serial_dict )
print newdict
return
if __name__=="__main__":
main()
| steder/maroonmpi | examples/serialize.py | Python | gpl-2.0 | 658 |
import os
import nose
#import subprocess
import pickle
import cle
TESTS_BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.join('..', '..', 'binaries'))
TESTS_ARCHES = [os.path.join('i386', 'libc.so.6'),
os.path.join('i386', 'fauxware'),
os.path.join('x86_64', 'libc.so.6'),
os.path.join('x86_64', 'fauxware'),
os.path.join('armel', 'libc.so.6'),
os.path.join('armel', 'fauxware'),
os.path.join('armel', 'helloworld'),
os.path.join('armhf', 'libc.so.6'),
os.path.join('ppc', 'libc.so.6'),
os.path.join('ppc', 'fauxware'),
os.path.join('mips', 'libc.so.6'),
os.path.join('mips', 'fauxware'),
os.path.join('mips64', 'libc.so.6'),
os.path.join('mips64', 'test_arrays'),
os.path.join('aarch64', 'libc.so.6'),
os.path.join('aarch64', 'test_arrays'),
]
def check_plt_entries(filename):
real_filename = os.path.join(TESTS_BASE, 'tests', filename)
ld = cle.Loader(real_filename, auto_load_libs=False, main_opts={'custom_base_addr': 0})
if filename == os.path.join('ppc', 'libc.so.6'):
# objdump can't find PLT stubs for this...
nose.tools.assert_not_equal(ld.main_object._plt, {})
sorted_keys = sorted(ld.main_object._plt.values())
diffs = [y - x for x, y in zip(sorted_keys, sorted_keys[1:])]
nose.tools.assert_equal(diffs, [4]*len(diffs))
return
if filename == os.path.join('mips', 'libc.so.6'):
nose.tools.assert_in('__tls_get_addr', ld.main_object._plt)
nose.tools.assert_equal(ld.main_object.plt['__tls_get_addr'], 1331168)
return
if filename == os.path.join('mips', 'fauxware'):
nose.tools.assert_equal(ld.main_object.plt, {'puts': 4197264, 'read': 4197232, '__libc_start_main': 4197312, 'printf': 4197248, 'exit': 4197280, 'open': 4197296, 'strcmp': 4197216})
return
if filename == os.path.join('mips64', 'libc.so.6'):
nose.tools.assert_equal(ld.main_object.plt, {'__tls_get_addr': 1458432, '_dl_find_dso_for_object': 1458448})
return
if filename == os.path.join('mips64', 'test_arrays'):
nose.tools.assert_equal(ld.main_object.plt, {'__libc_start_main': 4831841456, 'puts': 4831841440})
return
if filename == os.path.join('armel', 'helloworld'):
nose.tools.assert_equal(ld.main_object.plt, {'printf': 0x102e0, '__libc_start_main': 0x102ec,
'__gmon_start__': 0x102f8, 'abort': 0x10304
}
)
return
ld.main_object._plt.pop('__gmon_start__', None)
#p1 = subprocess.Popen(['objdump', '-d', real_filename], stdout=subprocess.PIPE)
#p2 = subprocess.Popen(['grep', '@plt>:'], stdin=p1.stdout, stdout=subprocess.PIPE)
#p1.stdout.close()
#dat, _ = p2.communicate()
#lines = dat.strip().split('\n')
#ideal_plt = {}
#for line in lines:
# addr, ident = line.split()
# addr = int(addr, 16)
# name = ident.split('@')[0].strip('<')
# if '*' in name or name == '__gmon_start__':
# continue
# ideal_plt[name] = addr
#if filename == os.path.join('armhf', 'libc.so.6'):
# # objdump does these cases wrong as far as I can tell?
# # or maybe not wrong just... different
# # there's a prefix to this stub that jumps out of thumb mode
# # cle finds the arm stub, objdump finds the thumb prefix
# ideal_plt['free'] += 4
# ideal_plt['malloc'] += 4
#PLT_CACHE[filename.replace('\\', '/')] = ideal_plt
ideal_plt = PLT_CACHE[filename.replace('\\', '/')]
nose.tools.assert_equal(ideal_plt, ld.main_object.plt)
PLT_CACHE = {}
PLT_CACHE = pickle.load(open(os.path.join(TESTS_BASE, 'tests_data', 'objdump-grep-plt.p'), 'rb'))
def test_plt():
for filename in TESTS_ARCHES:
yield check_plt_entries, filename
if __name__ == '__main__':
for f, a in test_plt():
print a
f(a)
#pickle.dump(PLT_CACHE, open(os.path.join(TESTS_BASE, 'tests_data', 'objdump-grep-plt.p'), 'wb'))
| Ruide/angr-dev | cle/tests/test_plt.py | Python | bsd-2-clause | 4,343 |
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler weights.
"""
from nova import context
from nova import exception
from nova.openstack.common.fixture import mockpatch
from nova.scheduler import weights
from nova import test
from nova.tests import matchers
from nova.tests.scheduler import fakes
class TestWeighedHost(test.NoDBTestCase):
def test_dict_conversion(self):
host_state = fakes.FakeHostState('somehost', None, {})
host = weights.WeighedHost(host_state, 'someweight')
expected = {'weight': 'someweight',
'host': 'somehost'}
self.assertThat(host.to_dict(), matchers.DictMatches(expected))
def test_all_weighers(self):
classes = weights.all_weighers()
class_names = [cls.__name__ for cls in classes]
self.assertEqual(len(classes), 2)
self.assertIn('RAMWeigher', class_names)
self.assertIn('MetricsWeigher', class_names)
class RamWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(RamWeigherTestCase, self).setUp()
self.useFixture(mockpatch.Patch(
'nova.db.compute_node_get_all',
return_value=fakes.COMPUTE_NODES))
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
['nova.scheduler.weights.ram.RAMWeigher'])
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {}
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
return self.host_manager.get_all_host_states(ctxt)
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1.0)
self.assertEqual(weighed_host.obj.host, 'host4')
def test_ram_filter_multiplier1(self):
self.flags(ram_weight_multiplier=0.0)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# We do not know the host, all have same weight.
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 0.0)
def test_ram_filter_multiplier2(self):
self.flags(ram_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1.0 * 2)
self.assertEqual(weighed_host.obj.host, 'host4')
def test_ram_filter_negative(self):
self.flags(ram_weight_multiplier=1.0)
hostinfo_list = self._get_all_hosts()
host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
host_state = fakes.FakeHostState('negative', 'negative', host_attr)
hostinfo_list = list(hostinfo_list) + [host_state]
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# negativehost: free_ram_mb=-512
# so, host4 should win
weights = self.weight_handler.get_weighed_objects(self.weight_classes,
hostinfo_list, {})
weighed_host = weights[0]
self.assertEqual(weighed_host.weight, 1)
self.assertEqual(weighed_host.obj.host, "host4")
# and negativehost should lose
weighed_host = weights[-1]
self.assertEqual(weighed_host.weight, 0)
self.assertEqual(weighed_host.obj.host, "negative")
class MetricsWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(MetricsWeigherTestCase, self).setUp()
self.useFixture(mockpatch.Patch(
'nova.db.compute_node_get_all',
return_value=fakes.COMPUTE_NODES_METRICS))
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
['nova.scheduler.weights.metrics.MetricsWeigher'])
def _get_weighed_host(self, hosts, setting, weight_properties=None):
if not weight_properties:
weight_properties = {}
self.flags(weight_setting=setting, group='metrics')
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
return self.host_manager.get_all_host_states(ctxt)
def _do_test(self, settings, expected_weight, expected_host):
hostinfo_list = self._get_all_hosts()
weighed_host = self._get_weighed_host(hostinfo_list, settings)
self.assertEqual(weighed_host.weight, expected_weight)
self.assertEqual(weighed_host.obj.host, expected_host)
def test_single_resource(self):
# host1: foo=512
# host2: foo=1024
# host3: foo=3072
# host4: foo=8192
# so, host4 should win:
setting = ['foo=1']
self._do_test(setting, 1.0, 'host4')
def test_multiple_resource(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host2 should win:
setting = ['foo=0.0001', 'bar=1']
self._do_test(setting, 1.0, 'host2')
    def test_single_resource_negative_ratio(self):
# host1: foo=512
# host2: foo=1024
# host3: foo=3072
# host4: foo=8192
# so, host1 should win:
setting = ['foo=-1']
self._do_test(setting, 1.0, 'host1')
def test_multiple_resource_missing_ratio(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host4 should win:
setting = ['foo=0.0001', 'bar']
self._do_test(setting, 1.0, 'host4')
def test_multiple_resource_wrong_ratio(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host4 should win:
setting = ['foo=0.0001', 'bar = 2.0t']
self._do_test(setting, 1.0, 'host4')
def _check_parsing_result(self, weigher, setting, results):
self.flags(weight_setting=setting, group='metrics')
weigher._parse_setting()
self.assertEqual(len(weigher.setting), len(results))
for item in results:
self.assertIn(item, weigher.setting)
def test_parse_setting(self):
weigher = self.weight_classes[0]()
self._check_parsing_result(weigher,
['foo=1'],
[('foo', 1.0)])
self._check_parsing_result(weigher,
['foo=1', 'bar=-2.1'],
[('foo', 1.0), ('bar', -2.1)])
self._check_parsing_result(weigher,
['foo=a1', 'bar=-2.1'],
[('bar', -2.1)])
self._check_parsing_result(weigher,
['foo', 'bar=-2.1'],
[('bar', -2.1)])
self._check_parsing_result(weigher,
['=5', 'bar=-2.1'],
[('bar', -2.1)])
def test_metric_not_found_required(self):
setting = ['foo=1', 'zot=2']
self.assertRaises(exception.ComputeHostMetricNotFound,
self._do_test,
setting,
8192,
'host4')
def test_metric_not_found_non_required(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# host5: foo=768, bar=0, zot=1
# host6: foo=2048, bar=0, zot=2
# so, host5 should win:
self.flags(required=False, group='metrics')
setting = ['foo=0.0001', 'zot=-1']
self._do_test(setting, 1.0, 'host5')
| jumpstarter-io/nova | nova/tests/scheduler/test_weights.py | Python | apache-2.0 | 9,359 |
rules = {
"Author": {
"email": {
"type": "string",
},
"password": {
"type": "string",
"required": True,
},
"username": {
"type": "string",
"required": True,
},
},
}
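# Illustrative check (assumes the standard cerberus Validator API; not part of the generated rules):
#   from cerberus import Validator
#   v = Validator(rules['Author'])
#   v.validate({'username': 'alice', 'password': 'secret'})  # True: email is optional
#   v.validate({'username': 'alice'})                        # False: password is required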
| peterdemin/mutant | tests/regression/author/cerberus.py | Python | isc | 281 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.google.settings import stanza
from sleekxmpp.plugins.google.settings.settings import GoogleSettings
| danielvdao/facebookMacBot | venv/lib/python2.7/site-packages/sleekxmpp/plugins/google/settings/__init__.py | Python | mit | 316 |
import unittest
import json
from datetime import datetime
from pymongo import MongoClient
from apps.basic_resource import server
from apps.basic_resource.documents import Article, Comment
class ResourcePutIdentifierField(unittest.TestCase):
"""
    Test that an HTTP PUT which updates a resource containing an embedded
    document with an identifier field (the field used to match the update)
    returns the right response and updates the document correctly.
"""
@classmethod
def setUpClass(cls):
cls.app = server.app.test_client()
cls.mongo_client = MongoClient()
cls.initial_data = {
'title': "Test title",
'text': "Test text",
'publish': True,
'publish_date': datetime(2013, 10, 9, 8, 7, 8),
'comments': [
Comment(text="Test comment "),
Comment(text="Test comment 2"),
Comment(text="Test comment 3"),
],
'top_comment': Comment(text="Top comment"),
'tags': ['test', 'unittest', 'python', 'flask']
}
cls.article = Article(**cls.initial_data).save()
# the `id` field is the identifier field (duh)
cls.comments_update = {
'comments': [
{
'id': unicode(cls.article['comments'][0]['id']),
'text': "Test comment update"
},
{
'id': unicode(cls.article['comments'][1]['id']),
'text': "Test comment update 2"
}
]
}
cls.response = cls.app.put(
'/articles/{}/'.format(unicode(cls.article['id'])),
headers={'content-type': 'application/json'},
data=json.dumps(cls.comments_update)
)
@classmethod
def tearDownClass(cls):
cls.mongo_client.unittest_monkful.article.remove()
def test_status_code(self):
"""
Test if the response status code is 200.
"""
self.assertEqual(self.response.status_code, 200)
def test_content_type(self):
"""
Test if the content-type header is 'application/json'.
"""
self.assertEqual(
self.response.headers['content-type'],
'application/json'
)
def test_json(self):
"""
Test if the response data is valid JSON.
"""
try:
json.loads(self.response.data)
except:
self.fail("Response is not valid JSON.")
def test_content(self):
"""
Test if the deserialized response data evaluates back to our
data we posted to the resource in `setUpClass`.
"""
response_data = json.loads(self.response.data)
# Remap the response data so that it only has the fields our
# orignal data also had.
response_data = {
'title': response_data['title'],
'text': response_data['text'],
'publish': response_data['publish'],
'publish_date': response_data['publish_date'],
'comments': [
{
'id': response_data['comments'][0]['id'],
'text': response_data['comments'][0]['text']
},
{
'id': response_data['comments'][1]['id'],
'text': response_data['comments'][1]['text']
}
],
'top_comment': {
'text': response_data['top_comment']['text']
},
'tags': response_data['tags']
}
self.assertEqual(
response_data,
{
'title': self.initial_data['title'],
'text': self.initial_data['text'],
'publish': self.initial_data['publish'],
'publish_date': self.initial_data['publish_date'].isoformat(),
'comments': self.comments_update['comments'],
'top_comment': {
'text': self.initial_data['top_comment']['text']
},
'tags': self.initial_data['tags']
}
)
def test_documents(self):
"""
Test if the POST-ed data really ended up in the documents.
"""
article = Article.objects[0]
self.assertEqual(article.title, self.initial_data['title'])
self.assertEqual(article.text, self.initial_data['text'])
self.assertEqual(article.publish, self.initial_data['publish'])
self.assertEqual(
article.publish_date,
self.initial_data['publish_date']
)
self.assertEqual(
article.comments[0].text,
self.comments_update['comments'][0]['text']
)
self.assertEqual(
article.comments[1].text,
self.comments_update['comments'][1]['text']
)
# The complete `comments` field should've been overwritten so
# there should be only 2 comments instead of 3.
self.assertEqual(len(article.comments), 2)
self.assertEqual(
article.tags,
self.initial_data['tags']
)
| caiiiyua/monkful | tests/tests/basic_resource/put_identifier_field.py | Python | lgpl-3.0 | 5,212 |
from __future__ import absolute_import
import logging
import time
from collections import namedtuple
from multiprocessing import Process, Manager as MPManager
try:
from Queue import Empty, Full
except ImportError: # python 2
from queue import Empty, Full
from .base import (
AUTO_COMMIT_MSG_COUNT, AUTO_COMMIT_INTERVAL,
NO_MESSAGES_WAIT_TIME_SECONDS,
FULL_QUEUE_WAIT_TIME_SECONDS
)
from .simple import Consumer, SimpleConsumer
Events = namedtuple("Events", ["start", "pause", "exit"])
log = logging.getLogger("kafka")
def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
"""
A child process worker which consumes messages based on the
notifications given by the controller process
NOTE: Ideally, this should have been a method inside the Consumer
class. However, multiprocessing module has issues in windows. The
functionality breaks unless this function is kept outside of a class
"""
# Make the child processes open separate socket connections
client.reinit()
# We will start consumers without auto-commit. Auto-commit will be
# done by the master controller process.
consumer = SimpleConsumer(client, group, topic,
auto_commit=False,
auto_commit_every_n=None,
auto_commit_every_t=None,
**consumer_options)
# Ensure that the consumer provides the partition information
consumer.provide_partition_info()
while True:
# Wait till the controller indicates us to start consumption
events.start.wait()
# If we are asked to quit, do so
if events.exit.is_set():
break
# Consume messages and add them to the queue. If the controller
# indicates a specific number of messages, follow that advice
count = 0
message = consumer.get_message()
if message:
while True:
try:
queue.put(message, timeout=FULL_QUEUE_WAIT_TIME_SECONDS)
break
except Full:
if events.exit.is_set(): break
count += 1
# We have reached the required size. The controller might have
# more than what he needs. Wait for a while.
# Without this logic, it is possible that we run into a big
# loop consuming all available messages before the controller
# can reset the 'start' event
if count == size.value:
events.pause.wait()
else:
# In case we did not receive any message, give up the CPU for
# a while before we try again
time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
consumer.stop()
class MultiProcessConsumer(Consumer):
"""
A consumer implementation that consumes partitions for a topic in
parallel using multiple processes
Arguments:
client: a connected KafkaClient
group: a name for this consumer, used for offset storage and must be unique
If you are connecting to a server that does not support offset
commit/fetch (any prior to 0.8.1.1), then you *must* set this to None
topic: the topic to consume
Keyword Arguments:
auto_commit: default True. Whether or not to auto commit the offsets
auto_commit_every_n: default 100. How many messages to consume
before a commit
auto_commit_every_t: default 5000. How much time (in milliseconds) to
wait before commit
num_procs: Number of processes to start for consuming messages.
The available partitions will be divided among these processes
partitions_per_proc: Number of partitions to be allocated per process
(overrides num_procs)
Auto commit details:
If both auto_commit_every_n and auto_commit_every_t are set, they will
reset one another when one is triggered. These triggers simply call the
commit method on this class. A manual call to commit will also reset
these triggers
"""
def __init__(self, client, group, topic, auto_commit=True,
auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
auto_commit_every_t=AUTO_COMMIT_INTERVAL,
num_procs=1, partitions_per_proc=0,
**simple_consumer_options):
# Initiate the base consumer class
super(MultiProcessConsumer, self).__init__(
client, group, topic,
partitions=None,
auto_commit=auto_commit,
auto_commit_every_n=auto_commit_every_n,
auto_commit_every_t=auto_commit_every_t)
# Variables for managing and controlling the data flow from
# consumer child process to master
manager = MPManager()
self.queue = manager.Queue(1024) # Child consumers dump messages into this
self.events = Events(
start = manager.Event(), # Indicates the consumers to start fetch
exit = manager.Event(), # Requests the consumers to shutdown
pause = manager.Event()) # Requests the consumers to pause fetch
self.size = manager.Value('i', 0) # Indicator of number of messages to fetch
# dict.keys() returns a view in py3 + it's not a thread-safe operation
# http://blog.labix.org/2008/06/27/watch-out-for-listdictkeys-in-python-3
# It's safer to copy dict as it only runs during the init.
partitions = list(self.offsets.copy().keys())
# By default, start one consumer process for all partitions
# The logic below ensures that
# * we do not cross the num_procs limit
# * we have an even distribution of partitions among processes
if partitions_per_proc:
num_procs = len(partitions) / partitions_per_proc
if num_procs * partitions_per_proc < len(partitions):
num_procs += 1
# The final set of chunks
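        # (e.g. 5 partitions over 2 processes -> [[p0, p2, p4], [p1, p3]])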
chunks = [partitions[proc::num_procs] for proc in range(num_procs)]
self.procs = []
for chunk in chunks:
options = {'partitions': list(chunk)}
if simple_consumer_options:
simple_consumer_options.pop('partitions', None)
options.update(simple_consumer_options)
args = (client.copy(), self.group, self.topic, self.queue,
self.size, self.events)
proc = Process(target=_mp_consume, args=args, kwargs=options)
proc.daemon = True
proc.start()
self.procs.append(proc)
def __repr__(self):
return '<MultiProcessConsumer group=%s, topic=%s, consumers=%d>' % \
(self.group, self.topic, len(self.procs))
def stop(self):
# Set exit and start off all waiting consumers
self.events.exit.set()
self.events.pause.set()
self.events.start.set()
for proc in self.procs:
proc.join()
proc.terminate()
super(MultiProcessConsumer, self).stop()
def __iter__(self):
"""
Iterator to consume the messages available on this consumer
"""
# Trigger the consumer procs to start off.
# We will iterate till there are no more messages available
self.size.value = 0
self.events.pause.set()
while True:
self.events.start.set()
try:
# We will block for a small while so that the consumers get
# a chance to run and put some messages in the queue
# TODO: This is a hack and will make the consumer block for
# at least one second. Need to find a better way of doing this
partition, message = self.queue.get(block=True, timeout=1)
except Empty:
break
# Count, check and commit messages if necessary
self.offsets[partition] = message.offset + 1
self.events.start.clear()
self.count_since_commit += 1
self._auto_commit()
yield message
self.events.start.clear()
def get_messages(self, count=1, block=True, timeout=10):
"""
Fetch the specified number of messages
Keyword Arguments:
count: Indicates the maximum number of messages to be fetched
block: If True, the API will block till some messages are fetched.
timeout: If block is True, the function will block for the specified
time (in seconds) until count messages is fetched. If None,
it will block forever.
"""
messages = []
# Give a size hint to the consumers. Each consumer process will fetch
# a maximum of "count" messages. This will fetch more messages than
# necessary, but these will not be committed to kafka. Also, the extra
# messages can be provided in subsequent runs
self.size.value = count
self.events.pause.clear()
if timeout is not None:
max_time = time.time() + timeout
new_offsets = {}
while count > 0 and (timeout is None or timeout > 0):
# Trigger consumption only if the queue is empty
# By doing this, we will ensure that consumers do not
# go into overdrive and keep consuming thousands of
# messages when the user might need only a few
if self.queue.empty():
self.events.start.set()
try:
partition, message = self.queue.get(block, timeout)
except Empty:
break
messages.append(message)
new_offsets[partition] = message.offset + 1
count -= 1
if timeout is not None:
timeout = max_time - time.time()
self.size.value = 0
self.events.start.clear()
self.events.pause.set()
# Update and commit offsets if necessary
self.offsets.update(new_offsets)
self.count_since_commit += len(messages)
self._auto_commit()
return messages
| vshlapakov/kafka-python | kafka/consumer/multiprocess.py | Python | apache-2.0 | 10,236 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('experiments', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ExperimentKeyValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('experiment_id', models.PositiveSmallIntegerField(verbose_name=b'Experiment ID', db_index=True)),
('key', models.CharField(max_length=255)),
('value', models.TextField()),
],
options={
'verbose_name': 'Experiment Data',
'verbose_name_plural': 'Experiment Data',
},
),
migrations.AlterUniqueTogether(
name='experimentkeyvalue',
unique_together=set([('experiment_id', 'key')]),
),
]
| ESOedX/edx-platform | lms/djangoapps/experiments/migrations/0002_auto_20170627_1402.py | Python | agpl-3.0 | 1,376 |
# -*- coding: utf-8 -*-
"""
Copyright 2016 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import mock
import pytest
from toolium.behave.environment import (get_jira_key_from_scenario, before_all, before_feature, before_scenario,
after_scenario, after_feature, after_all)
from toolium.config_files import ConfigFiles
from toolium.config_parser import ExtendedConfigParser
tags = (
(["jira('PROJECT-32')"], 'PROJECT-32'),
(["jira=PROJECT-32"], 'PROJECT-32'),
(["jira(PROJECT-32)"], 'PROJECT-32'),
(["jira='PROJECT-32'"], 'PROJECT-32'),
(["jiraPROJECT-32"], 'PROJECT-32'),
(["jira"], None),
(["PROJECT-32"], None),
(['slow', "jira('PROJECT-32')", 'critical'], 'PROJECT-32'),
(['slow', "PROJECT-32", 'critical'], None),
(['slow', "jira('PROJECT-32')", "jira('PROJECT-33')"], 'PROJECT-32'),
)
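# Each entry pairs a scenario tag list with the Jira key expected from get_jira_key_from_scenario.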
@pytest.mark.parametrize("tag_list, jira_key", tags)
def test_get_jira_key_from_scenario(tag_list, jira_key):
scenario = mock.Mock()
scenario.tags = tag_list
# Extract Jira key and compare with expected key
assert jira_key == get_jira_key_from_scenario(scenario)
@mock.patch('toolium.behave.environment.create_and_configure_wrapper')
def test_before_all(create_and_configure_wrapper):
# Create context mock
context = mock.MagicMock()
context.config.userdata.get.return_value = None
context.config_files = ConfigFiles()
before_all(context)
# Check that configuration folder is the same as environment folder
expected_config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')
assert context.config_files.config_directory == expected_config_directory
assert context.config_files.config_properties_filenames is None
assert context.config_files.config_log_filename is None
properties = (
'TOOLIUM_CONFIG_ENVIRONMENT',
'Config_environment',
'env'
)
@pytest.mark.parametrize("property_name", properties)
@mock.patch('toolium.behave.environment.create_and_configure_wrapper')
def test_before_all_config_environment(create_and_configure_wrapper, property_name):
# Create context mock
context = mock.MagicMock()
context.config.userdata.get.side_effect = lambda x: 'os' if x == property_name else None
context.config_files = ConfigFiles()
before_all(context)
# Check that configuration folder is the same as environment folder and property 'TOOLIUM_CONFIG_ENVIRONMENT' is
# configured
expected_config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')
assert context.config_files.config_directory == expected_config_directory
assert context.config_files.config_properties_filenames == 'properties.cfg;os-properties.cfg;local-os-properties.cfg'
assert context.config_files.config_log_filename is None
assert os.environ['TOOLIUM_CONFIG_ENVIRONMENT'] == 'os'
del os.environ['TOOLIUM_CONFIG_ENVIRONMENT']
@mock.patch('toolium.behave.environment.start_driver')
def test_before_feature(start_driver):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
feature = mock.MagicMock()
feature.tags = ['a', 'b']
before_feature(context, feature)
# Check that start_driver is not called
start_driver.assert_not_called()
@mock.patch('toolium.behave.environment.start_driver')
def test_before_feature_reuse_driver(start_driver):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
feature = mock.MagicMock()
feature.tags = ['a', 'reuse_driver', 'b']
before_feature(context, feature)
# Check that start_driver is called when reuse_driver tag
start_driver.assert_called_once_with(context, False)
assert context.reuse_driver_from_tags is True
@mock.patch('toolium.behave.environment.start_driver')
def test_before_feature_reuse_driver_no_driver(start_driver):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
feature = mock.MagicMock()
feature.tags = ['a', 'reuse_driver', 'b', 'no_driver']
before_feature(context, feature)
# Check that start_driver is called when reuse_driver tag, with True no_driver param
start_driver.assert_called_once_with(context, True)
assert context.reuse_driver_from_tags is True
@mock.patch('toolium.behave.environment.add_assert_screenshot_methods')
@mock.patch('toolium.behave.environment.DriverWrappersPool')
@mock.patch('toolium.behave.environment.start_driver')
def test_before_scenario(start_driver, DriverWrappersPool, add_assert_screenshot_methods):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
scenario = mock.MagicMock()
scenario.tags = ['a', 'b']
before_scenario(context, scenario)
# Check that start_driver is called
start_driver.assert_called_once_with(context, False)
DriverWrappersPool.stop_drivers.assert_not_called()
@mock.patch('toolium.behave.environment.add_assert_screenshot_methods')
@mock.patch('toolium.behave.environment.DriverWrappersPool')
@mock.patch('toolium.behave.environment.start_driver')
def test_before_scenario_reset_driver(start_driver, DriverWrappersPool, add_assert_screenshot_methods):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
scenario = mock.MagicMock()
scenario.tags = ['a', 'reset_driver', 'b']
before_scenario(context, scenario)
    # Check that start_driver and stop_drivers are called
start_driver.assert_called_once_with(context, False)
DriverWrappersPool.stop_drivers.assert_called_once_with()
@mock.patch('toolium.behave.environment.add_assert_screenshot_methods')
@mock.patch('toolium.behave.environment.start_driver')
def test_before_scenario_no_driver(start_driver, add_assert_screenshot_methods):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
scenario = mock.MagicMock()
scenario.tags = ['a', 'no_driver', 'b']
before_scenario(context, scenario)
# Check that start_driver is called
start_driver.assert_called_once_with(context, True)
@mock.patch('toolium.behave.environment.add_assert_screenshot_methods')
@mock.patch('toolium.behave.environment.start_driver')
def test_before_scenario_no_driver_feature(start_driver, add_assert_screenshot_methods):
# Create context mock
context = mock.MagicMock()
context.toolium_config = ExtendedConfigParser()
scenario = mock.MagicMock()
scenario.tags = ['a', 'b']
scenario.feature.tags = ['no_driver']
before_scenario(context, scenario)
# Check that start_driver is called
start_driver.assert_called_once_with(context, True)
@mock.patch('toolium.behave.environment.DriverWrappersPool')
def test_after_scenario_passed(DriverWrappersPool):
# Create context mock
context = mock.MagicMock()
context.global_status = {'test_passed': True}
scenario = mock.MagicMock()
scenario.name = 'name'
scenario.status = 'passed'
after_scenario(context, scenario)
# Check that close_drivers is called
assert context.global_status['test_passed'] is True
DriverWrappersPool.close_drivers.assert_called_once_with(context=context, scope='function', test_name='name',
test_passed=True)
@mock.patch('toolium.behave.environment.DriverWrappersPool')
def test_after_scenario_failed(DriverWrappersPool):
# Create context mock
context = mock.MagicMock()
context.global_status = {'test_passed': True}
scenario = mock.MagicMock()
scenario.name = 'name'
scenario.status = 'failed'
after_scenario(context, scenario)
# Check that close_drivers is called
assert context.global_status['test_passed'] is False
DriverWrappersPool.close_drivers.assert_called_once_with(context=context, scope='function', test_name='name',
test_passed=False)
@mock.patch('toolium.behave.environment.DriverWrappersPool')
def test_after_scenario_skipped(DriverWrappersPool):
# Create context mock
context = mock.MagicMock()
context.global_status = {'test_passed': True}
scenario = mock.MagicMock()
scenario.name = 'name'
scenario.status = 'skipped'
after_scenario(context, scenario)
# Check that close_drivers is not called
assert context.global_status['test_passed'] is True
DriverWrappersPool.close_drivers.assert_not_called()
@mock.patch('toolium.behave.environment.DriverWrappersPool')
def test_after_feature(DriverWrappersPool):
# Create context mock
context = mock.MagicMock()
context.global_status = {'test_passed': True}
feature = mock.MagicMock()
feature.name = 'name'
after_feature(context, feature)
# Check that close_drivers is called
DriverWrappersPool.close_drivers.assert_called_once_with(scope='module', test_name='name', test_passed=True)
@mock.patch('toolium.behave.environment.DriverWrappersPool')
def test_after_all(DriverWrappersPool):
# Create context mock
context = mock.MagicMock()
context.global_status = {'test_passed': True}
after_all(context)
# Check that close_drivers is called
DriverWrappersPool.close_drivers.assert_called_once_with(scope='session', test_name='multiple_tests',
test_passed=True)
| Telefonica/toolium | toolium/test/behave/test_environment.py | Python | apache-2.0 | 10,130 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for ragged tensors."""
import typing
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_ragged_math_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# ragged.range
#===============================================================================
# pylint: disable=redefined-builtin
@tf_export('ragged.range')
@dispatch.add_dispatch_support
def range(starts,
limits=None,
deltas=1,
dtype=None,
name=None,
row_splits_dtype=dtypes.int64):
"""Returns a `RaggedTensor` containing the specified sequences of numbers.
Each row of the returned `RaggedTensor` contains a single sequence:
```python
ragged.range(starts, limits, deltas)[i] ==
tf.range(starts[i], limits[i], deltas[i])
```
  If `starts[i] >= limits[i]` and `deltas[i] > 0`, then `output[i]` will be an
  empty list.  Similarly, if `starts[i] <= limits[i]` and `deltas[i] < 0`, then
`output[i]` will be an empty list. This behavior is consistent with the
Python `range` function, but differs from the `tf.range` op, which returns
an error for these cases.
Examples:
>>> tf.ragged.range([3, 5, 2]).to_list()
[[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list()
[[0, 1, 2], [], [8, 9, 10, 11]]
>>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list()
[[0, 2], [], [8, 10]]
The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
The vector inputs must all have the same size. Scalar inputs are broadcast
to match the size of the vector inputs.
Args:
starts: Vector or scalar `Tensor`. Specifies the first entry for each range
if `limits` is not `None`; otherwise, specifies the range limits, and the
first entries default to `0`.
limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for
each range.
deltas: Vector or scalar `Tensor`. Specifies the increment for each range.
Defaults to `1`.
dtype: The type of the elements of the resulting tensor. If not specified,
then a value is chosen based on the other args.
name: A name for the operation.
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if limits is None:
starts, limits = 0, starts
with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:
starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')
limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')
deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')
# infer dtype if not explicitly provided
if dtype is None:
starts, limits, deltas = _infer_matching_dtype(
[starts, limits, deltas],
[dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
result = gen_ragged_math_ops.ragged_range(
starts, limits, deltas, Tsplits=row_splits_dtype, name=name)
return ragged_tensor.RaggedTensor.from_row_splits(
result.rt_dense_values, result.rt_nested_splits, validate=False)
def _infer_matching_dtype(tensors, dtype_hierarchy):
"""Infers a matching dtype for tensors, and casts them to that dtype."""
assert all(t.dtype in dtype_hierarchy for t in tensors)
inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index)
return [math_ops.cast(t, inferred_dtype) for t in tensors]
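# Example (illustrative sketch, assuming `import tensorflow as tf` and eager
# execution; not part of the library): mixing an integer `starts` with a float
# `limits` makes _infer_matching_dtype cast all three inputs to float32, the
# widest dtype present among them in the hierarchy above:
#
#   tf.ragged.range([0, 1], [2.5, 3.5]).to_list()
#   # -> [[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]]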
ops.no_gradient('RaggedRange')
#===============================================================================
# ragged_segment_<AGGREGATE>
#===============================================================================
# Docstring template used for the ragged_segment_<AGGREGATE> ops.
_RAGGED_SEGMENT_DOCSTRING = """\
Computes the %(combination)s along segments of a RaggedTensor.
Returns a RaggedTensor `output` with `num_segments` rows, where the row
`output[i]` is formed by taking the %(combination)s of all rows of `data`
whose corresponding `segment_id` is `i`.
The length of the row `output[i]` will be the maximum of the lengths of
all rows of `data` whose corresponding `segment_id` is `i`. If no `data`
rows correspond to a given segment ID, then the output row for that segment
ID will be empty.
Args:
data: A `RaggedTensor` containing the values to combine.
segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or
`int32`. `segment_ids.shape` must be a prefix of `data.shape`.
Must be greater than or equal to zero, and less than `num_segments`.
`segment_ids` is not required to be sorted.
num_segments: An `int32` or `int64` scalar specifying the number of
distinct segment ids.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the %(combined)s values. The returned tensor
has the same dtype as `data`, and its shape is
`[num_segments] + data.shape[segment_ids.rank:]`.
Raises:
ValueError: If `segment_ids.shape` is not a prefix of `data.shape`.
"""
def _ragged_segment_aggregate(unsorted_segment_op,
data,
segment_ids,
num_segments,
separator=None,
name=None):
"""Aggregates along segments of a RaggedTensor using `unsorted_segment_op`.
Returns a RaggedTensor `output` with `num_segments` rows, where the row
`output[i]` is formed by combining all rows of `data` whose corresponding
`segment_id` is `i`. The values in each row are combined using
`unsorted_segment_op`.
The length of the row `output[i]` will be the maximum of the lengths of
all rows of `data` whose corresponding `segment_id` is `i`. If no `data`
rows correspond to a given segment ID, then the output row for that segment
ID will be empty.
Args:
unsorted_segment_op: The tensorflow `op` that should be used to combine
values in each row. Must have the same signature and basic behavior as
`unsorted_segment_sum`, `unsorted_segment_max`, etc.
data: A `RaggedTensor` containing the values to be combined.
segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or
`int32`. `segment_ids.shape` must be a prefix of `data.shape`.
`segment_ids` is not required to be sorted.
num_segments: An `int32` or `int64` scalar.
separator: An optional string. Defaults to None. The separator to use when
joining. Only used for string types.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the aggregated values. The returned tensor
has the same dtype as `data`, and its shape is
`[num_segments] + data.shape[segment_ids.rank:]`.
Raises:
ValueError: If segment_ids.shape is not a prefix of data.shape.
"""
if not (ragged_tensor.is_ragged(data) or
ragged_tensor.is_ragged(segment_ids)):
if separator is not None:
# It uses unsorted_segment_join.
return unsorted_segment_op(data, segment_ids, num_segments, separator,
name)
else:
return unsorted_segment_op(data, segment_ids, num_segments, name)
with ops.name_scope(name, 'RaggedSegment',
[data, segment_ids, num_segments]) as name:
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor(
segment_ids, name='segment_ids')
data, segment_ids = ragged_tensor.match_row_splits_dtypes(data, segment_ids)
if segment_ids.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError('segment_ids must have dtype int32 or int64.')
if ragged_tensor.is_ragged(segment_ids):
if not ragged_tensor.is_ragged(data):
raise ValueError('segment_ids.shape must be a prefix of data.shape, '
'but segment_ids is ragged and data is not.')
check_splits = check_ops.assert_equal(
segment_ids.row_splits,
data.row_splits,
message='segment_ids.shape must be a prefix of data.shape')
with ops.control_dependencies([check_splits]):
return _ragged_segment_aggregate(unsorted_segment_op, data.values,
segment_ids.values, num_segments,
separator)
# Find the length of each row in data. (shape=[data_nrows])
data_row_lengths = data.row_splits[1:] - data.row_splits[:-1]
# Find the length that each output row will have. The length of the row
# corresponding to segment `id` is `max(data_row_lengths[i])` where
# `segment_ids[i]=id`. (shape=[output_nrows])
output_row_lengths = math_ops.maximum(
math_ops.unsorted_segment_max(data_row_lengths, segment_ids,
num_segments), 0)
# Build the splits tensor for the output RaggedTensor.
output_splits = array_ops.concat([
array_ops.zeros([1], output_row_lengths.dtype),
math_ops.cumsum(output_row_lengths)
],
axis=0)
# For each row in `data`, find the start & limit position where that row's
# values will be aggregated in output.values.
data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids)
data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths
    # For each value in `data.values`, find the position where it will be
    # aggregated in `output.values`.
# Get the target output values index for each data values index.
data_val_to_out_val_index = range(data_row_to_out_row_start,
data_row_to_out_row_limit).values
# Recursively aggregate the values.
output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values,
data_val_to_out_val_index,
output_splits[-1], separator)
return ragged_tensor.RaggedTensor.from_row_splits(
output_values, output_splits, validate=False)
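# Example (illustrative sketch, assuming `import tensorflow as tf` and eager
# execution): the dispatchers below combine whole ragged rows per segment id,
# and each output row is as long as the longest input row in its segment:
#
#   rt = tf.ragged.constant([[1, 2], [3, 4, 5], [6]])
#   tf.math.unsorted_segment_sum(rt, segment_ids=[0, 0, 1], num_segments=2)
#   # -> <tf.RaggedTensor [[4, 6, 5], [6]]>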
@dispatch.dispatch_for_api(math_ops.unsorted_segment_sum)
def segment_sum(data: ragged_tensor.RaggedOrDense,
segment_ids: ragged_tensor.RaggedOrDense,
num_segments,
name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(
math_ops.unsorted_segment_sum,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
name=(name or 'RaggedSegmentSum'))
@dispatch.dispatch_for_api(math_ops.unsorted_segment_prod)
def segment_prod(data: ragged_tensor.RaggedOrDense,
segment_ids: ragged_tensor.RaggedOrDense,
num_segments,
name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(
math_ops.unsorted_segment_prod,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
name=(name or 'RaggedSegmentProd'))
@dispatch.dispatch_for_api(math_ops.unsorted_segment_min)
def segment_min(data: ragged_tensor.RaggedOrDense,
segment_ids: ragged_tensor.RaggedOrDense,
num_segments,
name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(
math_ops.unsorted_segment_min,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
name=(name or 'RaggedSegmentMin'))
@dispatch.dispatch_for_api(math_ops.unsorted_segment_max)
def segment_max(data: ragged_tensor.RaggedOrDense,
segment_ids: ragged_tensor.RaggedOrDense,
num_segments,
name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(
math_ops.unsorted_segment_max,
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
name=(name or 'RaggedSegmentMax'))
@dispatch.dispatch_for_api(math_ops.unsorted_segment_mean)
def segment_mean(data: ragged_tensor.RaggedOrDense,
segment_ids: ragged_tensor.RaggedOrDense,
num_segments,
name=None):
"""For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
with ops.name_scope(name, 'RaggedSegmentMean',
[data, segment_ids, num_segments]):
total = segment_sum(data, segment_ids, num_segments)
ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
array_ops.ones_like(data.flat_values),
data.nested_row_splits,
validate=False)
count = segment_sum(ones, segment_ids, num_segments)
if ragged_tensor.is_ragged(total):
return total.with_flat_values(total.flat_values / count.flat_values)
else:
return total / count
@dispatch.dispatch_for_api(math_ops.unsorted_segment_sqrt_n)
def segment_sqrt_n(data: ragged_tensor.RaggedOrDense,
segment_ids: ragged_tensor.RaggedOrDense,
num_segments,
name=None):
"""For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
with ops.name_scope(name, 'RaggedSegmentSqrtN',
[data, segment_ids, num_segments]):
total = segment_sum(data, segment_ids, num_segments)
ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
array_ops.ones_like(data.flat_values),
data.nested_row_splits,
validate=False)
count = segment_sum(ones, segment_ids, num_segments)
if ragged_tensor.is_ragged(total):
return total.with_flat_values(total.flat_values /
math_ops.sqrt(count.flat_values))
else:
return total / math_ops.sqrt(count)
def _set_ragged_segment_docstring(func, combination, combined):
func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict(
combination=combination, combined=combined)
_set_ragged_segment_docstring(segment_sum, 'sum', 'summed')
_set_ragged_segment_docstring(segment_prod, 'product', 'multiplied')
_set_ragged_segment_docstring(segment_min, 'minimum', 'minimized')
_set_ragged_segment_docstring(segment_max, 'maximum', 'maximized')
_set_ragged_segment_docstring(segment_mean, 'mean', 'averaged')
_set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)',
'summed')
#===============================================================================
# ragged_reduce_<AGGREGATE>
#===============================================================================
# Docstring template used for ragged_reduce_<AGGREGATE> ops.
_RAGGED_REDUCE_DOCSTRING = """\
Computes the %(combination)s of elements across dimensions of a `RaggedTensor`.
Reduces `input_tensor` along the dimensions given in `axis` by taking the
%(combination)s of values. If a reduced dimension has no elements for
some index, then the value for that index will be %(default)s.
The rank of the tensor is reduced by `1` for each entry in `axis`. If
`axis` is not specified, then all dimensions are reduced, and a scalar
value is returned.
Args:
input_tensor: A `RaggedTensor` containing the values to be %(combined)s.
axis: The dimensions to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce
a given set of axes), or a `Tensor` with a constant value. Must be in
      the range `[0, input_tensor.rank)`.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the %(combined)s values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `input_tensor.shape`. The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
specified in `axis` from `input_tensor.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
####Example:
%(example)s
"""
_RAGGED_REDUCE_SUM_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_sum(rt, axis=0).numpy() # = [3+1+9+2, 1+5+6, 4]
array([15, 12, 4], dtype=int32)
>>> tf.reduce_sum(rt, axis=1).numpy() # = [3+1+4, 1+5, 9, 2+6]
array([8, 6, 9, 8], dtype=int32)
"""
_RAGGED_REDUCE_PROD_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_prod(rt, axis=0).numpy() # = [3*1*9*2, 1*5*6, 4]
array([54, 30, 4], dtype=int32)
>>> tf.reduce_prod(rt, axis=1).numpy() # = [3*1*4, 1*5, 9, 2*6]
array([12, 5, 9, 12], dtype=int32)
"""
_RAGGED_REDUCE_MIN_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_min(rt, axis=0).numpy()
array([1, 1, 4], dtype=int32)
>>> tf.reduce_min(rt, axis=1).numpy()
array([1, 1, 9, 2], dtype=int32)
"""
_RAGGED_REDUCE_MAX_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_max(rt, axis=0).numpy()
array([9, 6, 4], dtype=int32)
>>> tf.reduce_max(rt, axis=1).numpy()
array([4, 5, 9, 6], dtype=int32)
"""
_RAGGED_REDUCE_MEAN_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_mean(rt, axis=0).numpy()
array([3.75, 4. , 4. ])
>>> tf.reduce_mean(rt, axis=1).numpy()
array([2.66666667, 3. , 9. , 4. ])
"""
_RAGGED_REDUCE_VARIANCE_EXAMPLE = """
>>> rt = tf.ragged.constant([[1, 1, 4], [2, 1], [3], [4, 1]],
... dtype=tf.float64)
>>> tf.math.reduce_variance(rt, axis=0).numpy()
array([1.25, 0., 0.])
>>> tf.math.reduce_variance(rt, axis=1).numpy()
array([2., 0.25, 0., 2.25])
"""
_RAGGED_REDUCE_STD_EXAMPLE = """
>>> rt = tf.ragged.constant([[1, 0], [2, 1], [3], [4, 1]],
... dtype=tf.float64)
>>> tf.math.reduce_std(rt, axis=0).numpy()
array([1.11803399, 0.47140452])
>>> tf.math.reduce_std(rt, axis=1).numpy()
array([0.5, 0.5, 0., 1.5])
"""
_RAGGED_REDUCE_ALL_EXAMPLE = """
>>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> tf.reduce_all(rt, axis=0).numpy()
array([False, True, False, True])
>>> tf.reduce_all(rt, axis=1).numpy()
array([ True, False, False])
"""
_RAGGED_REDUCE_ANY_EXAMPLE = """
>>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> tf.reduce_any(rt, axis=0).numpy()
array([ True, True, False, True])
>>> tf.reduce_any(rt, axis=1).numpy()
array([ True, True, True])
"""
def ragged_reduce_aggregate(reduce_op,
unsorted_segment_op,
rt_input,
axis,
keepdims,
separator=None,
name=None):
"""Aggregates across axes of a RaggedTensor using the given `Tensor` ops.
Reduces `rt_input` along the dimensions given in `axis`. The rank of the
tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified,
then all dimensions are reduced, and a scalar value is returned.
This op assumes that `reduce_op` and `unsorted_segment_op` are associative;
if not, then reducing multiple axes will return incorrect results. (In
particular, reducing multiple axes is currently implemented by reducing the
axes one at a time.)
Args:
reduce_op: The tensorflow `op` that should be used to reduce values in
uniform dimensions. Must have the same signature and basic behavior as
`reduce_sum`, `reduce_max`, etc.
unsorted_segment_op: The tensorflow `op` that should be used to combine
values in ragged dimensions. Must have the same signature and basic
behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.
rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.
axis: The axis or axes to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a
given set of axes), or a `Tensor` with a constant value. Must be in the
range `[0, rt_input.rank)`.
keepdims: If true, retains reduced dimensions with length 1.
separator: An optional string. Defaults to None. The separator to use when
joining. The separator must not be set for non-string data types. (i.e. if
separator is not None then it uses string ops)
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the reduced values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
specified in `axis` from `rt_input.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
"""
if not ragged_tensor.is_ragged(rt_input):
if separator is None:
return reduce_op(rt_input, axis, keepdims=keepdims, name=name)
else:
      # When separator is not None, we infer that dtype is string and
# reduce_join will be called.
return reduce_op(
rt_input, axis, keepdims=keepdims, name=name, separator=separator)
if isinstance(axis, ops.Tensor):
axis = tensor_util.constant_value(axis)
if axis is None:
raise ValueError('axis must be known at graph construction time.')
if isinstance(axis, np.ndarray):
axis = axis.tolist()
# When reducing all axes, just ignore splits & reduce the inner values.
if axis is None:
result = reduce_op(rt_input.flat_values, None, keepdims=keepdims, name=name)
if keepdims:
# Expand the result to the input number of dimensions.
for _ in rt_input.shape[1:]:
result = array_ops.expand_dims(result, axis=0)
return result
with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]):
if isinstance(axis, (tuple, list)):
if not axis:
return rt_input
elif len(axis) == 1:
axis = axis[0]
else:
# When reducing multiple axes, as we reduce one at a time (see below),
# the negative axis has to be converted to positive at the first run
# as the sort with negative axis will have different orders.
# See GitHub issue 27497.
axis = [
array_ops.get_positive_axis(a, rt_input.shape.ndims, 'axis[%s]' % i,
'rank(input_tensor)')
for i, a in enumerate(axis)
]
# When reducing multiple axes, just reduce one at a time. This is less
# efficient, and only works for associative ops. (In particular, it
# does not work for reduce_mean.) However, reducing multiple axes at
# once will probably require a nontrivial c++ op.
axis = sorted(axis)
inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
rt_input, axis[-1], keepdims,
separator)
return ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
inner_reduced, axis[:-1], keepdims,
separator)
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
rt_input, name='rt_input')
axis = array_ops.get_positive_axis(
axis, rt_input.shape.ndims, ndims_name='rank(input_tensor)')
if axis == 0:
# out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]
row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1]
num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0)
segment_ids = range(row_lengths).values
result = _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
segment_ids, num_segments, separator)
if keepdims:
result = array_ops.expand_dims(result, axis=0)
return result
elif axis == 1:
# out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]
num_segments = array_ops.shape(rt_input.row_splits)[0] - 1
segment_ids = segment_id_ops.row_splits_to_segment_ids(
rt_input.row_splits)
result = _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
segment_ids, num_segments, separator)
if keepdims:
result = array_ops.expand_dims(result, axis=1)
return result
else:
# out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =
# sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]
return rt_input.with_values(
ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
rt_input.values, axis - 1, keepdims,
separator))
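# Example (illustrative sketch, assuming `import tensorflow as tf` and eager
# execution): reducing over several axes is performed one axis at a time,
# starting from the innermost requested axis:
#
#   rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
#   tf.reduce_sum(rt, axis=[0, 1]).numpy()
#   # -> 31  (= (3+1+4) + (1+5) + 9 + (2+6))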
@dispatch.dispatch_for_api(math_ops.reduce_sum)
def reduce_sum(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return ragged_reduce_aggregate(
reduce_op=math_ops.reduce_sum,
unsorted_segment_op=math_ops.unsorted_segment_sum,
rt_input=input_tensor,
axis=axis,
keepdims=keepdims,
name=(name or 'RaggedReduceSum'))
@dispatch.dispatch_for_api(math_ops.reduce_prod)
def reduce_prod(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return ragged_reduce_aggregate(
reduce_op=math_ops.reduce_prod,
unsorted_segment_op=math_ops.unsorted_segment_prod,
rt_input=input_tensor,
axis=axis,
keepdims=keepdims,
name=(name or 'RaggedReduceProd'))
@dispatch.dispatch_for_api(math_ops.reduce_min)
def reduce_min(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return ragged_reduce_aggregate(
reduce_op=math_ops.reduce_min,
unsorted_segment_op=math_ops.unsorted_segment_min,
rt_input=input_tensor,
axis=axis,
keepdims=keepdims,
name=(name or 'RaggedReduceMin'))
@dispatch.dispatch_for_api(math_ops.reduce_max)
def reduce_max(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return ragged_reduce_aggregate(
reduce_op=math_ops.reduce_max,
unsorted_segment_op=math_ops.unsorted_segment_max,
rt_input=input_tensor,
axis=axis,
keepdims=keepdims,
name=(name or 'RaggedReduceMax'))
@dispatch.dispatch_for_api(math_ops.reduce_mean)
def reduce_mean(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]):
total = reduce_sum(input_tensor, axis, keepdims)
if ragged_tensor.is_ragged(input_tensor):
ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
array_ops.ones_like(input_tensor.flat_values),
input_tensor.nested_row_splits,
validate=False)
else:
ones = array_ops.ones_like(input_tensor)
count = reduce_sum(ones, axis, keepdims)
if ragged_tensor.is_ragged(total):
return ragged_tensor.RaggedTensor.from_nested_row_splits(
total.flat_values / count.flat_values,
total.nested_row_splits,
validate=False)
else:
return total / count
@dispatch.dispatch_for_api(math_ops.reduce_variance)
def reduce_variance(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=False,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceVariance', [input_tensor, axis]):
square_of_input = math_ops.square(input_tensor)
mean_of_square = reduce_mean(square_of_input, axis=axis, keepdims=keepdims)
mean = reduce_mean(input_tensor, axis=axis, keepdims=keepdims)
square_of_mean = math_ops.square(mean)
return mean_of_square - square_of_mean
@dispatch.dispatch_for_api(math_ops.reduce_std)
def reduce_std(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=False,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceStd', [input_tensor, axis]):
variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
return math_ops.sqrt(variance)
def _cast(input_tensor, dtype):
return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor,
dtype)
@dispatch.dispatch_for_api(math_ops.reduce_all)
def reduce_all(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]):
return _cast(
reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims),
dtypes.bool)
@dispatch.dispatch_for_api(math_ops.reduce_any)
def reduce_any(input_tensor: ragged_tensor.Ragged,
axis=None,
keepdims=None,
name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]):
return _cast(
reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims),
dtypes.bool)
def _set_ragged_reduce_docstring(func, combination, combined, default, example):
func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict(
combination=combination,
combined=combined,
default=default,
example=example)
_set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0',
_RAGGED_REDUCE_SUM_EXAMPLE)
_set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1',
_RAGGED_REDUCE_PROD_EXAMPLE)
_set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized',
'`input_tensor.dtype.min`',
_RAGGED_REDUCE_MIN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized',
'`input_tensor.dtype.max`',
_RAGGED_REDUCE_MAX_EXAMPLE)
_set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN',
_RAGGED_REDUCE_MEAN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_variance, 'variance', 'averaged', 'NaN',
_RAGGED_REDUCE_VARIANCE_EXAMPLE)
_set_ragged_reduce_docstring(reduce_std, 'std', 'averaged', 'NaN',
_RAGGED_REDUCE_STD_EXAMPLE)
_set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True',
_RAGGED_REDUCE_ALL_EXAMPLE)
_set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False',
_RAGGED_REDUCE_ANY_EXAMPLE)
#===============================================================================
# ragged.matmul
#===============================================================================
@dispatch.dispatch_for_api(math_ops.matmul)
def matmul(a: ragged_tensor.RaggedOrDense,
b: ragged_tensor.RaggedOrDense,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
output_type=None,
name=None):
"""Multiplies matrix `a` by matrix `b`.
If all transpose or adjoint attributes are `False` then:
```
output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j]), for all indices i, j.
```
The inputs `a` and `b` must have `rank >= 2`, where the outermost `rank - 2`
dimensions are batch dimensions. The inputs must have the same dtype. See
`tf.matmul` for more information.
Args:
a: `tf.Tensor` or `RaggedTensor` with `rank > 1`.
b: `tf.Tensor` or `RaggedTensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated & transposed before multiplication.
adjoint_b: If `True`, `b` is conjugated & transposed before multiplication.
a_is_sparse: If `True`, optimize assuming `a` is mostly zero.
b_is_sparse: If `True`, optimize assuming `b` is mostly zero.
output_type: The output datatype (optional).
name: Name for the operation (optional).
Returns:
A `Tensor` or `RaggedTensor` with the same rank and shape as `a`, where
each inner-most matrix is the product of the corresponding matrices in `a`
and `b`.
"""
if transpose_a and adjoint_a:
raise ValueError('Only one of transpose_a and adjoint_a can be True.')
if transpose_b and adjoint_b:
raise ValueError('Only one of transpose_b and adjoint_b can be True.')
kwargs = dict(
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
output_type=output_type)
with ops.name_scope(name, 'RaggedMatMul', [a, b]) as name:
a = ragged_tensor.convert_to_tensor_or_ragged_tensor(a, name='a')
b = ragged_tensor.convert_to_tensor_or_ragged_tensor(b, name='b')
a_is_ragged = isinstance(a, ragged_tensor.RaggedTensor)
b_is_ragged = isinstance(b, ragged_tensor.RaggedTensor)
if not (a_is_ragged or b_is_ragged):
return math_ops.matmul(a, b, **kwargs)
if a.dtype != b.dtype:
raise ValueError('`a` and `b` must have the same dtype.')
# TODO(edloper): Support broadcasting inputs. (Broadcast support is not
# documented by https://www.tensorflow.org/api_docs/python/tf/linalg/matmul,
# but it is supported by the op.)
# Find the rank of the input tensors.
if a.shape.rank is None:
if b.shape.rank is None:
raise ValueError('matmul requires at least one input to have known '
'rank if either input is ragged.')
rank = b.shape.rank
else:
if b.shape.rank is not None and a.shape.rank != b.shape.rank:
raise ValueError('`a` and `b` must have the same rank.')
rank = a.shape.rank
# At least one of `a` and `b` is ragged; and ragged tensors always have
# rank>=2.
if rank < 2:
# This can happen if e.g. `a` is a 1D dense tensor and `b` is a
# ragged tensor with unknown rank. Since ragged tensors always have
# `rank>=2`, this implies that `a` and `b` have different ranks.
raise ValueError('`a` and `b` must have the same rank.')
# Rank>3: We have multiple batch dimensions. Merge them into a single
# batch dimension, recursively call `matmul`, and then restore the original
# batch dimension (using a.row_splits).
if rank > 3:
shape_err = 'Batch dimensions of `a` and `b` do not have the same size.'
if not a_is_ragged:
a = ragged_tensor.RaggedTensor.from_tensor(a, ragged_rank=1)
if not b_is_ragged:
b = ragged_tensor.RaggedTensor.from_tensor(b, ragged_rank=1)
with ops.control_dependencies([
check_ops.assert_equal(a.row_splits, b.row_splits, message=shape_err)
]):
flat_result = matmul(a.values, b.values, **kwargs)
return a.with_values(flat_result)
if rank == 2:
return _matmul_2d(a, b, **kwargs)
assert rank == 3 # I.e., we have a single batch dimension.
a_ragged_rank = a.ragged_rank if a_is_ragged else 0
if a_ragged_rank == 1 and not (b_is_ragged or transpose_a or adjoint_a):
    # If `a.shape=[B, (I), J]` and `b.shape=[B, J, K]`, then we can compute
# the result with a single dense `matmul`.
return _matmul_3d_with_batch_dim_folding(a, b, **kwargs)
else:
      # Otherwise, fall back on using `map_fn`.
return _matmul_3d_with_map_fn(a, b, **kwargs)
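# Example (illustrative sketch, assuming `import tensorflow as tf` and eager
# execution): with a ragged `a` of shape [B, (I), J] and a dense `b` of shape
# [B, J, K], the dispatcher above takes the batch-dim-folding path and needs
# only a single dense matmul:
#
#   a = tf.ragged.constant([[[1., 2.]], [[3., 4.], [5., 6.]]], ragged_rank=1)
#   b = tf.constant([[[1.], [0.]], [[0.], [1.]]])
#   tf.matmul(a, b).to_list()
#   # -> [[[1.0]], [[4.0], [6.0]]]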
def _matmul_2d(a, b, **kwargs):
"""Multiplies potentially ragged 2D tensors.
Args:
a: A 2D Tensor or RaggedTensor with `shape=[I, J]`
b: A 2D Tensor or RaggedTensor with `shape=[J, K]`
**kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).
Returns:
A 2D Tensor with `shape=[I, K]`.
"""
# multiplying `a` and `b` is only well-defined if `a` and `b` are
# actually uniform (and just happened to be stored as ragged tensors).
# Check that they're uniform, convert them to tf.Tensor.
ragged_err = ('The matrices in `a` and `b` may not be '
'ragged in their innermost dimension.')
checks = []
if isinstance(a, ragged_tensor.RaggedTensor):
original_size = array_ops.size(a.flat_values)
a = a.to_tensor()
checks.append(
check_ops.assert_equal(
original_size, array_ops.size(a), message=ragged_err))
if isinstance(b, ragged_tensor.RaggedTensor):
original_size = array_ops.size(b.flat_values)
b = b.to_tensor()
checks.append(
check_ops.assert_equal(
original_size, array_ops.size(b), message=ragged_err))
with ops.control_dependencies(checks):
return math_ops.matmul(a, b, **kwargs)
def _matmul_3d_with_map_fn(a, b, **kwargs):
"""Multiplies batches of 2D matrices using map_fn.
  `output[n, i, k] = sum_j (a[n, i, j] * b[n, j, k])` (for all `n`, `i`, `k`).
Requires that `a[n, i].nrows()` == `b[n].nrows()` (for all `n` and `i`).
Args:
a: A 3D Tensor or RaggedTensor with `shape=[B, I, J]`, where dimensions `I`
and `J` may be ragged.
b: A 3D Tensor or RaggedTensor with `shape=[B, J, K]`, where dimensions `J`
and `K` may be ragged.
**kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).
Returns:
A 3D RaggedTensor with `shape=[B, (I), (K)]`.
"""
if isinstance(b, ragged_tensor.RaggedTensor) and b.ragged_rank == 2:
output_ragged_rank = 2
else:
output_ragged_rank = 1
def single_batch_matmul(x):
out = _matmul_2d(x[0], x[1], **kwargs)
if output_ragged_rank == 2:
out = ragged_tensor.RaggedTensor.from_tensor(out)
return out
fn_out_shape = None # Figure out proper shape.
row_splits_dtype = (
a.row_splits.dtype
if isinstance(a, ragged_tensor.RaggedTensor) else b.row_splits.dtype)
output_type = kwargs['output_type']
if output_type is None:
output_type = a.dtype
spec = ragged_tensor.RaggedTensorSpec(
shape=fn_out_shape,
dtype=output_type,
ragged_rank=output_ragged_rank - 1,
row_splits_dtype=row_splits_dtype)
result = map_fn.map_fn(
single_batch_matmul, elems=(a, b), fn_output_signature=spec)
# map_fn loses shape information; restore it, where possible.
# pylint: disable=protected-access
if kwargs.get('transpose_a') or kwargs.get('adjoint_a'):
result._set_shape(a.shape[:-2] + a.shape[-1:] + [None])
else:
result._set_shape(a.shape[:-2] + a.shape[-2:-1] + [None])
if kwargs.get('transpose_b') or kwargs.get('adjoint_b'):
result._set_shape(b.shape[:-2] + [None] + b.shape[-2:-1])
else:
result._set_shape(b.shape[:-2] + [None] + b.shape[-1:])
return result
def _matmul_3d_with_batch_dim_folding(a, b, **kwargs):
"""Multiply batches of 2D matrices where only `a.shape[1]` is ragged.
Args:
a: A RaggedTensor with `shape=[B, (I), J]`. (ragged_rank must be 1.)
b: A Tensor with `shape=[B, J, K]`
**kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).
transpose_a and adjoint_a must not be true.
Returns:
    A RaggedTensor with `shape=[B, (I), K]`.
"""
# reshaped_a.shape = [sum(i_1, i_2, ..., i_B), 1, J]
reshaped_a = array_ops.expand_dims(a.values, 1)
# reshaped_b.shape = [sum(i_1, i_2, ..., i_B), J, K]
reshaped_b = array_ops.repeat(b, a.row_lengths(), axis=0)
# flat_result.shape = [sum(i_1, i_2, ..., i_B), 1, K]
flat_result = math_ops.matmul(reshaped_a, reshaped_b, **kwargs)
# result.shape = [B, (I), K]
return a.with_values(array_ops.squeeze(flat_result, axis=1))
#===============================================================================
# ragged.softmax
#===============================================================================
@dispatch.dispatch_for_api(nn_ops.softmax_v2)
def softmax(logits: ragged_tensor.Ragged, axis=None, name=None):
"""Computes softmax activations.
Used for multi-class predictions. The sum of all outputs generated by softmax
is 1.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Example usage:
>>> softmax = tf.nn.softmax([-1, 0., 1.])
>>> softmax
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>
>>> sum(softmax)
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
with ops.name_scope(name, 'RaggedSoftmax', [logits]) as name:
logits_exp = math_ops.exp(logits)
denominator = reduce_sum(logits_exp, axis=axis, keepdims=True)
return math_ops.divide(logits_exp, denominator)
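# Example (illustrative, assuming `import tensorflow as tf` and eager
# execution): softmax is applied along each ragged row independently, so every
# row sums to 1 regardless of its length:
#
#   rt = tf.ragged.constant([[0., 0.], [0., 0., 0.]])
#   tf.nn.softmax(rt).to_list()
#   # -> [[0.5, 0.5], [0.333..., 0.333..., 0.333...]]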
#===============================================================================
# ragged.add_n
#===============================================================================
@dispatch.dispatch_for_api(math_ops.add_n)
def add_n(inputs: typing.List[ragged_tensor.RaggedOrDense], name=None):
"""RaggedTensor implementation for tf.math.add_n."""
  if len(inputs) < 1:
raise ValueError('tf.add_n: expected at least one input.')
with ops.name_scope(name, 'RaggedAddN', inputs):
return ragged_functional_ops.map_flat_values(math_ops.add_n, inputs)
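# Example (illustrative, assuming `import tensorflow as tf` and eager
# execution): add_n over ragged tensors with matching row splits adds their
# flat values elementwise:
#
#   rt = tf.ragged.constant([[1, 2], [3]])
#   tf.math.add_n([rt, rt, rt]).to_list()
#   # -> [[3, 6], [9]]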
#===============================================================================
# Ragged version of nn_ops.dropout
#===============================================================================
@dispatch.dispatch_for_api(nn_ops.dropout)
def dropout_v1(x: ragged_tensor.Ragged,
keep_prob=None,
noise_shape=None,
seed=None,
name=None,
rate=None):
"""Ragged dispatch target for tf.nn.dropout."""
if noise_shape is not None:
raise ValueError('noise_shape is not supported yet for RaggedTensor x')
with ops.name_scope(name, 'RaggedNNDropout', [x, rate]):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
return x.with_flat_values(
nn_ops.dropout(
x.flat_values, keep_prob=keep_prob, seed=seed, rate=rate))
@dispatch.dispatch_for_api(nn_ops.dropout_v2)
def dropout_v2(x: ragged_tensor.Ragged,
rate,
noise_shape=None,
seed=None,
name=None):
"""Ragged dispatch target for tf.nn.dropout."""
if noise_shape is not None:
raise ValueError('noise_shape is not supported yet for RaggedTensor x')
with ops.name_scope(name, 'RaggedNNDropout', [x, rate]):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
return x.with_flat_values(
nn_ops.dropout_v2(x.flat_values, rate=rate, seed=seed))
#===============================================================================
# Ragged version of Tensor.__eq__ and Tensor.__ne__
#===============================================================================
@dispatch.dispatch_for_api(math_ops.tensor_equals)
def tensor_equals(self: ragged_tensor.RaggedOrDense,
other: ragged_tensor.RaggedOrDense):
"""Ragged version of the operation invoked by `Tensor.__eq__`."""
if other is None:
return False
elif _use_legacy_mode_for_tensor_equality(self):
return self is other
else:
try:
return math_ops.equal(self, other)
except (errors.InvalidArgumentError, ValueError):
      return False  # values are not broadcast-compatible.
@dispatch.dispatch_for_api(math_ops.tensor_not_equals)
def tensor_not_equals(self: ragged_tensor.RaggedOrDense,
other: ragged_tensor.RaggedOrDense):
"""Ragged version of the operation invoked by `Tensor.__ne__`."""
if other is None:
return False
elif _use_legacy_mode_for_tensor_equality(self):
return self is not other
else:
try:
return math_ops.not_equal(self, other)
except (errors.InvalidArgumentError, ValueError):
      return True  # values are not broadcast-compatible.
def _use_legacy_mode_for_tensor_equality(self):
g = getattr(self, 'graph', None)
return not (ops.Tensor._USE_EQUALITY and # pylint: disable=protected-access
ops.executing_eagerly_outside_functions() and
(g is None or g.building_function))
| tensorflow/tensorflow | tensorflow/python/ops/ragged/ragged_math_ops.py | Python | apache-2.0 | 46,928 |
from netforce.model import Model,fields,get_model
class Settings(Model):
_name="ecom2.settings"
_string="Settings"
_fields={
"delivery_slot_discount": fields.Decimal("Same Delivery Slot Discount"),
"delivery_max_days": fields.Integer("Delivery Max Days"),
"delivery_min_hours": fields.Integer("Delivery Min Hours"),
"ecom_num_lots": fields.Integer("Number Of Lots To Show On Website"),
}
Settings.register()
| anastue/netforce | netforce_ecom2/netforce_ecom2/models/ecom2_settings.py | Python | mit | 458 |
#!/usr/bin/python
for letter in 'Python':
if letter == 'h':
pass
print 'This is pass block'
print 'Current Letter :', letter
print "Good bye!"
# Current Letter : P
# Current Letter : y
# Current Letter : t
# This is pass block
# Current Letter : h
# Current Letter : o
# Current Letter : n
# Good bye! | TheShellLand/pies | v3/Libraries/builtin/statement/pass.py | Python | mit | 322 |
from node_user import UserAttribute
| xii/xii | src/xii/builtin/components/node/attributes/user/__init__.py | Python | apache-2.0 | 36 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os.path
import re
from cs.CsDatabag import CsDataBag
from CsProcess import CsProcess
from CsFile import CsFile
import CsHelper
HAPROXY_CONF_T = "/etc/haproxy/haproxy.cfg.new"
HAPROXY_CONF_P = "/etc/haproxy/haproxy.cfg"
class CsLoadBalancer(CsDataBag):
""" Manage Load Balancer entries """
def process(self):
if "config" not in self.dbag.keys():
return
if 'configuration' not in self.dbag['config'][0].keys():
return
config = self.dbag['config'][0]['configuration']
file1 = CsFile(HAPROXY_CONF_T)
file1.empty()
for x in config:
[file1.append(w, -1) for w in x.split('\n')]
file1.commit()
file2 = CsFile(HAPROXY_CONF_P)
if not file2.compare(file1):
CsHelper.copy(HAPROXY_CONF_T, HAPROXY_CONF_P)
proc = CsProcess(['/run/haproxy.pid'])
if not proc.find():
logging.debug("CsLoadBalancer:: will restart HAproxy!")
CsHelper.service("haproxy", "restart")
else:
logging.debug("CsLoadBalancer:: will reload HAproxy!")
CsHelper.service("haproxy", "reload")
add_rules = self.dbag['config'][0]['add_rules']
remove_rules = self.dbag['config'][0]['remove_rules']
stat_rules = self.dbag['config'][0]['stat_rules']
self._configure_firewall(add_rules, remove_rules, stat_rules)
def _configure_firewall(self, add_rules, remove_rules, stat_rules):
firewall = self.config.get_fw()
logging.debug("CsLoadBalancer:: configuring firewall. Add rules ==> %s" % add_rules)
logging.debug("CsLoadBalancer:: configuring firewall. Remove rules ==> %s" % remove_rules)
logging.debug("CsLoadBalancer:: configuring firewall. Stat rules ==> %s" % stat_rules)
for rules in add_rules:
path = rules.split(':')
ip = path[0]
port = path[1]
firewall.append(["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
for rules in remove_rules:
path = rules.split(':')
ip = path[0]
port = path[1]
firewall.append(["filter", "", "-D INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
for rules in stat_rules:
path = rules.split(':')
ip = path[0]
port = path[1]
firewall.append(["filter", "", "-A INPUT -p tcp -m tcp -d %s --dport %s -m state --state NEW -j ACCEPT" % (ip, port)])
| DaanHoogland/cloudstack | systemvm/debian/opt/cloud/bin/cs/CsLoadBalancer.py | Python | apache-2.0 | 3,393 |
#!/usr/bin/env python
############################################################################
##
## Copyright (C) 2006-2006 Trolltech ASA. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## Licensees holding a valid Qt License Agreement may use this file in
## accordance with the rights, responsibilities and obligations
## contained therein. Please consult your licensing agreement or
## contact [email protected] if any conditions of this licensing
## agreement are not clear to you.
##
## Further information about Qt licensing is available at:
## http://www.trolltech.com/products/qt/licensing.html or by
## contacting [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
import math
from PySide import QtCore, QtGui
import mice_rc
class Mouse(QtGui.QGraphicsItem):
Pi = math.pi
TwoPi = 2.0 * Pi
# Create the bounding rectangle once.
adjust = 0.5
BoundingRect = QtCore.QRectF(-20 - adjust, -22 - adjust, 40 + adjust,
83 + adjust)
def __init__(self):
super(Mouse, self).__init__()
self.angle = 0.0
self.speed = 0.0
self.mouseEyeDirection = 0.0
self.color = QtGui.QColor(QtCore.qrand() % 256, QtCore.qrand() % 256,
QtCore.qrand() % 256)
self.rotate(QtCore.qrand() % (360 * 16))
# In the C++ version of this example, this class is also derived from
        # QObject in order to receive timer events.  PySide does not support
        # deriving from more than one wrapped class, so we just create an
        # explicit timer instead.
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.timerEvent)
self.timer.start(1000 / 33)
@staticmethod
def normalizeAngle(angle):
while angle < 0:
angle += Mouse.TwoPi
while angle > Mouse.TwoPi:
angle -= Mouse.TwoPi
return angle
def boundingRect(self):
return Mouse.BoundingRect
def shape(self):
path = QtGui.QPainterPath()
path.addRect(-10, -20, 20, 40)
        return path
def paint(self, painter, option, widget):
# Body.
painter.setBrush(self.color)
painter.drawEllipse(-10, -20, 20, 40)
# Eyes.
painter.setBrush(QtCore.Qt.white)
painter.drawEllipse(-10, -17, 8, 8)
painter.drawEllipse(2, -17, 8, 8)
# Nose.
painter.setBrush(QtCore.Qt.black)
painter.drawEllipse(QtCore.QRectF(-2, -22, 4, 4))
# Pupils.
painter.drawEllipse(QtCore.QRectF(-8.0 + self.mouseEyeDirection, -17, 4, 4))
painter.drawEllipse(QtCore.QRectF(4.0 + self.mouseEyeDirection, -17, 4, 4))
# Ears.
if self.scene().collidingItems(self):
painter.setBrush(QtCore.Qt.red)
else:
painter.setBrush(QtCore.Qt.darkYellow)
painter.drawEllipse(-17, -12, 16, 16)
painter.drawEllipse(1, -12, 16, 16)
# Tail.
path = QtGui.QPainterPath(QtCore.QPointF(0, 20))
path.cubicTo(-5, 22, -5, 22, 0, 25)
path.cubicTo(5, 27, 5, 32, 0, 30)
path.cubicTo(-5, 32, -5, 42, 0, 35)
painter.setBrush(QtCore.Qt.NoBrush)
painter.drawPath(path)
def timerEvent(self):
# Don't move too far away.
lineToCenter = QtCore.QLineF(QtCore.QPointF(0, 0), self.mapFromScene(0, 0))
if lineToCenter.length() > 150:
angleToCenter = math.acos(lineToCenter.dx() / lineToCenter.length())
if lineToCenter.dy() < 0:
                angleToCenter = Mouse.TwoPi - angleToCenter
angleToCenter = Mouse.normalizeAngle((Mouse.Pi - angleToCenter) + Mouse.Pi / 2)
if angleToCenter < Mouse.Pi and angleToCenter > Mouse.Pi / 4:
# Rotate left.
self.angle += [-0.25, 0.25][self.angle < -Mouse.Pi / 2]
elif angleToCenter >= Mouse.Pi and angleToCenter < (Mouse.Pi + Mouse.Pi / 2 + Mouse.Pi / 4):
# Rotate right.
self.angle += [-0.25, 0.25][self.angle < Mouse.Pi / 2]
elif math.sin(self.angle) < 0:
self.angle += 0.25
elif math.sin(self.angle) > 0:
self.angle -= 0.25
# Try not to crash with any other mice.
dangerMice = self.scene().items(QtGui.QPolygonF([self.mapToScene(0, 0),
self.mapToScene(-30, -50),
self.mapToScene(30, -50)]))
for item in dangerMice:
if item is self:
continue
lineToMouse = QtCore.QLineF(QtCore.QPointF(0, 0), self.mapFromItem(item, 0, 0))
angleToMouse = math.acos(lineToMouse.dx() / lineToMouse.length())
if lineToMouse.dy() < 0:
angleToMouse = Mouse.TwoPi - angleToMouse
angleToMouse = Mouse.normalizeAngle((Mouse.Pi - angleToMouse) + Mouse.Pi / 2)
if angleToMouse >= 0 and angleToMouse < Mouse.Pi / 2:
# Rotate right.
self.angle += 0.5
elif angleToMouse <= Mouse.TwoPi and angleToMouse > (Mouse.TwoPi - Mouse.Pi / 2):
# Rotate left.
self.angle -= 0.5
# Add some random movement.
if len(dangerMice) > 1 and (QtCore.qrand() % 10) == 0:
if QtCore.qrand() % 1:
self.angle += (QtCore.qrand() % 100) / 500.0
else:
self.angle -= (QtCore.qrand() % 100) / 500.0
self.speed += (-50 + QtCore.qrand() % 100) / 100.0
dx = math.sin(self.angle) * 10
self.mouseEyeDirection = [dx / 5, 0.0][QtCore.qAbs(dx / 5) < 1]
self.rotate(dx)
self.setPos(self.mapToParent(0, -(3 + math.sin(self.speed) * 3)))
if __name__ == '__main__':
import sys
MouseCount = 7
app = QtGui.QApplication(sys.argv)
QtCore.qsrand(QtCore.QTime(0,0,0).secsTo(QtCore.QTime.currentTime()))
scene = QtGui.QGraphicsScene()
scene.setSceneRect(-300, -300, 600, 600)
scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
for i in range(MouseCount):
mouse = Mouse()
mouse.setPos(math.sin((i * 6.28) / MouseCount) * 200,
math.cos((i * 6.28) / MouseCount) * 200)
scene.addItem(mouse)
view = QtGui.QGraphicsView(scene)
view.setRenderHint(QtGui.QPainter.Antialiasing)
view.setBackgroundBrush(QtGui.QBrush(QtGui.QPixmap(':/images/cheese.jpg')))
view.setCacheMode(QtGui.QGraphicsView.CacheBackground)
view.setViewportUpdateMode(QtGui.QGraphicsView.BoundingRectViewportUpdate)
view.setDragMode(QtGui.QGraphicsView.ScrollHandDrag)
view.setWindowTitle("Colliding Mice")
view.resize(400, 300)
view.show()
sys.exit(app.exec_())
| cherry-wb/SideTools | examples/graphicsview/collidingmice/collidingmice.py | Python | apache-2.0 | 7,219 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with a *procedures* dictionary specifying available quantum
chemical methods and functions driving the main quantum chemical
functionality, namely single-point energies, geometry optimizations,
properties, and vibrational frequency calculations.
"""
import json
import os
import re
import shutil
import sys
from typing import Union
import numpy as np
from psi4 import core # for typing
from psi4.driver import driver_util
from psi4.driver import driver_cbs
from psi4.driver import driver_nbody
from psi4.driver import driver_findif
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver.procrouting import *
from psi4.driver.p4util.exceptions import *
from psi4.driver.mdi_engine import mdi_run
# never import wrappers or aliases into this file
def _find_derivative_type(ptype, method_name, user_dertype):
r"""
    Figures out the derivative type (0, 1, 2) for a given method_name. Uses the
    user-specified dertype when given; otherwise falls back to the highest
    derivative type available for the method.
"""
derivatives = {"gradient": 1, "hessian": 2}
if ptype not in derivatives:
raise ValidationError("_find_derivative_type: ptype must either be gradient or hessian.")
dertype = "(auto)"
# If user type is None, try to find the highest derivative
if user_dertype is None:
if (ptype == 'hessian') and (method_name in procedures['hessian']):
dertype = 2
# Will need special logic if we ever have managed Hessians
elif method_name in procedures['gradient']:
dertype = 1
if procedures['gradient'][method_name].__name__.startswith('select_'):
try:
procedures['gradient'][method_name](method_name, probe=True)
except ManagedMethodError:
dertype = 0
elif method_name in procedures['energy']:
dertype = 0
else:
# Quick sanity check. Only *should* be able to be None or int, but hey, kids today...
if not isinstance(user_dertype, int):
raise ValidationError("_find_derivative_type: user_dertype should only be None or int!")
dertype = user_dertype
if (core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and (dertype != 0):
raise ValidationError('INTEGRAL_PACKAGE ERD does not play nicely with derivatives, so stopping.')
if (core.get_global_option('PCM')) and (dertype != 0):
core.print_out('\nPCM analytic gradients are not implemented yet, re-routing to finite differences.\n')
dertype = 0
# Summary validation
if (dertype == 2) and (method_name in procedures['hessian']):
pass
elif (dertype == 1) and (method_name in procedures['gradient']):
pass
elif (dertype == 0) and (method_name in procedures['energy']):
pass
else:
alternatives = ''
alt_method_name = p4util.text.find_approximate_string_matches(method_name, procedures['energy'].keys(), 2)
if len(alt_method_name) > 0:
alternatives = """ Did you mean? %s""" % (' '.join(alt_method_name))
raise ValidationError("""Derivative method 'name' %s and derivative level 'dertype' %s are not available.%s"""
% (method_name, str(dertype), alternatives))
dertype = min(dertype, derivatives[ptype])
return dertype
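# A minimal usage sketch (hypothetical method names, hedged because the outcome depends
# on what is registered in `procedures`):
#
#     dertype = _find_derivative_type('gradient', 'scf', None)  # -> 1 when an analytic gradient is registered
#     dertype = _find_derivative_type('gradient', 'mp4', None)  # -> 0 if only an energy routine exists
#
# Passing an explicit integer as user_dertype bypasses the lookup entirely.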
def _energy_is_invariant(gradient, stationary_criterion=1.e-2):
"""Polls options and probes `gradient` to return whether current method
and system expected to be invariant to translations and rotations of
the coordinate system.
"""
stationary_point = gradient.rms() < stationary_criterion # 1.e-2 pulled out of a hat
mol = core.get_active_molecule()
efp_present = hasattr(mol, 'EFP')
translations_projection_sound = (not core.get_option('SCF', 'EXTERN') and not core.get_option('SCF', 'PERTURB_H')
and not efp_present)
rotations_projection_sound = (translations_projection_sound and stationary_point)
return translations_projection_sound, rotations_projection_sound
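# Reading of the returned pair: translations projection is sound only without an external
# potential (EXTERN), without PERTURB_H, and without EFP fragments; rotations projection
# additionally requires a near-stationary gradient (rms below the 1.e-2 criterion above).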
def _process_displacement(derivfunc, method, molecule, displacement, n, ndisp, **kwargs):
"""A helper function to perform all processing for an individual finite
difference computation.
Parameters
----------
derivfunc : func
The function computing the target derivative.
method : str
A string specifying the method to be used for the computation.
    molecule : psi4.core.Molecule or qcdb.Molecule
The molecule for the computation. All processing is handled internally.
molecule must not be modified!
displacement : dict
A dictionary containing the necessary information for the displacement.
See driver_findif/_geom_generator.py docstring for details.
n : int
The number of the displacement being computed, for print purposes.
ndisp : int
The total number of geometries, for print purposes.
Returns
-------
wfn: :py:class:`~psi4.core.Wavefunction`
The wavefunction computed.
"""
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n, ndisp))
print(""" %d""" % (n), end=('\n' if (n == ndisp) else ''))
sys.stdout.flush()
parent_group = molecule.point_group()
clone = molecule.clone()
clone.reinterpret_coordentry(False)
clone.fix_orientation(True)
# Load in displacement (flat list) into the active molecule
geom_array = np.reshape(displacement["geometry"], (-1, 3))
clone.set_geometry(core.Matrix.from_array(geom_array))
# If the user insists on symmetry, weaken it if some is lost when displacing.
if molecule.symmetry_from_input():
disp_group = clone.find_highest_point_group()
new_bits = parent_group.bits() & disp_group.bits()
new_symm_string = qcdb.PointGroup.bits_to_full_name(new_bits)
clone.reset_point_group(new_symm_string)
    # cleaning is possibly necessary for n=1 if its irrep (unsorted in the displacement list) differs from the initial G0 for frequencies
core.clean()
# Perform the derivative calculation
derivative, wfn = derivfunc(method, return_wfn=True, molecule=clone, **kwargs)
displacement["energy"] = core.variable('CURRENT ENERGY')
# If we computed a first or higher order derivative, set it.
if derivfunc == gradient:
displacement["gradient"] = wfn.gradient().np.ravel().tolist()
# clean may be necessary when changing irreps of displacements
core.clean()
return wfn
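# Sketch of how the finite-difference drivers in this file use this helper: the reference
# geometry is processed first and then every entry in findif_meta_dict["displacements"], e.g.
#
#     wfn = _process_displacement(energy, 'scf', molecule, findif_meta_dict["reference"], 1, ndisp)
#
# ('scf' stands in for whatever lowername is being differentiated); each call deposits its
# energy (and gradient, when derivfunc is `gradient`) back into the displacement dict.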
def _filter_renamed_methods(compute, method):
r"""Raises UpgradeHelper when a method has been renamed."""
if method == "dcft":
raise UpgradeHelper(compute + "('dcft')", compute + "('dct')", 1.4, " All instances of 'dcft' should be replaced with 'dct'.")
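# For example, energy('dcft') raises UpgradeHelper directing the user to energy('dct').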
def energy(name, **kwargs):
r"""Function to compute the single-point electronic energy.
:returns: *float* |w--w| Total electronic energy in Hartrees. SAPT & EFP return interaction energy.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CURRENT ENERGY`
* :psivar:`CURRENT REFERENCE ENERGY`
* :psivar:`CURRENT CORRELATION ENERGY`
:type name: str
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
:type restart_file: str
    :param restart_file: ``['file.1', 'file.32']`` || ``./file`` || etc.
Binary data files to be renamed for calculation restart.
.. _`table:energy_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| efp | effective fragment potential (EFP) :ref:`[manual] <sec:libefp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scf | Hartree--Fock (HF) or density functional theory (DFT) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf | HF self consistent field (SCF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf3c | HF with dispersion, BSSE, and basis set corrections :ref:`[manual] <sec:gcp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| pbeh3c | PBEh with dispersion, BSSE, and basis set corrections :ref:`[manual] <sec:gcp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dct | density cumulant (functional) theory :ref:`[manual] <sec:dct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2 | 2nd-order |MollerPlesset| perturbation theory (MP2) :ref:`[manual] <sec:dfmp2>` :ref:`[details] <tlmp2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp3 | 3rd-order |MollerPlesset| perturbation theory (MP3) :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp3>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp3 | MP3 with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2.5 | average of MP2 and MP3 :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp25>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp4(sdq) | 4th-order MP perturbation theory (MP4) less triples :ref:`[manual] <sec:fnompn>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp4(sdq) | MP4 (less triples) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp4 | full MP4 :ref:`[manual] <sec:fnompn>` :ref:`[details] <tlmp4>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp4 | full MP4 with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp\ *n* | *n*\ th-order |MollerPlesset| (MP) perturbation theory :ref:`[manual] <sec:arbpt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| zapt\ *n* | *n*\ th-order z-averaged perturbation theory (ZAPT) :ref:`[manual] <sec:arbpt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2 | orbital-optimized second-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp2 | spin-component scaled OMP2 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs(n)-omp2 | a special version of SCS-OMP2 for nucleobase interactions :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp2-vdw | a special version of SCS-OMP2 (from ethene dimers) :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-omp2 | spin-opposite scaled OMP2 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-pi-omp2 | A special version of SOS-OMP2 for pi systems :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp3 | orbital-optimized third-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp3 | spin-component scaled OMP3 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs(n)-omp3 | a special version of SCS-OMP3 for nucleobase interactions :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp3-vdw | a special version of SCS-OMP3 (from ethene dimers) :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-omp3 | spin-opposite scaled OMP3 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-pi-omp3 | A special version of SOS-OMP3 for pi systems :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccsd, cepa(0) | coupled electron pair approximation variant 0 :ref:`[manual] <sec:fnocepa>` :ref:`[details] <tllccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-lccsd, fno-cepa(0) | CEPA(0) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cepa(1) | coupled electron pair approximation variant 1 :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cepa(1) | CEPA(1) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cepa(3) | coupled electron pair approximation variant 3 :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cepa(3) | CEPA(3) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| acpf | averaged coupled-pair functional :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-acpf | ACPF with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| aqcc | averaged quadratic coupled cluster :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-aqcc | AQCC with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| qcisd | quadratic CI singles doubles (QCISD) :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-qcisd | QCISD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccd | Linear CCD :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tllccd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-lccd | LCCD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| olccd | orbital optimized LCCD :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cc2 | approximate coupled cluster singles and doubles (CC2) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccd | coupled cluster doubles (CCD) :ref:`[manual] <sec:occ_nonoo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd | coupled cluster singles and doubles (CCSD) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| bccd | Brueckner coupled cluster doubles (BCCD) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-ccsd | CCSD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| qcisd(t) | QCISD with perturbative triples :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-qcisd(t) | QCISD(T) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(t) | CCSD with perturbative triples (CCSD(T)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(at) | CCSD with asymmetric perturbative triples (CCSD(AT)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdat>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| bccd(t) | BCCD with perturbative triples :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-ccsd(t) | CCSD(T) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cc3 | approximate CC singles, doubles, and triples (CC3) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccenergy | **expert** full control over ccenergy module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dfocc | **expert** full control over dfocc module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisd | configuration interaction (CI) singles and doubles (CISD) :ref:`[manual] <sec:ci>` :ref:`[details] <tlcisd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cisd | CISD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisdt | CI singles, doubles, and triples (CISDT) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisdtq | CI singles, doubles, triples, and quadruples (CISDTQ) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ci\ *n* | *n*\ th-order CI :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fci | full configuration interaction (FCI) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| detci | **expert** full control over detci module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| casscf | complete active space self consistent field (CASSCF) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| rasscf | restricted active space self consistent field (RASSCF) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mcscf | multiconfigurational self consistent field (SCF) :ref:`[manual] <sec:psimrcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| psimrcc | Mukherjee multireference coupled cluster (Mk-MRCC) :ref:`[manual] <sec:psimrcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-scf | density matrix renormalization group SCF :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-caspt2 | density matrix renormalization group CASPT2 :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-ci | density matrix renormalization group CI :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt0 | 0th-order symmetry adapted perturbation theory (SAPT) :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ssapt0 | 0th-order SAPT with special exchange scaling :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fisapt0 | 0th-order functional and/or intramolecular SAPT :ref:`[manual] <sec:fisapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2 | 2nd-order SAPT, traditional definition :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+ | SAPT including all 2nd-order terms :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3) | SAPT including perturbative triples :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3 | SAPT including all 3rd-order terms :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd) | SAPT2+ with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd) | SAPT2+(3) with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd) | SAPT2+3 with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+dmp2 | SAPT including all 2nd-order terms and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)dmp2 | SAPT including perturbative triples and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3dmp2 | SAPT including all 3rd-order terms and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd)dmp2 | SAPT2+ with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd)dmp2 | SAPT2+(3) with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd)dmp2 | SAPT2+3 with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt0-ct | 0th-order SAPT plus charge transfer (CT) calculation :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2-ct | SAPT2 plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+-ct | SAPT2+ plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)-ct | SAPT2+(3) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3-ct | SAPT2+3 plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd)-ct | SAPT2+(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd)-ct | SAPT2+(3)(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd)-ct | SAPT2+3(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| adc | 2nd-order algebraic diagrammatic construction (ADC) :ref:`[manual] <sec:adc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-cc2 | EOM-CC2 :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-ccsd | equation of motion (EOM) CCSD :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-cc3 | EOM-CC3 :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
.. comment missing and why
.. comment a certain isapt --- marginally released
.. comment mrcc --- this is handled in its own table
.. comment psimrcc_scf --- convenience fn
.. include:: /autodoc_dft_energy.rst
.. include:: /mrcc_table_energy.rst
.. include:: /cfour_table_energy.rst
:examples:
>>> # [1] Coupled-cluster singles and doubles calculation with psi code
>>> energy('ccsd')
>>> # [2] Charge-transfer SAPT calculation with scf projection from small into
>>> # requested basis, with specified projection fitting basis
>>> set basis_guess true
>>> set df_basis_guess jun-cc-pVDZ-JKFIT
>>> energy('sapt0-ct')
>>> # [3] Arbitrary-order MPn calculation
>>> energy('mp7')
>>> # [4] Converge scf as singlet, then run detci as triplet upon singlet reference
>>> # Note that the integral transformation is not done automatically when detci is run in a separate step.
>>> molecule H2 {\n0 1\nH\nH 1 0.74\n}
>>> set basis cc-pVDZ
>>> set reference rohf
>>> scf_e, scf_wfn = energy('scf', return_wfn=True)
>>> H2.set_multiplicity(3)
>>> core.MintsHelper(scf_wfn.basisset()).integrals()
>>> energy('detci', ref_wfn=scf_wfn)
>>> # [5] Run two CI calculations, keeping the integrals generated in the first one.
>>> molecule ne {\nNe\n}
>>> set basis cc-pVDZ
>>> cisd_e, cisd_wfn = energy('cisd', return_wfn=True)
>>> energy('fci', ref_wfn=cisd_wfn)
>>> # [6] Can automatically perform complete basis set extrapolations
>>> energy("CCSD/cc-pV[DT]Z")
>>> # [7] Can automatically perform delta corrections that include extrapolations
>>> # even with a user-defined extrapolation formula. See sample inputs named
>>> # cbs-xtpl* for more examples of this input style
>>> energy("MP2/aug-cc-pv([d,t]+d)z + d:ccsd(t)/cc-pvdz", corl_scheme=myxtplfn_2)
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce to MDI if mdi kwarg
use_mdi = kwargs.pop('mdi', False)
if use_mdi:
return mdi_run(name, **kwargs)
core.print_out("\nScratch directory: %s\n" % core.IOManager.shared_object().get_default_path())
# Bounce to CP if bsse kwarg
if kwargs.get('bsse_type', None) is not None:
return driver_nbody.nbody_gufunc(energy, name, ptype='energy', **kwargs)
# Bounce if name is function
if hasattr(name, '__call__'):
return name(energy, kwargs.pop('label', 'custom function'), ptype='energy', **kwargs)
# Allow specification of methods to arbitrary order
lowername = name.lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
# Bounce to CBS if "method/basis" name
if "/" in lowername:
return driver_cbs._cbs_gufunc(energy, name, ptype='energy', **kwargs)
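    # Illustrative routing of the bounces above (hypothetical inputs): energy('mp2/cc-pv[dt]z')
    # is handled by the CBS machinery, energy('scf', bsse_type='cp') by the n-body wrapper, a
    # callable first argument by that callable itself, and a plain energy('ccsd') falls through
    # to the procedures['energy'] dispatch below.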
_filter_renamed_methods("energy", lowername)
# Commit to procedures['energy'] call hereafter
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
#for precallback in hooks['energy']['pre']:
# precallback(lowername, **kwargs)
optstash = driver_util._set_convergence_criterion('energy', lowername, 6, 8, 6, 8, 6)
    # Before invoking the procedure, we rename any file that should be read.
    # This is a workaround to allow restarts with the current Psi4 capabilities
    # until proper, clean restarts are implemented.
    # restart_file is always converted to a single-element list if
    # it contains a single string.
    # DGAS Note: This is hacked together at this point and should be revamped.
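    # A hedged usage sketch (hypothetical file name): energy('scf', restart_file='./h2o.180.npz')
    # copies the named file into the scratch directory under the naming scheme psi4 expects, so
    # the procedure can pick it up as a restart.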
if 'restart_file' in kwargs:
restartfile = kwargs['restart_file'] # Option still available for procedure-specific action
if not isinstance(restartfile, (list, tuple)):
restartfile = (restartfile, )
# Rename the files to be read to be consistent with psi4's file system
for item in restartfile:
name_split = re.split(r'\.', item)
if "npz" in item:
fname = os.path.split(os.path.abspath(core.get_writer_file_prefix(molecule.name())))[1]
psi_scratch = core.IOManager.shared_object().get_default_path()
file_num = item.split('.')[-2]
targetfile = os.path.join(psi_scratch, fname + "." + file_num + ".npz")
else:
filenum = name_split[-1]
try:
filenum = int(filenum)
except ValueError:
filenum = 32 # Default file number is the checkpoint one
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
filepath = psioh.get_file_path(filenum)
namespace = psio.get_default_namespace()
pid = str(os.getpid())
prefix = 'psi'
targetfile = filepath + prefix + '.' + pid + '.' + namespace + '.' + str(filenum)
shutil.copy(item, targetfile)
wfn = procedures['energy'][lowername](lowername, molecule=molecule, **kwargs)
for postcallback in hooks['energy']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
optstash.restore()
if return_wfn: # TODO current energy safer than wfn.energy() for now, but should be revisited
# TODO place this with the associated call, very awkward to call this in other areas at the moment
if lowername in ['efp', 'mrcc', 'dmrg', 'psimrcc']:
core.print_out("\n\nWarning! %s does not have an associated derived wavefunction." % name)
core.print_out("The returned wavefunction is the incoming reference wavefunction.\n\n")
elif 'sapt' in lowername:
core.print_out("\n\nWarning! %s does not have an associated derived wavefunction." % name)
core.print_out("The returned wavefunction is the dimer SCF wavefunction.\n\n")
return (core.variable('CURRENT ENERGY'), wfn)
else:
return core.variable('CURRENT ENERGY')
def gradient(name, **kwargs):
r"""Function complementary to :py:func:`~psi4.optimize()`. Carries out one gradient pass,
deciding analytic or finite difference.
:returns: :py:class:`~psi4.core.Matrix` |w--w| Total electronic gradient in Hartrees/Bohr.
:returns: (:py:class:`~psi4.core.Matrix`, :py:class:`~psi4.core.Wavefunction`) |w--w| gradient and wavefunction when **return_wfn** specified.
:examples:
>>> # [1] Single-point dft gradient getting the gradient
>>> # in file, core.Matrix, and np.array forms
>>> set gradient_write on
>>> G, wfn = gradient('b3lyp-d', return_wfn=True)
>>> wfn.gradient().print_out()
>>> np.array(G)
"""
kwargs = p4util.kwargs_lower(kwargs)
core.print_out("\nScratch directory: %s\n" % core.IOManager.shared_object().get_default_path())
# Figure out what kind of gradient this is
if hasattr(name, '__call__'):
if name.__name__ in ['cbs', 'complete_basis_set']:
gradient_type = 'cbs_wrapper'
else:
# Bounce to name if name is non-CBS function
gradient_type = 'custom_function'
elif kwargs.get('bsse_type', None) is not None:
gradient_type = 'nbody_gufunc'
elif '/' in name:
gradient_type = 'cbs_gufunc'
else:
gradient_type = 'conventional'
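    # Illustrative mapping (hypothetical inputs): gradient('mp2/cc-pv[dt]z') -> 'cbs_gufunc';
    # gradient('scf', bsse_type='cp') -> 'nbody_gufunc'; passing the cbs callable -> 'cbs_wrapper';
    # any other callable -> 'custom_function'; a plain gradient('scf') -> 'conventional'.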
# Figure out lowername, dertype, and func
# If we have analytical gradients we want to pass to our wrappers, otherwise we want to run
    # finite-difference energies or CBS energies
    # TODO MP5/cc-pv[DT]Z behavior unknown due to "levels"
user_dertype = kwargs.pop('dertype', None)
if gradient_type == 'custom_function':
if user_dertype is None:
dertype = 0
core.print_out(
"\nGradient: Custom function passed in without a defined dertype, assuming fd-energy based gradient.\n"
)
else:
core.print_out("\nGradient: Custom function passed in with a dertype of %d\n" % user_dertype)
dertype = user_dertype
if dertype == 1:
return name(gradient, kwargs.pop('label', 'custom function'), ptype='gradient', **kwargs)
else:
optstash = driver_util._set_convergence_criterion('energy', 'scf', 8, 10, 8, 10, 8)
lowername = name
elif gradient_type == 'nbody_gufunc':
return driver_nbody.nbody_gufunc(gradient, name, ptype='gradient', **kwargs)
elif gradient_type == 'cbs_wrapper':
cbs_methods = driver_cbs._cbs_wrapper_methods(**kwargs)
dertype = min([_find_derivative_type('gradient', method, user_dertype) for method in cbs_methods])
if dertype == 1:
# Bounce to CBS (directly) in pure-gradient mode if name is CBS and all parts have analytic grad. avail.
return name(gradient, kwargs.pop('label', 'custom function'), ptype='gradient', **kwargs)
else:
optstash = driver_util._set_convergence_criterion('energy', cbs_methods[0], 8, 10, 8, 10, 8)
lowername = name
# Pass through to G by E
elif gradient_type == 'cbs_gufunc':
cbs_methods = driver_cbs._parse_cbs_gufunc_string(name.lower())[0]
for method in cbs_methods:
_filter_renamed_methods("gradient", method)
dertype = min([_find_derivative_type('gradient', method, user_dertype) for method in cbs_methods])
lowername = name.lower()
if dertype == 1:
# Bounce to CBS in pure-gradient mode if "method/basis" name and all parts have analytic grad. avail.
return driver_cbs._cbs_gufunc(gradient, name, ptype='gradient', **kwargs)
else:
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash = driver_util._set_convergence_criterion('energy', cbs_methods[0], 8, 10, 8, 10, 8)
else:
# Allow specification of methods to arbitrary order
lowername = name.lower()
_filter_renamed_methods("gradient", lowername)
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
# Prevent methods that do not have associated gradients
if lowername in energy_only_methods:
raise ValidationError("gradient('%s') does not have an associated gradient" % name)
dertype = _find_derivative_type('gradient', lowername, user_dertype)
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash = driver_util._set_convergence_criterion('energy', lowername, 8, 10, 8, 10, 8)
# Commit to procedures[] call hereafter
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
# no analytic derivatives for scf_type cd
if core.get_global_option('SCF_TYPE') == 'CD':
if (dertype == 1):
raise ValidationError("""No analytic derivatives for SCF_TYPE CD.""")
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# Does dertype indicate an analytic procedure both exists and is wanted?
if dertype == 1:
core.print_out("""gradient() will perform analytic gradient computation.\n""")
# Perform the gradient calculation
wfn = procedures['gradient'][lowername](lowername, molecule=molecule, **kwargs)
else:
core.print_out("""gradient() will perform gradient computation by finite difference of analytic energies.\n""")
opt_iter = kwargs.get('opt_iter', 1)
if opt_iter is True:
opt_iter = 1
if opt_iter == 1:
print('Performing finite difference calculations')
# Obtain list of displacements
findif_meta_dict = driver_findif.gradient_from_energies_geometries(molecule)
ndisp = len(findif_meta_dict["displacements"]) + 1
print(""" %d displacements needed ...""" % (ndisp), end='')
wfn = _process_displacement(energy, lowername, molecule, findif_meta_dict["reference"], 1, ndisp,
**kwargs)
var_dict = core.variables()
for n, displacement in enumerate(findif_meta_dict["displacements"].values(), start=2):
_process_displacement(
energy, lowername, molecule, displacement, n, ndisp, write_orbitals=False, **kwargs)
# Reset variables
for key, val in var_dict.items():
core.set_variable(key, val)
# Compute the gradient
core.set_local_option('FINDIF', 'GRADIENT_WRITE', True)
G = driver_findif.assemble_gradient_from_energies(findif_meta_dict)
grad_psi_matrix = core.Matrix.from_array(G)
grad_psi_matrix.print_out()
wfn.set_gradient(grad_psi_matrix)
core.set_variable('CURRENT GRADIENT', grad_psi_matrix)
        # Explicitly set the current energy.
if isinstance(lowername, str) and lowername in procedures['energy']:
# this correctly filters out cbs fn and "hf/cc-pvtz"
# it probably incorrectly filters out mp5, but reconsider in DDD
core.set_variable(f"{lowername.upper()} TOTAL GRADIENT", grad_psi_matrix)
wfn.set_variable(f"{lowername.upper()} TOTAL GRADIENT", grad_psi_matrix)
core.set_variable('CURRENT ENERGY', findif_meta_dict["reference"]["energy"])
wfn.set_variable('CURRENT ENERGY', findif_meta_dict["reference"]["energy"])
optstash.restore()
if core.get_option('FINDIF', 'GRADIENT_WRITE'):
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".grad"
qcdb.gradparse.to_string(np.asarray(wfn.gradient()), filename, dtype='GRD', mol=molecule, energy=wfn.energy())
if return_wfn:
return (wfn.gradient(), wfn)
else:
return wfn.gradient()
def properties(*args, **kwargs):
r"""Function to compute various properties.
:aliases: prop()
:returns: none.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
    - This function at present has limited functionality.
Consult the keywords sections of other modules for further property capabilities.
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| Name | Calls Method | Reference | Supported Properties |
+====================+===============================================+================+===============================================================+
| scf | Self-consistent field method(s) | RHF/ROHF/UHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| hf | HF Self-consistent field method(s) | RHF/ROHF/UHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| mp2 | MP2 with density fitting only (mp2_type df) | RHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| cc2 | 2nd-order approximate CCSD | RHF | dipole, quadrupole, polarizability, rotation, roa_tensor |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| ccsd | Coupled cluster singles and doubles (CCSD) | RHF | dipole, quadrupole, polarizability, rotation, roa_tensor |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| dct | density cumulant (functional) theory | RHF/UHF | Listed :ref:`here <sec:oeprop>` |
| | :ref:`[manual] <sec:dct>` | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| omp2 | orbital-optimized second-order | RHF/UHF | Listed :ref:`here <sec:oeprop>` |
| | MP perturbation theory | | Density fitted only |
| | :ref:`[manual] <sec:occ_oo>` | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| omp3 | orbital-optimized third-order | RHF/UHF | Listed :ref:`here <sec:oeprop>` |
| | MP perturbation theory | | Density fitted only |
| | :ref:`[manual] <sec:occ_oo>` | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 | RHF/UHF | Listed :ref:`here <sec:oeprop>` |
| | :ref:`[manual] <sec:occ_oo>` | | Density fitted only |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| olccd | orbital optimized LCCD | RHF/UHF | Listed :ref:`here <sec:oeprop>` |
| | :ref:`[manual] <sec:occ_oo>` | | Density fitted only |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| eom-cc2 | 2nd-order approximate EOM-CCSD | RHF | oscillator_strength, rotational_strength |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| eom-ccsd | Equation-of-motion CCSD (EOM-CCSD) | RHF | oscillator_strength, rotational_strength |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| cisd, cisdt, | Configuration interaction | RHF/ROHF | Listed :ref:`here <sec:oeprop>`, transition_dipole, |
    | cisdtq,            |                                               |                | transition_quadrupole                                         |
| ci5, ..., fci | | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| casscf, rasscf | Multi-configurational SCF | RHF/ROHF | Listed :ref:`here <sec:oeprop>`, transition_dipole, |
| | | | transition_quadrupole |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| adc(0), adc(1), | Algebraic-diagrammatic construction methods | RHF/UHF | dipole, transition_dipole, oscillator_strength, |
| ..., adc(3), | :ref:`[manual] <sec:adc>` | | rotational_strength |
| cvs-adc(0), ... | | | |
| cvs-adc(3) | | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
:type name: str
:param name: ``'ccsd'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type properties: List[str]
:param properties: |dl| ``[]`` |dr| || ``['rotation', 'polarizability', 'oscillator_strength', 'roa']`` || etc.
Indicates which properties should be computed. Defaults to dipole and quadrupole.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:examples:
>>> # [1] Optical rotation calculation
>>> properties('cc2', properties=['rotation'])
"""
kwargs = p4util.kwargs_lower(kwargs)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
kwargs['molecule'] = molecule
# Allow specification of methods to arbitrary order
lowername = args[0].lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
if "/" in lowername:
return driver_cbs._cbs_gufunc(properties, lowername, ptype='properties', **kwargs)
return_wfn = kwargs.pop('return_wfn', False)
props = kwargs.get('properties', ['dipole', 'quadrupole'])
if len(args) > 1:
props += args[1:]
kwargs['properties'] = p4util.drop_duplicates(props)
optstash = driver_util._set_convergence_criterion('properties', lowername, 6, 10, 6, 10, 8)
wfn = procedures['properties'][lowername](lowername, **kwargs)
optstash.restore()
if return_wfn:
return (core.variable('CURRENT ENERGY'), wfn)
else:
return core.variable('CURRENT ENERGY')
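# A hedged example based on the table above: properties('ccsd', properties=['dipole', 'rotation'])
# computes the requested CCSD response properties; omitting the keyword defaults to dipole and
# quadrupole.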
def optimize_geometric(name, **kwargs):
import qcelemental as qcel
from qcelemental.util import which_import
if not which_import('geometric', return_bool=True):
raise ModuleNotFoundError('Python module geometric not found. Solve by installing it: `conda install -c conda-forge geometric` or `pip install geometric`')
import geometric
class Psi4NativeEngine(geometric.engine.Engine):
"""
Internally run an energy and gradient calculation for geometric
"""
def __init__(self, p4_name, p4_mol, p4_return_wfn, **p4_kwargs):
self.p4_name = p4_name
self.p4_mol = p4_mol
self.p4_return_wfn = p4_return_wfn
self.p4_kwargs = p4_kwargs
molecule = geometric.molecule.Molecule()
molecule.elem = [p4_mol.symbol(i) for i in range(p4_mol.natom())]
molecule.xyzs = [p4_mol.geometry().np * qcel.constants.bohr2angstroms]
molecule.build_bonds()
super(Psi4NativeEngine, self).__init__(molecule)
def calc(self, coords, dirname):
self.p4_mol.set_geometry(core.Matrix.from_array(coords.reshape(-1,3)))
self.p4_mol.update_geometry()
if self.p4_return_wfn:
g, wfn = gradient(self.p4_name, return_wfn=True, molecule=self.p4_mol, **self.p4_kwargs)
self.p4_wfn = wfn
else:
g = gradient(self.p4_name, return_wfn=False, molecule=self.p4_mol, **self.p4_kwargs)
e = core.variable('CURRENT ENERGY')
return {'energy': e, 'gradient': g.np.ravel()}
return_wfn = kwargs.pop('return_wfn', False)
return_history = kwargs.pop('return_history', False)
if return_history:
step_energies = []
step_gradients = []
step_coordinates = []
# Make sure the molecule the user provided is the active one
molecule = kwargs.get('molecule', core.get_active_molecule())
# Do not change orientation or COM
molecule.fix_orientation(True)
molecule.fix_com(True)
molecule.update_geometry()
# Get geometric-specific options
optimizer_keywords = {k.lower(): v for k, v in kwargs.get("optimizer_keywords", {}).items()}
core.print_out('\n')
core.print_out("\n ==> GeomeTRIC Optimizer <== ~\n")
# Default to Psi4 maxiter unless overridden
if 'maxiter' not in optimizer_keywords:
optimizer_keywords['maxiter'] = core.get_global_option('GEOM_MAXITER')
# Default to Psi4 geometry convergence criteria unless overridden
if 'convergence_set' not in optimizer_keywords:
optimizer_keywords['convergence_set'] = core.get_global_option('G_CONVERGENCE')
    # GeomeTRIC doesn't know these convergence criteria
if optimizer_keywords['convergence_set'] in ['CFOUR', 'QCHEM', 'MOLPRO']:
core.print_out(f"\n Psi4 convergence criteria {optimizer_keywords['convergence_set']:6s} not recognized by GeomeTRIC, switching to GAU_TIGHT ~")
optimizer_keywords['convergence_set'] = 'GAU_TIGHT'
engine = Psi4NativeEngine(name, molecule, return_wfn, **kwargs)
M = engine.M
# Handle constraints
constraints_dict = {k.lower(): v for k, v in optimizer_keywords.get("constraints", {}).items()}
constraints_string = geometric.run_json.make_constraints_string(constraints_dict)
Cons, CVals = None, None
if constraints_string:
if 'scan' in constraints_dict:
raise ValueError("Coordinate scans are not yet available through the Psi4-GeomeTRIC interface")
Cons, CVals = geometric.optimize.ParseConstraints(M, constraints_string)
# Set up the internal coordinate system
coordsys = optimizer_keywords.get('coordsys', 'tric')
CoordSysDict = {
'cart': (geometric.internal.CartesianCoordinates, False, False),
'prim': (geometric.internal.PrimitiveInternalCoordinates, True, False),
'dlc': (geometric.internal.DelocalizedInternalCoordinates, True, False),
'hdlc': (geometric.internal.DelocalizedInternalCoordinates, False, True),
'tric': (geometric.internal.DelocalizedInternalCoordinates, False, False)
}
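    # Each entry maps a coordinate-system keyword to (CoordClass, connect, addcart); the two
    # booleans are forwarded to geomeTRIC's internal-coordinate constructor below, which is how
    # e.g. 'hdlc' and 'tric' differ while sharing DelocalizedInternalCoordinates.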
# Build internal coordinates
CoordClass, connect, addcart = CoordSysDict[coordsys.lower()]
IC = CoordClass(
M,
build=True,
connect=connect,
addcart=addcart,
constraints=Cons,
cvals=CVals[0] if CVals is not None else None)
# Get initial coordinates in bohr
coords = M.xyzs[0].flatten() / qcel.constants.bohr2angstroms
# Setup an optimizer object
params = geometric.optimize.OptParams(**optimizer_keywords)
optimizer = geometric.optimize.Optimizer(coords, M, IC, engine, None, params)
# TODO: print constraints
# IC.printConstraints(coords, thre=-1)
optimizer.calcEnergyForce()
optimizer.prepareFirstStep()
grms, gmax = optimizer.calcGradNorm()
conv_gmax = '*' if gmax < params.Convergence_gmax else ' '
conv_grms = '*' if grms < params.Convergence_grms else ' '
core.print_out("\n Measures of convergence in internal coordinates in au. ~")
core.print_out("\n Criteria marked as inactive (o), active & met (*), and active & unmet ( ). ~")
core.print_out("\n --------------------------------------------------------------------------------------------- ~")
core.print_out("\n Step Total Energy Delta E MAX Force RMS Force MAX Disp RMS Disp ~")
core.print_out("\n --------------------------------------------------------------------------------------------- ~")
core.print_out((f"\n Convergence Criteria {params.Convergence_energy:10.2e} "
f"{params.Convergence_gmax:10.2e} {params.Convergence_grms:10.2e} "
f"{params.Convergence_dmax:10.2e} {params.Convergence_drms:10.2e} ~"))
core.print_out("\n --------------------------------------------------------------------------------------------- ~")
core.print_out((f"\n {optimizer.Iteration:4d} {optimizer.E:16.8e} -------- "
f"{gmax:10.2e} {conv_gmax} {grms:10.2e} {conv_grms} -------- -------- ~"))
while True:
if optimizer.state == geometric.optimize.OPT_STATE.CONVERGED:
core.print_out("\n\n Optimization converged! ~\n")
break
elif optimizer.state == geometric.optimize.OPT_STATE.FAILED:
core.print_out("\n\n Optimization failed to converge! ~\n")
break
optimizer.step()
optimizer.calcEnergyForce()
optimizer.evaluateStep()
grms, gmax = optimizer.calcGradNorm()
drms, dmax = geometric.optimize.calc_drms_dmax(optimizer.X, optimizer.Xprev)
conv_energy = '*' if np.abs(optimizer.E - optimizer.Eprev) < params.Convergence_energy else ' '
conv_gmax = '*' if gmax < params.Convergence_gmax else ' '
conv_grms = '*' if grms < params.Convergence_grms else ' '
conv_dmax = '*' if dmax < params.Convergence_dmax else ' '
conv_drms = '*' if drms < params.Convergence_drms else ' '
core.print_out((f'\n {optimizer.Iteration:4d} {optimizer.E:16.8e} '
f'{optimizer.E-optimizer.Eprev:10.2e} {conv_energy} {gmax:10.2e} {conv_gmax} '
f'{grms:10.2e} {conv_grms} {dmax:10.2e} {conv_dmax} {drms:10.2e} {conv_drms} ~'))
if return_history:
step_energies.append(optimizer.E)
step_coordinates.append(core.Matrix.from_array(optimizer.X.reshape(-1,3)))
step_gradients.append(core.Matrix.from_array(optimizer.gradx.reshape(-1,3)))
return_energy = optimizer.E
opt_geometry = core.Matrix.from_array(optimizer.X.reshape(-1,3))
molecule.set_geometry(opt_geometry)
molecule.update_geometry()
core.print_out(f'\n Final Energy : {return_energy} \n')
core.print_out('\n Final Geometry : \n')
molecule.print_in_input_format()
if return_history:
history = {
'energy': step_energies,
'gradient': step_gradients,
'coordinates': step_coordinates,
}
if return_wfn:
wfn = engine.p4_wfn
if return_wfn and return_history:
return (return_energy, wfn, history)
elif return_wfn and not return_history:
return (return_energy, wfn)
elif return_history and not return_wfn:
return (return_energy, history)
else:
return return_energy
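# A hedged usage sketch: this code path is presumably reached via optimize(..., engine='geometric'),
# as documented in the optimize() docstring below, e.g.
#
#     e = optimize('scf', engine='geometric', optimizer_keywords={'maxiter': 50})
#
# with CFOUR/QCHEM/MOLPRO convergence sets remapped to GAU_TIGHT as noted above.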
def optimize(name, **kwargs):
r"""Function to perform a geometry optimization.
:aliases: opt()
:returns: *float* |w--w| Total electronic energy of optimized structure in Hartrees.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:raises: :py:class:`psi4.OptimizationConvergenceError` if :term:`GEOM_MAXITER <GEOM_MAXITER (OPTKING)>` exceeded without reaching geometry convergence.
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CURRENT ENERGY`
:type name: str
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
        to be applied to the system. May be any valid argument to
:py:func:`psi4.energy`.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
:type return_history: :ref:`boolean <op_py_boolean>`
:param return_history: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return dictionary of lists of geometries,
energies, and gradients at each step in the optimization.
:type engine: str
:param engine: |dl| ``'optking'`` |dr| || ``'geometric'``
Indicates the optimization engine to use, which can be either Psi4's
native Optking optimizer or the GeomeTRIC program.
:type optimizer_keywords: dict
:param optimizer_keywords: Options passed to the GeomeTRIC optimizer
Indicates additional options to be passed to the GeomeTRIC optimizer if
chosen as the optimization engine.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``gradient`` |dr| || ``energy`` || ``cbs``
Indicates the type of calculation to be performed on the molecule.
The default dertype accesses ``'gradient'`` or ``'energy'``, while
``'cbs'`` performs a multistage finite difference calculation.
If a nested series of python functions is intended (see :ref:`sec:intercalls`),
use keyword ``opt_func`` instead of ``func``.
:type dertype: :ref:`dertype <op_py_dertype>`
:param dertype: ``'gradient'`` || ``'energy'``
Indicates whether analytic (if available) or finite difference
optimization is to be performed.
:type hessian_with: str
:param hessian_with: ``'scf'`` || ``'mp2'`` || etc.
Indicates the computational method with which to perform a hessian
analysis to guide the geometry optimization.
.. warning:: Optimizations where the molecule is specified in Z-matrix format
with dummy atoms will result in the geometry being converted to a Cartesian representation.
.. note:: Analytic gradients are available for all methods in the table
below. Optimizations with other methods in the energy table proceed
by finite differences.
.. _`table:grad_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| efp | efp-only optimizations |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scf | Hartree--Fock (HF) or density functional theory (DFT) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf | HF self consistent field (SCF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dct | density cumulant (functional) theory :ref:`[manual] <sec:dct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2 | 2nd-order |MollerPlesset| perturbation theory (MP2) :ref:`[manual] <sec:dfmp2>` :ref:`[details] <tlmp2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp3 | 3rd-order |MollerPlesset| perturbation theory (MP3) :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp3>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2.5 | average of MP2 and MP3 :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp25>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2 | orbital-optimized second-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp3 | orbital-optimized third-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccd | Linear CCD :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tllccd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| olccd | orbital optimized LCCD :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccd | coupled cluster doubles (CCD) :ref:`[manual] <sec:occ_nonoo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd | coupled cluster singles and doubles (CCSD) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(t) | CCSD with perturbative triples (CCSD(T)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-ccsd | equation of motion (EOM) CCSD :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
.. _`table:grad_scf`:
.. include:: /autodoc_dft_opt.rst
.. include:: /cfour_table_grad.rst
:examples:
>>> # [1] Analytic hf optimization
>>> optimize('hf')
>>> # [2] Finite difference mp5 optimization with gradient
>>> # printed to output file
>>> e, wfn = opt('mp5', return_wfn='yes')
>>> wfn.gradient().print_out()
>>> # [3] Can automatically perform complete basis set extrapolations
>>> optimize('MP2/cc-pV([D,T]+d)Z')
>>> # [4] Can automatically perform delta corrections that include extrapolations
>>> # even with a user-defined extrapolation formula. See sample inputs named
>>> # cbs-xtpl* for more examples of this input style
>>> optimize("MP2/aug-cc-pv([d,t]+d)z + d:ccsd(t)/cc-pvdz", corl_scheme=myxtplfn_2)
>>> # [5] Get info like geometry, gradient, energy back after an
>>> # optimization fails. Note that the energy and gradient
>>> # correspond to the last optimization cycle, whereas the
>>> # geometry (by default) is the anticipated *next* optimization step.
>>> try:
>>> optimize('hf/cc-pvtz')
>>> except psi4.OptimizationConvergenceError as ex:
>>> next_geom_coords_as_numpy_array = np.asarray(ex.wfn.molecule().geometry())
"""
kwargs = p4util.kwargs_lower(kwargs)
engine = kwargs.pop('engine', 'optking')
if engine == 'geometric':
return optimize_geometric(name, **kwargs)
elif engine != 'optking':
raise ValidationError(f"Optimizer {engine} is not supported.")
if hasattr(name, '__call__'):
lowername = name
custom_gradient = True
else:
lowername = name.lower()
custom_gradient = False
return_wfn = kwargs.pop('return_wfn', False)
return_history = kwargs.pop('return_history', False)
if return_history:
# Add wfn once the deep copy issues are worked out
step_energies = []
step_gradients = []
step_coordinates = []
# For CBS and nbody wrappers, need to set retention on INTCO file
if custom_gradient or ('/' in lowername) or kwargs.get('bsse_type', None) is not None:
core.IOManager.shared_object().set_specific_retention(1, True)
full_hess_every = core.get_option('OPTKING', 'FULL_HESS_EVERY')
steps_since_last_hessian = 0
if custom_gradient and core.has_option_changed('OPTKING', 'FULL_HESS_EVERY'):
raise ValidationError("Optimize: Does not support custom Hessian's yet.")
else:
hessian_with_method = kwargs.get('hessian_with', lowername)
_filter_renamed_methods("optimize", lowername)
optstash = p4util.OptionsState(
['OPTKING', 'INTRAFRAG_STEP_LIMIT'],
['FINDIF', 'HESSIAN_WRITE'],
['OPTKING', 'CART_HESS_READ'],
['SCF', 'GUESS_PERSIST'], # handle on behalf of cbs()
['SCF', 'GUESS'])
n = kwargs.get('opt_iter', 1)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
# If we are freezing cartesian, do not orient or COM
if core.get_local_option("OPTKING", "FROZEN_CARTESIAN"):
molecule.fix_orientation(True)
molecule.fix_com(True)
molecule.update_geometry()
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
initial_sym = moleculeclone.schoenflies_symbol()
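# Optking iteration loop: compute a gradient at the current geometry, refresh the
# Hessian when FULL_HESS_EVERY requests it, hand geometry and gradient to optking
# through the legacy-molecule interface, and repeat until optking returns EndLoop
# (converged) or Failure.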
while n <= core.get_option('OPTKING', 'GEOM_MAXITER'):
current_sym = moleculeclone.schoenflies_symbol()
if initial_sym != current_sym:
raise ValidationError("""Point group changed! (%s <-- %s) You should restart """
"""using the last geometry in the output, after """
"""carefully making sure all symmetry-dependent """
"""input, such as DOCC, is correct.""" % (current_sym, initial_sym))
kwargs['opt_iter'] = n
# Use orbitals from previous iteration as a guess
# set within loop so that can be influenced by fns to optimize (e.g., cbs)
if (n > 1) and (not core.get_option('SCF', 'GUESS_PERSIST')):
core.set_local_option('SCF', 'GUESS', 'READ')
# Before computing gradient, save previous molecule and wavefunction if this is an IRC optimization
if (n > 1) and (core.get_option('OPTKING', 'OPT_TYPE') == 'IRC'):
old_thisenergy = core.variable('CURRENT ENERGY')
# Compute the gradient - preserve opt data despite core.clean calls in gradient
core.IOManager.shared_object().set_specific_retention(1, True)
G, wfn = gradient(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
thisenergy = core.variable('CURRENT ENERGY')
# above, used to be getting energy as last of energy list from gradient()
# thisenergy below should ultimately be testing on wfn.energy()
# Record optimization steps
# Add wavefunctions later
if return_history:
step_energies.append(thisenergy)
step_coordinates.append(moleculeclone.geometry())
step_gradients.append(G.clone())
core.set_legacy_gradient(G)
# opt_func = kwargs.get('opt_func', kwargs.get('func', energy))
# if opt_func.__name__ == 'complete_basis_set':
# core.IOManager.shared_object().set_specific_retention(1, True)
if full_hess_every > -1:
core.set_global_option('HESSIAN_WRITE', True)
# compute Hessian as requested; frequency wipes out gradient so stash it
if ((full_hess_every > -1) and (n == 1)) or (steps_since_last_hessian + 1 == full_hess_every):
G = core.get_legacy_gradient() # TODO
core.IOManager.shared_object().set_specific_retention(1, True)
core.IOManager.shared_object().set_specific_path(1, './')
frequencies(hessian_with_method, molecule=moleculeclone, ref_gradient = G, **kwargs)
steps_since_last_hessian = 0
core.set_legacy_gradient(G)
core.set_global_option('CART_HESS_READ', True)
elif (full_hess_every == -1) and core.get_global_option('CART_HESS_READ') and (n == 1):
pass
# Do nothing; user said to read existing hessian once
else:
core.set_global_option('CART_HESS_READ', False)
steps_since_last_hessian += 1
# Take step. communicate to/from/within optking through legacy_molecule
core.set_legacy_molecule(moleculeclone)
optking_rval = core.optking()
moleculeclone = core.get_legacy_molecule()
moleculeclone.update_geometry()
if optking_rval == core.PsiReturnType.EndLoop:
# if this is the end of an IRC run, set wfn, energy, and molecule to that
# of the last optimized IRC point
if core.get_option('OPTKING', 'OPT_TYPE') == 'IRC':
thisenergy = old_thisenergy
print('Optimizer: Optimization complete!')
core.print_out('\n Final optimized geometry and variables:\n')
moleculeclone.print_in_input_format()
# Mark the optimization data as disposable now that the optimization is done.
core.IOManager.shared_object().set_specific_retention(1, False)
# Check if user wants to see the intcos; if so, don't delete them.
if core.get_option('OPTKING', 'INTCOS_GENERATE_EXIT') == False:
if core.get_option('OPTKING', 'KEEP_INTCOS') == False:
core.opt_clean()
# Changing environment to optimized geometry as expected by user
molecule.set_geometry(moleculeclone.geometry())
for postcallback in hooks['optimize']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
core.clean()
# Cleanup binary file 1
if custom_gradient or ('/' in lowername) or kwargs.get('bsse_type', None) is not None:
core.IOManager.shared_object().set_specific_retention(1, False)
optstash.restore()
if return_history:
history = {
'energy': step_energies,
'gradient': step_gradients,
'coordinates': step_coordinates,
}
if return_wfn and return_history:
return (thisenergy, wfn, history)
elif return_wfn and not return_history:
return (thisenergy, wfn)
elif return_history and not return_wfn:
return (thisenergy, history)
else:
return thisenergy
elif optking_rval == core.PsiReturnType.Failure:
print('Optimizer: Optimization failed!')
# Mark the optimization data as disposable now that the optimization is done.
core.IOManager.shared_object().set_specific_retention(1, False)
if (core.get_option('OPTKING', 'KEEP_INTCOS') == False):
core.opt_clean()
molecule.set_geometry(moleculeclone.geometry())
core.clean()
optstash.restore()
raise OptimizationConvergenceError("""geometry optimization""", n - 1, wfn)
return thisenergy
core.print_out('\n Structure for next step:\n')
moleculeclone.print_in_input_format()
n += 1
if core.get_option('OPTKING', 'INTCOS_GENERATE_EXIT') == False:
if core.get_option('OPTKING', 'KEEP_INTCOS') == False:
core.opt_clean()
optstash.restore()
raise OptimizationConvergenceError("""geometry optimization""", n - 1, wfn)
def hessian(name, **kwargs):
r"""Function complementary to :py:func:`~frequency`. Computes force
constants, deciding analytic, finite difference of gradients, or
finite difference of energies.
:returns: :py:class:`~psi4.core.Matrix` |w--w| Total non-mass-weighted electronic Hessian in Hartrees/Bohr/Bohr.
:returns: (:py:class:`~psi4.core.Matrix`, :py:class:`~psi4.core.Wavefunction`) |w--w| Hessian and wavefunction when **return_wfn** specified.
:examples:
>>> # [1] Frequency calculation without thermochemical analysis
>>> hessian('mp3')
>>> # [2] Frequency calc w/o thermo analysis getting the Hessian
>>> # in file, core.Matrix, and np.array forms
>>> set hessian_write on
>>> H, wfn = hessian('ccsd', return_wfn=True)
>>> wfn.hessian().print_out()
>>> np.array(H)
"""
kwargs = p4util.kwargs_lower(kwargs)
# Figure out what kind of gradient this is
if hasattr(name, '__call__'):
if name.__name__ in ['cbs', 'complete_basis_set']:
gradient_type = 'cbs_wrapper'
else:
# Bounce to name if name is non-CBS function
gradient_type = 'custom_function'
elif kwargs.get('bsse_type', None) is not None:
gradient_type = 'nbody_gufunc'
elif '/' in name:
gradient_type = 'cbs_gufunc'
else:
gradient_type = 'conventional'
# Call appropriate wrappers
if gradient_type == 'nbody_gufunc':
return driver_nbody.nbody_gufunc(hessian, name.lower(), ptype='hessian', **kwargs)
# Check if this is a CBS extrapolation
elif gradient_type == "cbs_gufunc":
return driver_cbs._cbs_gufunc(hessian, name.lower(), **kwargs, ptype="hessian")
elif gradient_type == "cbs_wrapper":
return driver_cbs.cbs(hessian, "cbs", **kwargs, ptype="hessian")
elif gradient_type != "conventional":
raise ValidationError("Hessian: Does not yet support custom functions.")
else:
lowername = name.lower()
_filter_renamed_methods("frequency", lowername)
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
dertype = 2
# Prevent methods that do not have associated energies
if lowername in energy_only_methods:
raise ValidationError("hessian('%s') does not have an associated hessian" % name)
optstash = p4util.OptionsState(
['FINDIF', 'HESSIAN_WRITE'],
['FINDIF', 'FD_PROJECT'],
)
# Allow specification of methods to arbitrary order
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
dertype = _find_derivative_type('hessian', lowername, kwargs.pop('freq_dertype', kwargs.get('dertype', None)))
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash_conv = driver_util._set_convergence_criterion('energy', lowername, 8, 10, 8, 10, 8)
# Select certain irreps
irrep = kwargs.get('irrep', -1)
if irrep == -1:
pass # do all irreps
else:
irrep = driver_util.parse_cotton_irreps(irrep, molecule.schoenflies_symbol())
irrep -= 1 # A1 irrep is externally 1, internally 0
if dertype == 2:
core.print_out(
"""hessian() switching to finite difference by gradients for partial Hessian calculation.\n""")
dertype = 1
# At stationary point?
if 'ref_gradient' in kwargs:
core.print_out("""hessian() using ref_gradient to assess stationary point.\n""")
G0 = kwargs['ref_gradient']
else:
G0 = gradient(lowername, molecule=molecule, **kwargs)
translations_projection_sound, rotations_projection_sound = _energy_is_invariant(G0)
core.print_out(
'\n Based on options and gradient (rms={:.2E}), recommend {}projecting translations and {}projecting rotations.\n'
.format(G0.rms(), '' if translations_projection_sound else 'not ',
'' if rotations_projection_sound else 'not '))
if not core.has_option_changed('FINDIF', 'FD_PROJECT'):
core.set_local_option('FINDIF', 'FD_PROJECT', rotations_projection_sound)
# Does an analytic procedure exist for the requested method?
if dertype == 2:
core.print_out("""hessian() will perform analytic frequency computation.\n""")
# We have the desired method. Do it.
wfn = procedures['hessian'][lowername](lowername, molecule=molecule, **kwargs)
wfn.set_gradient(G0)
optstash.restore()
optstash_conv.restore()
# TODO: check that current energy's being set to the right figure when this code is actually used
core.set_variable('CURRENT ENERGY', wfn.energy())
wfn.set_variable('CURRENT ENERGY', wfn.energy())
elif dertype == 1:
core.print_out(
"""hessian() will perform frequency computation by finite difference of analytic gradients.\n""")
# Obtain list of displacements
findif_meta_dict = driver_findif.hessian_from_gradients_geometries(molecule, irrep)
# Record undisplaced symmetry for projection of displaced point groups
core.set_global_option("PARENT_SYMMETRY", molecule.schoenflies_symbol())
ndisp = len(findif_meta_dict["displacements"]) + 1
print(""" %d displacements needed.""" % ndisp)
wfn = _process_displacement(gradient, lowername, molecule, findif_meta_dict["reference"], 1, ndisp,
**kwargs)
var_dict = core.variables()
for n, displacement in enumerate(findif_meta_dict["displacements"].values(), start=2):
_process_displacement(
gradient, lowername, molecule, displacement, n, ndisp, write_orbitals=False, **kwargs)
# Reset variables
for key, val in var_dict.items():
core.set_variable(key, val)
# Assemble Hessian from gradients
# Final disp is undisp, so wfn has mol, G, H general to freq calc
H = driver_findif.assemble_hessian_from_gradients(findif_meta_dict, irrep)
wfn.set_hessian(core.Matrix.from_array(H))
wfn.set_gradient(G0)
# Explicitly set the current energy..
core.set_variable('CURRENT ENERGY', findif_meta_dict["reference"]["energy"])
wfn.set_variable('CURRENT ENERGY', findif_meta_dict["reference"]["energy"])
core.set_global_option("PARENT_SYMMETRY", "")
optstash.restore()
optstash_conv.restore()
else:
core.print_out("""hessian() will perform frequency computation by finite difference of analytic energies.\n""")
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash.restore()
optstash_conv.restore()
optstash_conv = driver_util._set_convergence_criterion('energy', lowername, 10, 11, 10, 11, 10)
# Obtain list of displacements
findif_meta_dict = driver_findif.hessian_from_energies_geometries(molecule, irrep)
# Record undisplaced symmetry for projection of displaced point groups
core.set_global_option("PARENT_SYMMETRY", molecule.schoenflies_symbol())
ndisp = len(findif_meta_dict["displacements"]) + 1
print(' %d displacements needed.' % ndisp)
wfn = _process_displacement(energy, lowername, molecule, findif_meta_dict["reference"], 1, ndisp,
**kwargs)
var_dict = core.variables()
for n, displacement in enumerate(findif_meta_dict["displacements"].values(), start=2):
_process_displacement(
energy, lowername, molecule, displacement, n, ndisp, write_orbitals=False, **kwargs)
# Reset variables
for key, val in var_dict.items():
core.set_variable(key, val)
# Assemble Hessian from energies
H = driver_findif.assemble_hessian_from_energies(findif_meta_dict, irrep)
wfn.set_hessian(core.Matrix.from_array(H))
wfn.set_gradient(G0)
# Explicitly set the current energy..
core.set_variable('CURRENT ENERGY', findif_meta_dict["reference"]["energy"])
wfn.set_variable('CURRENT ENERGY', findif_meta_dict["reference"]["energy"])
core.set_global_option("PARENT_SYMMETRY", "")
optstash.restore()
optstash_conv.restore()
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
def frequency(name, **kwargs):
r"""Function to compute harmonic vibrational frequencies.
:aliases: frequencies(), freq()
:returns: *float* |w--w| Total electronic energy in Hartrees.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:type name: str
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
Arrays of frequencies and the Hessian can be accessed through the wavefunction.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``gradient`` |dr| || ``energy`` || ``cbs``
Indicates the type of calculation to be performed on the molecule.
The default dertype accesses ``'gradient'`` or ``'energy'``, while
``'cbs'`` performs a multistage finite difference calculation.
If a nested series of python functions is intended (see :ref:`sec:intercalls`),
use keyword ``freq_func`` instead of ``func``.
:type dertype: :ref:`dertype <op_py_dertype>`
:param dertype: |dl| ``'hessian'`` |dr| || ``'gradient'`` || ``'energy'``
Indicates whether analytic (if available - they're not), finite
difference of gradients (if available) or finite difference of
energies is to be performed.
:type irrep: int or str
:param irrep: |dl| ``-1`` |dr| || ``1`` || ``'b2'`` || ``'App'`` || etc.
Indicates which symmetry block (:ref:`Cotton <table:irrepOrdering>` ordering) of vibrational
frequencies to be computed. ``1``, ``'1'``, or ``'a1'`` represents
:math:`a_1`, requesting only the totally symmetric modes.
``-1`` indicates a full frequency calculation.
.. note:: Analytic hessians are only available for RHF. For all other methods, frequencies will
proceed through finite differences according to availability of gradients or energies.
.. _`table:freq_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| scf | Hartree--Fock (HF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
:examples:
>>> # [1] Frequency calculation for all modes through highest available derivatives
>>> frequency('ccsd')
>>> # [2] Frequency calculation for b2 modes through finite difference of gradients
>>> # printing lowest mode frequency to screen and Hessian to output
>>> E, wfn = frequencies('scf', dertype=1, irrep=4, return_wfn=True)
>>> print(wfn.frequencies().get(0, 0))
>>> wfn.hessian().print_out()
>>> # [3] Frequency calculation at default conditions and Hessian reuse at STP
>>> E, wfn = freq('mp2', return_wfn=True)
>>> set t 273.15
>>> set p 100000
>>> thermo(wfn, wfn.frequencies())
>>> # [4] Opt+Freq, skipping the gradient recalc at the start of the Hessian
>>> e, wfn = optimize('hf', return_wfn=True)
>>> frequencies('hf', ref_gradient=wfn.gradient())
"""
kwargs = p4util.kwargs_lower(kwargs)
return_wfn = kwargs.pop('return_wfn', False)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# Compute the hessian
H, wfn = hessian(name, return_wfn=True, molecule=molecule, **kwargs)
# Project final frequencies?
translations_projection_sound, rotations_projection_sound = _energy_is_invariant(wfn.gradient())
project_trans = kwargs.get('project_trans', translations_projection_sound)
project_rot = kwargs.get('project_rot', rotations_projection_sound)
irrep = kwargs.get('irrep', None)
vibinfo = vibanal_wfn(wfn, irrep=irrep, project_trans=project_trans, project_rot=project_rot)
wfn.frequency_analysis = vibinfo
# hooks expect a lowercased method name; guard against callable names
lowername = name if hasattr(name, '__call__') else name.lower()
for postcallback in hooks['frequency']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
if return_wfn:
return (core.variable('CURRENT ENERGY'), wfn)
else:
return core.variable('CURRENT ENERGY')
def vibanal_wfn(wfn: core.Wavefunction, hess: np.ndarray = None, irrep: Union[int, str] = None, molecule=None, project_trans: bool = True, project_rot: bool = True):
"""Function to perform analysis of a hessian or hessian block, specifically...
calling for and printing vibrational and thermochemical analysis, setting thermochemical variables,
and writing the vibrec and normal mode files.
Parameters
----------
wfn
The wavefunction which had its Hessian computed.
hess
Hessian to analyze, if not the hessian in wfn.
(3*nat, 3*nat) non-mass-weighted Hessian in atomic units, [Eh/a0/a0].
irrep
The irrep for which frequencies are calculated. Thermochemical analysis is skipped if this is given,
as only one symmetry block of the hessian has been computed.
molecule : :py:class:`~psi4.core.Molecule` or qcdb.Molecule, optional
The molecule to pull information from, if not the molecule in wfn. Must at least have similar
geometry to the molecule in wfn.
project_trans
Should translations be projected in the harmonic analysis?
project_rot
Should rotations be projected in the harmonic analysis?
Returns
-------
vibinfo : dict
A dictionary of vibrational information. See :py:func:`~psi4.driver.qcdb.vib.harmonic_analysis`
"""
if hess is None:
nmwhess = np.asarray(wfn.hessian())
else:
nmwhess = hess
dipder = wfn.variables().get("CURRENT DIPOLE GRADIENT", None)
if dipder is not None:
dipder = np.asarray(dipder).T
mol = wfn.molecule()
geom = np.asarray(mol.geometry())
symbols = [mol.symbol(at) for at in range(mol.natom())]
vibrec = {'molecule': mol.to_dict(np_out=False), 'hessian': nmwhess.tolist()}
if molecule is not None:
molecule.update_geometry()
if mol.natom() != molecule.natom():
raise ValidationError('Impostor molecule trying to be analyzed! natom {} != {}'.format(
mol.natom(), molecule.natom()))
if abs(mol.nuclear_repulsion_energy() - molecule.nuclear_repulsion_energy()) > 1.e-6:
raise ValidationError('Impostor molecule trying to be analyzed! NRE {} != {}'.format(
mol.nuclear_repulsion_energy(), molecule.nuclear_repulsion_energy()))
if not np.allclose(np.asarray(mol.geometry()), np.asarray(molecule.geometry()), atol=1.e-6):
core.print_out(
'Warning: geometry center/orientation mismatch. Normal modes may not be in expected coordinate system.'
)
# raise ValidationError('Impostor molecule trying to be analyzed! geometry\n{}\n !=\n{}'.format(
# np.asarray(mol.geometry()), np.asarray(molecule.geometry())))
mol = molecule
m = np.asarray([mol.mass(at) for at in range(mol.natom())])
irrep_labels = mol.irrep_labels()
vibinfo, vibtext = qcdb.vib.harmonic_analysis(
nmwhess, geom, m, wfn.basisset(), irrep_labels, dipder=dipder, project_trans=project_trans, project_rot=project_rot)
vibrec.update({k: qca.json() for k, qca in vibinfo.items()})
core.print_out(vibtext)
core.print_out(qcdb.vib.print_vibs(vibinfo, shortlong=True, normco='x', atom_lbl=symbols))
if core.has_option_changed('THERMO', 'ROTATIONAL_SYMMETRY_NUMBER'):
rsn = core.get_option('THERMO', 'ROTATIONAL_SYMMETRY_NUMBER')
else:
rsn = mol.rotational_symmetry_number()
if irrep is None:
therminfo, thermtext = qcdb.vib.thermo(
vibinfo,
T=core.get_option("THERMO", "T"), # 298.15 [K]
P=core.get_option("THERMO", "P"), # 101325. [Pa]
multiplicity=mol.multiplicity(),
molecular_mass=np.sum(m),
sigma=rsn,
rotor_type=mol.rotor_type(),
rot_const=np.asarray(mol.rotational_constants()),
E0=core.variable('CURRENT ENERGY')) # someday, wfn.energy()
vibrec.update({k: qca.json() for k, qca in therminfo.items()})
core.set_variable("ZPVE", therminfo['ZPE_corr'].data) # P::e THERMO
core.set_variable("THERMAL ENERGY CORRECTION", therminfo['E_corr'].data) # P::e THERMO
core.set_variable("ENTHALPY CORRECTION", therminfo['H_corr'].data) # P::e THERMO
core.set_variable("GIBBS FREE ENERGY CORRECTION", therminfo['G_corr'].data) # P::e THERMO
core.set_variable("ZERO K ENTHALPY", therminfo['ZPE_tot'].data) # P::e THERMO
core.set_variable("THERMAL ENERGY", therminfo['E_tot'].data) # P::e THERMO
core.set_variable("ENTHALPY", therminfo['H_tot'].data) # P::e THERMO
core.set_variable("GIBBS FREE ENERGY", therminfo['G_tot'].data) # P::e THERMO
core.print_out(thermtext)
else:
core.print_out(' Thermochemical analysis skipped for partial frequency calculation.\n')
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
filename = core.get_writer_file_prefix(mol.name()) + ".vibrec"
with open(filename, 'w') as handle:
json.dump(vibrec, handle, sort_keys=True, indent=4)
if core.get_option('FINDIF', 'NORMAL_MODES_WRITE'):
filename = core.get_writer_file_prefix(mol.name()) + ".molden_normal_modes"
with open(filename, 'w') as handle:
handle.write(qcdb.vib.print_molden_vibs(vibinfo, symbols, geom, standalone=True))
return vibinfo
def _hessian_write(wfn):
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".hess"
with open(filename, 'wb') as handle:
qcdb.hessparse.to_string(np.asarray(wfn.hessian()), handle, dtype='psi4')
def gdma(wfn, datafile=""):
"""Function to use wavefunction information in *wfn* and, if specified,
additional commands in *filename* to run GDMA analysis.
.. versionadded:: 0.6
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate DMA analysis
:type datafile: str
:param datafile: optional control file (see GDMA manual) to perform more complicated DMA
analyses. If this option is used, the File keyword must be set to read
a filename.fchk, where filename is provided by :term:`WRITER_FILE_LABEL <WRITER_FILE_LABEL (GLOBALS)>` .
:examples:
>>> # [1] DMA analysis from MP2 wavefunction. N.B. gradient must be requested to generate MP2 density.
>>> grad, wfn = gradient('mp2', return_wfn=True)
>>> gdma(wfn)
"""
# Start by writing a G* checkpoint file, for the GDMA code to read in
fw = core.FCHKWriter(wfn)
molname = wfn.molecule().name()
prefix = core.get_writer_file_prefix(molname)
fchkfile = prefix + '.fchk'
fw.write(fchkfile)
if datafile:
commands = datafile
else:
if wfn.reference_wavefunction():
densname = "CC"
else:
densname = "SCF"
commands = 'psi4_dma_datafile.dma'
radii = core.get_option('GDMA', 'GDMA_RADIUS')
origin = core.get_option('GDMA', 'GDMA_ORIGIN')
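# When no user control file is supplied, assemble a minimal GDMA data file:
# point it at the fchk file and density, copy the units/origin/switch/radius/limit
# keywords set above, and bracket the multipole request with Start/Finish.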
with open(commands, 'w') as f:
f.write("File %s Density %s\n" % (fchkfile, densname))
f.write("Angstrom\n")
f.write("%s\n" % core.get_option('GDMA', 'GDMA_MULTIPOLE_UNITS'))
f.write("Multipoles\n")
if origin:
try:
f.write("Origin %f %f %f\n" % (float(origin[0]), float(origin[1]), float(origin[2])))
except:
raise ValidationError("The GDMA origin array should contain three entries: x, y, and z.")
f.write("Switch %f\n" % core.get_option('GDMA', 'GDMA_SWITCH'))
if radii:
f.write("Radius %s\n" % " ".join([str(r) for r in radii]))
f.write("Limit %d\n" % core.get_option('GDMA', 'GDMA_LIMIT'))
f.write("Start\n")
f.write("Finish\n")
core.run_gdma(wfn, commands)
os.remove(fchkfile)
# If we generated the DMA control file, we should clean up here
if not datafile:
os.remove(commands)
def fchk(wfn: core.Wavefunction, filename: str, *, debug: bool = False, strict_label: bool = True):
"""Function to write wavefunction information in *wfn* to *filename* in
Gaussian FCHK format.
.. versionadded:: 0.6
:returns: None
:param wfn: set of molecule, basis, orbitals from which to generate fchk file
:param filename: destination file name for FCHK file
:param debug: returns a dictionary to aid with debugging
:param strict_label: If true set a density label compliant with what Gaussian would write. A warning will be printed if this is not possible.
Otherwise set the density label according to the method name.
Notes
-----
* A description of the FCHK format is available at http://wild.life.nctu.edu.tw/~jsyu/compchem/g09/g09ur/f_formchk.htm
* The allowed headers for methods are general and limited, i.e., "Total SCF|MP2|CI|CC Density",
PSI4 will try to find the right one for the current calculation. If `strict_label=False` the PSI4 method name will be used as label.
* Not all theory modules in PSI4 are compatible with the FCHK writer.
A warning will be printed if a theory module is not supported.
* Caution! For orbital-optimized correlated methods (e.g. DCT, OMP2) the 'Orbital Energy' field contains ambiguous data.
:examples:
>>> # [1] FCHK file for DFT calculation
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> fchk(wfn, 'mycalc.fchk')
>>> # [2] FCHK file for correlated densities
>>> E, wfn = gradient('ccsd', return_wfn=True)
>>> fchk(wfn, 'mycalc.fchk')
>>> # [2] Write FCHK file with non-standard label.
>>> E, wfn = gradient('mp2.5', return_wfn=True)
>>> fchk(wfn, 'mycalc.fchk', strict_label=False)
"""
# * Known limitations and notes *
#
# OCC: (occ theory module only, not dfocc) is turned off as densities are not correctly set.
# DFMP2: Contains natural orbitals in wfn.C() and wfn.epsilon() data. This is fixed to contain respective HF data.
allowed = ['DFMP2', 'SCF', 'CCENERGY', 'DCT', 'DFOCC']
module_ = wfn.module().upper()
if module_ not in allowed:
core.print_out(f"FCHKWriter: Theory module {module_} is currently not supported by the FCHK writer.")
return None
if (wfn.basisset().has_ECP()):
core.print_out(f"FCHKWriter: Limited ECP support! No ECP data will be written to the FCHK file.")
# fix orbital coefficients and energies for DFMP2
if module_ in ['DFMP2']:
wfn_ = core.Wavefunction.build(wfn.molecule(), core.get_global_option('BASIS'))
wfn_.deep_copy(wfn)
refwfn = wfn.reference_wavefunction()
wfn_.set_reference_wavefunction(refwfn) # refwfn not deep_copied
wfn_.Ca().copy(refwfn.Ca())
wfn_.Cb().copy(refwfn.Cb())
wfn_.epsilon_a().copy(refwfn.epsilon_a())
wfn_.epsilon_b().copy(refwfn.epsilon_b())
fw = core.FCHKWriter(wfn_)
else:
fw = core.FCHKWriter(wfn)
if module_ in ['DCT', 'DFOCC']:
core.print_out("""FCHKWriter: Caution! For orbital-optimized correlated methods
the 'Orbital Energy' field contains ambiguous data. \n""")
# At this point we don't know the method name, so we try to search for it.
# idea: get the method from the variable matching closely the 'current energy'
varlist = core.scalar_variables()
current = varlist['CURRENT ENERGY']
# delete problematic entries
for key in ['CURRENT ENERGY', 'CURRENT REFERENCE ENERGY']:
varlist.pop(key, None)
# find closest matching energy
for (key, val) in varlist.items():
if (np.isclose(val, current, 1e-12)):
method = key.split()[0]
break
# The 'official' list of labels for compatibility.
# OMP2,MP2.5,OCCD, etc get reduced to MP2,CC.
allowed_labels = {
"HF": " SCF Density",
"SCF": " SCF Density",
"DFT": " SCF Density",
"MP2": " MP2 Density",
"MP3": " MP3 Density",
"MP4": " MP4 Density",
"CI": " CI Density",
"CC": " CC Density",
}
# assign label from method name
fchk_label = f" {method} Density"
if strict_label:
in_list = False
for key in allowed_labels:
if key in method:
if key != method:
core.print_out(f"FCHKWriter: !WARNING! method '{method}' renamed to label '{key}'.\n")
fchk_label = allowed_labels[key]
in_list = True
if not in_list:
core.print_out(f"FCHKWriter: !WARNING! {method} is not recognized. Using non-standard label.\n")
core.print_out(f"FCHKWriter: Writing {filename} with label '{fchk_label}'.\n")
fw.set_postscf_density_label(fchk_label)
fw.write(filename)
# needed for the pytest. The SCF density below follows PSI4 ordering not FCHK ordering.
if debug:
ret = {
"filename": filename,
"detected energy": method,
"selected label": fchk_label,
"Total SCF Density": fw.SCF_Dtot().np,
}
return ret
return None
def molden(wfn, filename=None, density_a=None, density_b=None, dovirtual=None):
"""Function to write wavefunction information in *wfn* to *filename* in
molden format. Will write natural orbitals from *density* (MO basis) if supplied.
Warning! Most post-SCF Wavefunctions do not build the density as this is often
much more costly than the energy. In addition, the Wavefunction density attributes
(Da and Db) return the SO density and must be transformed to the MO basis
to use with this function.
.. versionadded:: 0.5
*wfn* parameter passed explicitly
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate cube files
:type filename: str
:param filename: destination file name for MOLDEN file (optional)
:type density_a: :py:class:`~psi4.core.Matrix`
:param density_a: density in the MO basis to build alpha NO's from (optional)
:type density_b: :py:class:`~psi4.core.Matrix`
:param density_b: density in the MO basis to build beta NO's from, assumes restricted if not supplied (optional)
:type dovirtual: bool
:param dovirtual: write all MOs to the MOLDEN file (true) or discard the unoccupied MOs (false); not valid for natural orbitals (optional)
:examples:
1. Molden file with the Kohn-Sham orbitals of a DFT calculation.
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> molden(wfn, 'mycalc.molden')
2. Molden file for CI/MCSCF computation using NO roots.
Any method returning a ``CIWavefunction`` object will work: ``detci``,
``fci``, ``casscf``, etc. The first two arguments of ``get_opdm`` can be
set to ``n, n`` where n => 0 selects the root to write out, provided
these roots were computed, see :term:`NUM_ROOTS <NUM_ROOTS (DETCI)>`. The
third argument controls the spin (``"A"``, ``"B"`` or ``"SUM"``) and the final
boolean option determines whether inactive orbitals are included.
>>> E, wfn = energy('detci', return_wfn=True)
>>> molden(wfn, 'no_root1.molden', density_a=wfn.get_opdm(0, 0, "A", True))
3. The following produces **an INCORRECT Molden file**, because the
``molden`` function needs the density in the MO basis (orbitals are internally
converted and written to the Molden file in the AO basis). The correct
usage is given in the next point.
>>> E, wfn = energy('ccsd', return_wfn=True)
>>> molden(wfn, 'ccsd_no.molden', density_a=wfn.Da())
4. Molden file with the natural orbitals of the ground-state 1RDM of a
Post-HF calculation. Note the required transformation of Da (SO->MO).
>>> E, wfn = properties('ccsd', return_wfn=True)
>>> Da_so = wfn.Da()
>>> SCa = core.doublet(wfn.S(), wfn.Ca(), False, False)
>>> Da_mo = core.triplet(SCa, Da_so, SCa, True, False, False)
>>> molden(wfn, 'ccsd_no.molden', density_a=Da_mo)
"""
if filename is None:
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".molden"
if dovirtual is None:
dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
else:
dovirt = dovirtual
if density_a:
nmopi = wfn.nmopi()
nsopi = wfn.nsopi()
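# Diagonalize the MO-basis density to obtain natural-orbital occupations and the
# MO->NO rotation, then back-transform through Ca/Cb to express the NOs in the SO basis.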
NO_Ra = core.Matrix("NO Alpha Rotation Matrix", nmopi, nmopi)
NO_occa = core.Vector(nmopi)
density_a.diagonalize(NO_Ra, NO_occa, core.DiagonalizeOrder.Descending)
NO_Ca = core.Matrix("Ca Natural Orbitals", nsopi, nmopi)
NO_Ca.gemm(False, False, 1.0, wfn.Ca(), NO_Ra, 0)
if density_b:
NO_Rb = core.Matrix("NO Beta Rotation Matrix", nmopi, nmopi)
NO_occb = core.Vector(nmopi)
density_b.diagonalize(NO_Rb, NO_occb, core.DiagonalizeOrder.Descending)
NO_Cb = core.Matrix("Cb Natural Orbitals", nsopi, nmopi)
NO_Cb.gemm(False, False, 1.0, wfn.Cb(), NO_Rb, 0)
else:
NO_occb = NO_occa
NO_Cb = NO_Ca
mw = core.MoldenWriter(wfn)
mw.write(filename, NO_Ca, NO_Cb, NO_occa, NO_occb, NO_occa, NO_occb, dovirt)
else:
try:
occa = wfn.occupation_a()
occb = wfn.occupation_b()
except AttributeError:
core.print_out("\n!Molden warning: This wavefunction does not have occupation numbers.\n"
"Writing zero's for occupation numbers\n\n")
occa = core.Vector(wfn.nmopi())
occb = core.Vector(wfn.nmopi())
mw = core.MoldenWriter(wfn)
mw.write(filename, wfn.Ca(), wfn.Cb(), wfn.epsilon_a(), wfn.epsilon_b(), occa, occb, dovirt)
def tdscf(wfn, **kwargs):
return proc.run_tdscf_excitations(wfn,**kwargs)
# Aliases
opt = optimize
freq = frequency
frequencies = frequency
prop = properties
| lothian/psi4 | psi4/driver/driver.py | Python | lgpl-3.0 | 120,682 |
import sys, os
import subprocess
import traceback
import urllib
import zipfile
import ontology_to_daikon
import common
daikon_jar = common.get_jar("daikon.jar")
DAIKON_SPLITTER = "====================="
def run_daikon_on_dtrace_file(dtrace_file, classpath=daikon_jar, checked_invariant=None):
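# Run DaikonSimple over the trace; when a specific invariant class is given, restrict
# inference to that user-defined invariant and set undo_opts so invariants that would
# otherwise be optimized away are still reported.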
cmd = ["java", "-classpath", classpath, "daikon.DaikonSimple", dtrace_file]
if checked_invariant:
cmd += ["--disable-all-invariants", "--user-defined-invariant", checked_invariant]
cmd += ["--config_option", "daikon.Daikon.undo_opts=true"]
return common.run_cmd(cmd, print_output=True)['output']
def find_ppts_that_establish_inv_in_daikon_output(daikon_output, inv_substring):
ppts_with_inv = []
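# DaikonSimple output is a sequence of blocks separated by DAIKON_SPLITTER lines:
# the first line of each block names the program point (ppt) and the remaining lines
# list the invariants inferred there. Keep ppts whose block shows the requested
# invariant on a 'return' variable.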
start_of_new_block = False
current_method = None
lines = daikon_output.splitlines(True)
i = 0
while (i<len(lines)):
if DAIKON_SPLITTER in lines[i]:
i+=1
ppt_name = lines[i]
i+=1
while (i<len(lines) and DAIKON_SPLITTER not in lines[i]):
if inv_substring in lines[i] and "return" in lines[i]:
# check if the invariant is established on the return var.
ppts_with_inv+=[ppt_name]
i+=1
else:
i+=1
return ppts_with_inv
def find_ppts_that_establish_inv(dtrace_file, pattern_class_dir, pattern_class_name):
"""
This is the main method to be called from the outside.
INPUT: dtrace_file - for a given project
pattern_class_dir - the root class dir for the daikon pattern that needs to be added to the CP when running daikon
pattern_class_name - qualified name of the pattern class
OUTPUT: set of daikon program points (ppts) that establish the given invariant.
"""
cp = daikon_jar
if pattern_class_dir:
cp = daikon_jar+":"+pattern_class_dir
daikon_output = run_daikon_on_dtrace_file(dtrace_file, cp, pattern_class_name)
ppts = find_ppts_that_establish_inv_in_daikon_output(daikon_output, pattern_class_name)
return ppts
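# Example (sketch; the file and class names below are hypothetical):
# ppts = find_ppts_that_establish_inv("project.dtrace.gz", "build/classes", "MyInvariant")
# for ppt in ppts:
#     print(ppt)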
def main():
with common.cd(common.WORKING_DIR):
test_dtrace = "test.dtrace.gz"
test_inv_name = "TestInvariant"
ontology_to_daikon.create_daikon_invariant("README.md", test_inv_name)
cmd = ["javac", "-classpath", daikon_jar+ ":.", test_inv_name+".java"]
common.run_cmd(cmd, print_output=True)
print ("Finding program points")
ppts = find_ppts_that_establish_inv(test_dtrace, common.WORKING_DIR, test_inv_name)
print ("deleting temp files")
os.remove(test_inv_name+".class")
os.remove(test_inv_name+".java")
os.remove("test.inv.gz")
#output = run_daikon_on_dtrace_file(test_dtrace, checked_invariant="daikon.inv.unary.sequence.EltwiseIntLessThan")
#print output
#ppts = find_ppts_that_establish_inv_in_daikon_output(output, " sorted by ")
print ("Methods that establish FirstMuseInvariant:")
for ppt in ppts:
print(ppt)
if __name__ == '__main__':
main()
| aas-integration/integration-test2 | inv_check/inv_check.py | Python | mit | 2,879 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='GradespeedScraper',
version='0.1-dev',
description='Scrapes Gradespeed',
author='Davis Robertson',
author_email='[email protected]',
license='MIT',
url='https://github.com/epicdavi/GradespeedScraper/',
install_requires=['mechanize>=0.2.5', 'beautifulsoup4>=4.3,<4.4'],
) | EpicDavi/GradespeedScraper | setup.py | Python | mit | 430 |
# -*- coding: utf-8 -*-
from .base import *
DEBUG = True | memnonila/taskbuster-boilerplate | taskbuster/taskbuster/settings/testing.py | Python | mit | 57 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Node'
db.create_table(u'data_node', (
('node_id', self.gf('django.db.models.fields.IntegerField')(primary_key=True, db_index=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('indoor', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'data', ['Node'])
# Adding model 'Latest'
db.create_table(u'data_latest', (
('node_id', self.gf('django.db.models.fields.IntegerField')(primary_key=True, db_index=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('indoor', self.gf('django.db.models.fields.BooleanField')(default=False)),
('latitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('longitude', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('temperature', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('rh', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_5', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_6', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_7', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_8', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
))
db.send_create_signal(u'data', ['Latest'])
# Adding model 'DataPoint'
db.create_table(u'data_datapoint', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('node_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('temperature', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('rh', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_5', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_6', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_7', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_8', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('reading_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'data', ['DataPoint'])
# Adding model 'Dylos'
db.create_table(u'data_dylos', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('node_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('dylos_bin_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('reading_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'data', ['Dylos'])
# Adding model 'Alphasense'
db.create_table(u'data_alphasense', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('node_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('alphasense_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_5', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_6', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_7', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('alphasense_8', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('reading_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'data', ['Alphasense'])
# Adding model 'Met'
db.create_table(u'data_met', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('node_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('temperature', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('rh', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
('reading_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'data', ['Met'])
# Adding model 'AQI'
db.create_table(u'data_aqi', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('node_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('no', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('no2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('o3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('co', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_1', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_2', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_3', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('dylos_bin_4', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('mitaqi', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('no_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no2_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('o3_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('co_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('dylos_bin_1_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('dylos_bin_2_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('dylos_bin_3_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('dylos_bin_4_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('mitaqi_rank', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('added_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
))
db.send_create_signal(u'data', ['AQI'])
# Adding model 'SensorDetail'
db.create_table(u'data_sensordetail', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('node_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
('no_serial', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('o3_serial', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('no2_serial', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('co_serial', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('no_electronic_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no_total_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no_electronic_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no_total_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no_electronic_we_sens', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no_total_we_sens', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('o3_electronic_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('o3_total_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('o3_electronic_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('o3_total_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('o3_electronic_we_sens', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('o3_total_we_sens', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('no2_electronic_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no2_total_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no2_electronic_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no2_total_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no2_electronic_we_sens', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('no2_total_we_sens', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('co_electronic_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('co_total_we_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('co_electronic_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('co_total_aux_zero', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('co_electronic_we_sens', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('co_total_we_sens', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal(u'data', ['SensorDetail'])
def backwards(self, orm):
# Deleting model 'Node'
db.delete_table(u'data_node')
# Deleting model 'Latest'
db.delete_table(u'data_latest')
# Deleting model 'DataPoint'
db.delete_table(u'data_datapoint')
# Deleting model 'Dylos'
db.delete_table(u'data_dylos')
# Deleting model 'Alphasense'
db.delete_table(u'data_alphasense')
# Deleting model 'Met'
db.delete_table(u'data_met')
# Deleting model 'AQI'
db.delete_table(u'data_aqi')
# Deleting model 'SensorDetail'
db.delete_table(u'data_sensordetail')
models = {
u'data.alphasense': {
'Meta': {'object_name': 'Alphasense'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'alphasense_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_5': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_6': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_7': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_8': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'data.aqi': {
'Meta': {'object_name': 'AQI'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'co': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'co_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_1_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_2_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_3_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_4_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'mitaqi': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mitaqi_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'no2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'no2_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'o3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'o3_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'data.datapoint': {
'Meta': {'object_name': 'DataPoint'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'alphasense_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_5': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_6': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_7': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_8': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'data.dylos': {
'Meta': {'object_name': 'Dylos'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'dylos_bin_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'data.latest': {
'Meta': {'object_name': 'Latest'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'alphasense_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_5': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_6': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_7': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'alphasense_8': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_1': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_3': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dylos_bin_4': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'indoor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_index': 'True'}),
'rh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'data.met': {
'Meta': {'object_name': 'Met'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'reading_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rh': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'temperature': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'data.node': {
'Meta': {'object_name': 'Node'},
'added_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'indoor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_index': 'True'})
},
u'data.sensordetail': {
'Meta': {'object_name': 'SensorDetail'},
'co_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'co_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'co_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'co_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'co_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'co_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'co_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'no2_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no2_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no2_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no2_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'no2_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no2_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'no2_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'no_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'no_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'no_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'node_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'o3_electronic_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'o3_electronic_we_sens': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'o3_electronic_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'o3_serial': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'o3_total_aux_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'o3_total_we_sens': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'o3_total_we_zero': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['data'] | clairityproject/backend | data/migrations/0001_initial.py | Python | mit | 29,927 |
from django.conf import settings
from django.template import Library
from ..utils import get_oauth_handler, get_gravatar_url
register = Library()
@register.simple_tag
def github_auth_url():
oauth_handler = get_oauth_handler()
return oauth_handler.authorize_url(settings.GITHUB['SCOPES'])
@register.simple_tag
def gravatar_url(email, size):
return get_gravatar_url(email, size)
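# Example template usage for the simple tags above (illustrative; assumes the
# library is loaded by its module name, `oauth`, and that `user.email` exists
# in the template context):
#
#   {% load oauth %}
#   <a href="{% github_auth_url %}">Sign in with GitHub</a>
#   <img src="{% gravatar_url user.email 80 %}" alt="avatar">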
| beni55/djangolint | project/oauth/templatetags/oauth.py | Python | isc | 396 |
from random import Random
from collections_extended import setlist
# The version of seeding to use for random
SEED_VERSION = 2
# Common alphabets to use
ALPHANUM = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def shuffle(key, x):
random = Random(key)
random.shuffle(x)
def key_gen(key, base):
'''Generate values from the key.
This will indefinitely generate integers in [0, base).
key is used to initialize random, so that the "random" number generated are
the same each time for a given key. This turns a key of any length into an
"infinitely" long key without simply cycling over the key.
'''
random = Random(key)
while True:
value = random.randint(0, base-1)
yield value
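# Illustrative property of key_gen ('secret' and 58 are arbitrary example
# values): two generators built from the same key yield the same sequence,
# so a short key behaves like an endless key stream.
#   g1, g2 = key_gen('secret', 58), key_gen('secret', 58)
#   [next(g1) for _ in range(5)] == [next(g2) for _ in range(5)]  # True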
def encode_base_n(num, base, min_length=0):
	'''Convert an integer into a list of integers (least-significant digit first) storing the number in base base.
If a minimum length is specified, the result will be 0-padded.
'''
out = []
while num > 0 or len(out) < min_length:
num, remainder = divmod(num, base)
out.append(remainder)
return out
def decode_base_n(int_list, base):
	'''Convert a list of digits (least-significant first) representing a number in base base to an integer.'''
out = 0
for index, num in enumerate(int_list):
if num >= base or num < 0:
raise ValueError
out += (base ** index) * num
return out
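# Worked example for the base conversions above (digits are stored
# least-significant first):
#   encode_base_n(11, 2)                -> [1, 1, 0, 1]
#   decode_base_n([1, 1, 0, 1], 2)      -> 11
#   encode_base_n(5, 10, min_length=3)  -> [5, 0, 0]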
def calc_check_digits(int_list, base, num_check_chars):
checksum_base = base ** num_check_chars
checksum_value = sum(int_list) % checksum_base
return encode_base_n(checksum_value, base, min_length=num_check_chars)
def add_check_digits(int_list, base, num_check_chars):
'''Calculate a checksum for int_list and translate into a number of base base
made up of num_check_chars digits.
Args:
int_list: A list of integers >= 0 and < base
base: The number of characters in the alphabet
num_check_chars: The number of check characters to return
Returns:
		int_list with the check digits (the checksum encoded in base base) appended.
'''
check_digits = calc_check_digits(int_list, base, num_check_chars)
return int_list + check_digits
def eval_check_digits(decrypted_ints, base, num_check_chars):
'''Evaluate the check digits in decrypted_ints.
Args:
decrypted_ints: A list of integers >=0 and < base (the result of add_check_digits)
Returns:
The decrypted_ints without the check digits
Raises:
ValueError: if the check digits don't match
'''
if num_check_chars == 0:
return decrypted_ints
int_list = decrypted_ints[:-num_check_chars]
check_digits = decrypted_ints[-num_check_chars:]
if calc_check_digits(int_list, base, num_check_chars) != check_digits:
raise ValueError()
return int_list
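# Worked example for the check-digit helpers above (base 10, one check digit):
#   calc_check_digits([1, 2, 3], 10, 1)    -> [6]           (1+2+3 == 6, mod 10)
#   add_check_digits([1, 2, 3], 10, 1)     -> [1, 2, 3, 6]
#   eval_check_digits([1, 2, 3, 6], 10, 1) -> [1, 2, 3]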
def encode(int_list, alphabet):
'''Encode ints using alphabet.'''
char_list = []
for i in int_list:
		if i >= len(alphabet) or i < 0:
raise ValueError
char_list.append(alphabet[i])
return ''.join(char_list)
def decode(s, alphabet):
'''Decode a string s using alphabet returning a list of ints.'''
try:
return [alphabet.index(c) for c in s]
except (TypeError, IndexError):
raise ValueError
def encrypt(int_list, key, base):
encrypted_ints = []
moving_value = 0
for char_index, key_value in zip(int_list, key_gen(key, base)):
encrypted_int = (char_index + key_value + moving_value) % base
encrypted_ints.append(encrypted_int)
moving_value += encrypted_int
return encrypted_ints
def decrypt(int_list, key, base):
decrypted_ints = []
moving_value = 0
for char_index, key_value in zip(int_list, key_gen(key, base)):
decrypted_int = (char_index - key_value - moving_value) % base
decrypted_ints.append(decrypted_int)
moving_value += char_index
return decrypted_ints
def obfuscate(num, key, alphabet, min_chars=0, num_check_chars=1):
''' Obfuscate num using key.
	This does some minor encryption by adding key-derived values and a moving value to each digit.
	The moving value ensures that one small change in the input changes all of the
	resulting characters.
Args:
num: The integer to obfuscate
key: An int, string or bytes to generate key values (anything that can be passed to random.seed)
alphabet: A list of characters to use for the alphabet
min_chars: A minimum number of chars for the resulting string
num_check_chars: The number of chars to use as a check
Returns:
A string encoding the number in the passed alphabet and encrypted with key.
Raises:
ValueError: if num is not a number or < 0
'''
try:
if num < 0:
raise ValueError()
except TypeError:
raise ValueError()
base = len(alphabet)
num_as_ints = encode_base_n(num, base, min_chars)
unencrypted_digits = add_check_digits(num_as_ints, base, num_check_chars)
encrypted_digits = encrypt(unencrypted_digits, key, base)
return encode(encrypted_digits, alphabet)
def deobfuscate(s, key, alphabet, num_check_chars=1):
'''Deobfuscate a string using key and alphabet.
key, alphabet and num_check_chars must be identical to the values used to obfuscate.
Args:
s: The string to deobfuscate
key: The key used to obfuscate
alphabet: The alphabet used to obfuscate
num_check_chars: The number of chars to use as a check
Returns:
The deobfuscated integer.
Raises:
ValueError: if s isn't a string, s doesn't use alphabet or the checksum doesn't match
'''
base = len(alphabet)
encrypted_ints = decode(s, alphabet)
decrypted_ints = decrypt(encrypted_ints, key, base)
num_as_ints = eval_check_digits(decrypted_ints, base, num_check_chars)
return decode_base_n(num_as_ints, base)
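# Illustrative round trip with the module-level obfuscate()/deobfuscate()
# helpers above (the key and number are arbitrary examples; the obfuscated
# string itself depends on the key):
#   token = obfuscate(12345, 'example-key', BASE58, min_chars=6)
#   deobfuscate(token, 'example-key', BASE58)  -> 12345
#   deobfuscate(token, 'wrong-key', BASE58)    -> raises ValueError
#   (unless the decrypted check digit happens to collide by chance)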
class Obfuscator():
def __init__(self, key, alphabet=None, min_length=0, num_check_chars=1, version=1):
'''
This accepts a version number in case the algorithm changes at some point
in the future.
Args:
key: The key.
alphabet: Optionally, specify an alternative alphabet to use.
min_length: An encoded value will always be at least min_length
characters (including the check characters)
num_check_chars: The number of chars used for the check
version: The version of the algorithm to use.
'''
if isinstance(num_check_chars, int) and num_check_chars >= 0:
self.num_check_chars = num_check_chars
else:
raise ValueError('num_check_chars must be an int >= 0')
if isinstance(min_length, int) and min_length >= 0:
self.min_length = min_length - num_check_chars
else:
raise ValueError('min_length must be an int >= 0')
self.key = key
alphabet = list(alphabet or ALPHANUM)
shuffle(key, alphabet)
self.alphabet = setlist(alphabet)
def obfuscate(self, num, salt=None, min_length=None):
if salt:
key = self.key + salt
else:
key = self.key
if min_length is None:
min_length = self.min_length
return obfuscate(num, key, self.alphabet, min_length, self.num_check_chars)
def deobfuscate(self, s, salt=None):
if salt:
key = self.key + salt
else:
key = self.key
return deobfuscate(s, key, self.alphabet, self.num_check_chars)
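# Minimal usage sketch for the Obfuscator class (the key, salt and values are
# arbitrary examples; real callers should supply their own secret key):
if __name__ == '__main__':
	obfuscator = Obfuscator('example-secret-key', alphabet=BASE58, min_length=8)
	token = obfuscator.obfuscate(42)
	assert obfuscator.deobfuscate(token) == 42
	# A salt changes the effective key, so the same salt must be used to decode.
	salted = obfuscator.obfuscate(42, salt='user-123')
	assert obfuscator.deobfuscate(salted, salt='user-123') == 42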
| mlenzen/flask-obfuscateids | flask_obfuscateids/lib.py | Python | bsd-3-clause | 6,881 |
import time
import math
import rospy
import sys
from classes.BNO055 import *
from tf.transformations import quaternion_from_euler, euler_from_quaternion
class ImuDriver(object):
def __init__(self, serial_port="/dev/ttyUSB0", calibration_vector=[]):
self.degrees2rad = math.pi / 180.0
self.debug = False
self.string_debug = ''
self.calibration_vector = calibration_vector
self.real_calibration_vector = []
self.counter_calibration = 0
self.counter_max_calibration = 10
self.serial_port = serial_port
[self.cal_sys, self.cal_gyro, self.cal_accel, self.cal_mag] = [0., 0., 0., 0.]
self.bno = BNO055(serial_port=serial_port)
# Interface properties
self.enable_load_calibration = False
self.enable_set_offset = False
self.enable_calibration_status_reading = False
self.enable_print_calibration_vector = False
self.enable_reset_imu = False
# IMU info
self.linear_acceleration_x = 0.0
self.linear_acceleration_y = 0.0
self.linear_acceleration_z = 0.0
self.angular_velocity_x = 0.0
self.angular_velocity_y = 0.0
self.angular_velocity_z = 0.0
self.euler_yaw = 0.0
self.euler_roll = 0.0
self.euler_pitch = 0.0
self.orientation_x = 0.0
self.orientation_y = 0.0
self.orientation_z = 0.0
self.orientation_w = 0.0
# Offset info
self.offset_yaw = 0.0
self.offset_roll = 0.0
self.offset_pitch = 0.0
self.temperature = 0
def init_imu(self):
rospy.loginfo("initializing IMU, mode: OPERATION_MODE_NDOF")
is_init_imu = False
while not is_init_imu:
try:
self.bno.begin(mode=OPERATION_MODE_NDOF)
# self.bno.begin(mode=OPERATION_MODE_NDOF_FMC_OFF) # more stable
# self.bno.begin(mode=OPERATION_MODE_IMUPLUS)
# self.bno.begin(mode=OPERATION_MODE_M4G)
is_init_imu = True
self.string_debug = 'Connected to BNO055 at port: ' + str(self.serial_port)
except BaseException as eb:
self.string_debug = 'Failed to initialize BNO055 at port: ' \
+ str(self.serial_port) + " Error: " + str(eb)
time.sleep(0.1)
if self.debug:
rospy.loginfo(self.string_debug)
rospy.loginfo("initializing Device")
is_init_device = False
while not is_init_device:
status, self_test, error = self.bno.get_system_status(False)
if error == 0 and status != 0x01:
is_init_device = True
else:
self.string_debug = 'Failed to initialize IMU port: ' + str(self.serial_port) + ' error: ' + str(error)
time.sleep(0.1)
if self.debug:
rospy.loginfo(self.string_debug)
def load_calibration(self):
# computed using tutorial:
# https://learn.adafruit.com/bno055-absolute-orientation-sensor-with-raspberry-pi-and-beaglebone-black/webgl-example
# Bosch video: https://www.youtube.com/watch?v=Bw0WuAyGsnY
try:
self.bno.serial_attempt_delay = 0.3
self.bno.set_calibration(self.calibration_vector)
self.bno.serial_attempt_delay = 0.0
time.sleep(1.5) # wait for stable measurement
return True
except BaseException as eb:
self.string_debug = "load_calibration error" + str(eb)
rospy.loginfo(self.string_debug)
return False
def update_offset(self):
time.sleep(1) # Wait for stable measurements
qx, qy, qz, qw = self.bno.read_quaternion() # Orientation as a quaternion
(self.offset_roll, self.offset_pitch, self.offset_yaw) = euler_from_quaternion([qx, qy, qz, qw])
self.string_debug = "calibration offset: [yaw %f, roll %f, pitch %f]" % (
self.offset_yaw, self.offset_roll, self.offset_pitch)
rospy.loginfo(self.string_debug)
def get_calibration_status(self):
try:
if self.counter_calibration > self.counter_max_calibration:
self.cal_sys, self.cal_gyro, self.cal_accel, self.cal_mag = self.bno.get_calibration_status()
self.counter_calibration = 0
if self.enable_print_calibration_vector:
self.bno.serial_attempt_delay = 0.5
self.real_calibration_vector = self.bno.get_calibration()
self.bno.serial_attempt_delay = 0.0
time.sleep(0.5)
else:
self.counter_calibration += 1
except BaseException as eb:
self.string_debug = "get_calibration_status error" + str(eb)
rospy.loginfo(self.string_debug)
def read(self):
try:
# reset IMU if flag is active
if self.enable_reset_imu:
self.init_imu()
self.enable_reset_imu = False
return [False, 'enable_reset_imu']
# calibrate IMU if flag is active
if self.enable_load_calibration:
self.load_calibration()
self.enable_load_calibration = False
return [False, 'enable_load_calibration']
# Set offset IMU if flag is active
if self.enable_set_offset:
self.update_offset()
self.enable_set_offset = False
return [False, 'enable_set_offset']
# Read calibration status if flag is active (Reduces frame rate!)
if self.enable_calibration_status_reading:
self.get_calibration_status()
# mx, my, mz = self.bno.read_magnetometer() # Magnetometer data (in micro-Teslas)
# ax, ay, az = self.bno.read_accelerometer() # Accelerometer data (in meters per second squared)
# yaw, roll, pitch = self.bno.read_euler() # Euler angles for heading, roll, pitch (degrees)
# temp = self.bno.read_temp() # Temperature in degrees Celsius
# Linear acceleration data (i.e. acceleration from movement, not gravity--
# returned in meters per second squared)
# lx, ly, lz = self.bno.read_linear_acceleration()
# Gravity acceleration data (i.e. acceleration just from gravity--returned
# in meters per second squared):
ax, ay, az = self.bno.read_gravity()
qx, qy, qz, qw = self.bno.read_quaternion() # Orientation as a quaternion
gx, gy, gz = self.bno.read_gyroscope() # Gyroscope data (in degrees per second)
# IMU info
self.linear_acceleration_x = ax
self.linear_acceleration_y = ay
self.linear_acceleration_z = az
self.angular_velocity_x = gx
self.angular_velocity_y = gy
self.angular_velocity_z = gz
# update (adjust) offset in euler space
(roll, pitch, yaw) = euler_from_quaternion([qx, qy, qz, qw])
current_roll = roll - self.offset_roll
current_pitch = pitch - self.offset_pitch
current_yaw = yaw - self.offset_yaw
quat = quaternion_from_euler(current_roll, current_pitch, current_yaw)
self.orientation_x = quat[0]
self.orientation_y = quat[1]
self.orientation_z = quat[2]
self.orientation_w = quat[3]
return [True, 'ok']
except BaseException as eb:
self.init_imu()
return [False, 'Error while reading IMU sensor: ' + str(eb)]
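# Rough usage sketch (illustrative; the serial port, calibration vector and
# polling loop are assumptions, and the ROS node must already be initialised):
#   driver = ImuDriver(serial_port='/dev/ttyUSB0', calibration_vector=[])
#   driver.init_imu()
#   while not rospy.is_shutdown():
#       ok, msg = driver.read()
#       if ok:
#           # driver.orientation_*, driver.angular_velocity_* and
#           # driver.linear_acceleration_* now hold the latest sample
#           pass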
| francisc0garcia/autonomous_bicycle | src/classes/ImuDriver.py | Python | apache-2.0 | 7,759 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 20:52
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_user_staged'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
),
]
| amstart/demo | demoslogic/users/migrations/0004_auto_20161009_2252.py | Python | mit | 747 |
from alerts import Alerter, BasicMatchString
import requests
import json
class ServiceNowAlerter(Alerter):
required_options = set(['username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id'])
# Alert is called
def alert(self, matches):
for match in matches:
# Parse everything into description.
description = str(BasicMatchString(self.rule, match))
# Set proper headers
headers = {
"Content-Type":"application/json",
"Accept":"application/json;charset=utf-8"
}
data = {
"description": description,
"short_description": self.rule['short_description'],
"comments": self.rule['comments'],
"assignment_group": self.rule['assignment_group'],
"category": self.rule['category'],
"subcategory": self.rule['subcategory'],
"cmdb_ci": self.rule['cmdb_ci'],
"caller_id": self.rule["caller_id"]
}
response = requests.post(self.rule['servicenow_rest_url'], auth=(self.rule['username'], self.rule['password']), headers=headers , data=json.dumps(data))
if response.status_code != 201:
                print('Status:', response.status_code, 'Headers:', response.headers, 'Error Response:', response.json())
exit()
# get_info is called after an alert is sent to get data that is written back
# to Elasticsearch in the field "alert_info"
# It should return a dict of information relevant to what the alert does
def get_info(self):
return {'type': 'Awesome Alerter',
'SN_description': self.rule['description']} | Vitiate/ShellScripts | ElastAlert/elastalert_modules/servicenow_alert.py | Python | gpl-2.0 | 1,758 |
"""
Common tests shared by test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import support
from collections import UserList
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123]
def __str__(self): return '{0} {1} {2}'.format(*self.seq)
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class BaseTest:
# These tests are for buffers of values (bytes) and not
# specific to character interpretation, used for bytes objects
# and various string implementations
# The type to be tested
    # Change in subclasses to change the behaviour of fixtype()
type2test = None
# Whether the "contained items" of the container are integers in
# range(0, 256) (i.e. bytes, bytearray) or strings of length 1
# (str)
contains_bytes = False
# All tests pass their arguments to the testing methods
    # as str objects. fixtype() can be used to propagate
# these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.items()
])
else:
return obj
# check that obj.method(*args) returns result
def checkequal(self, result, obj, methodname, *args, **kwargs):
result = self.fixtype(result)
obj = self.fixtype(obj)
args = self.fixtype(args)
kwargs = {k: self.fixtype(v) for k,v in kwargs.items()}
realresult = getattr(obj, methodname)(*args, **kwargs)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if obj is realresult:
try:
class subtype(self.__class__.type2test):
pass
except TypeError:
pass # Skip this if we can't subclass
else:
obj = subtype(obj)
realresult = getattr(obj, methodname)(*args)
self.assertIsNot(obj, realresult)
# check that obj.method(*args) raises exc
def checkraises(self, exc, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
with self.assertRaises(exc) as cm:
getattr(obj, methodname)(*args)
self.assertNotEqual(str(cm.exception), '')
# call obj.method(*args) without any checks
def checkcall(self, obj, methodname, *args):
obj = self.fixtype(obj)
args = self.fixtype(args)
getattr(obj, methodname)(*args)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxsize, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxsize, 0)
self.checkraises(TypeError, 'hello', 'count')
if self.contains_bytes:
self.checkequal(0, 'hello', 'count', 42)
else:
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in range(base ** digits):
entry = []
for j in range(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = [self.fixtype(ts) for ts in teststrings]
for i in teststrings:
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, self.fixtype(''))),
len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
if self.contains_bytes:
self.checkequal(-1, 'hello', 'find', 42)
else:
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxsize, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxsize, 0)
# issue 7458
self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in range(base ** digits):
entry = []
for j in range(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = [self.fixtype(ts) for ts in teststrings]
for i in teststrings:
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rfind')
if self.contains_bytes:
self.checkequal(-1, 'hello', 'rfind', 42)
else:
self.checkraises(TypeError, 'hello', 'rfind', 42)
# For a variety of combinations,
# verify that str.rfind() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in range(base ** digits):
entry = []
for j in range(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = [self.fixtype(ts) for ts in teststrings]
for i in teststrings:
for j in teststrings:
loc = i.rfind(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
# issue 7458
self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
# issue #15534
self.checkequal(0, '<......\u043c...', "rfind", "<")
def test_index(self):
self.checkequal(0, 'abcdefghiabc', 'index', '')
self.checkequal(3, 'abcdefghiabc', 'index', 'def')
self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'index')
if self.contains_bytes:
self.checkraises(ValueError, 'hello', 'index', 42)
else:
self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
self.checkequal(12, 'abcdefghiabc', 'rindex', '')
self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rindex')
if self.contains_bytes:
self.checkraises(ValueError, 'hello', 'rindex', 42)
else:
self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi',
'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi',
'expandtabs', 8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi',
'expandtabs', 4)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi',
'expandtabs')
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi',
'expandtabs', 8)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi',
'expandtabs', 4)
self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi',
'expandtabs', 4)
# check keyword args
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi',
'expandtabs', tabsize=8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi',
'expandtabs', tabsize=4)
self.checkequal(' a\n b', ' \ta\n\tb', 'expandtabs', 1)
self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
self.checkraises(OverflowError,
'\ta\n\tb', 'expandtabs', sys.maxsize)
def test_split(self):
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
sys.maxsize-2)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
self.checkequal(['a']*15 +['a|a|a|a|a'],
('a|'*20)[:-1], 'split', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
sys.maxsize-10)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'split', 'test')
self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
self.checkequal(['', ''], 'aaa', 'split', 'aaa')
self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
self.checkequal([''], '', 'split', 'aaa')
self.checkequal(['aa'], 'aa', 'split', 'aaa')
self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
'split', 'BLAH', 18)
# with keyword args
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', sep='|')
self.checkequal(['a', 'b|c|d'],
'a|b|c|d', 'split', '|', maxsplit=1)
self.checkequal(['a', 'b|c|d'],
'a|b|c|d', 'split', sep='|', maxsplit=1)
self.checkequal(['a', 'b|c|d'],
'a|b|c|d', 'split', maxsplit=1, sep='|')
self.checkequal(['a', 'b c d'],
'a b c d', 'split', maxsplit=1)
# argument type
self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'split', '')
self.checkraises(ValueError, 'hello', 'split', '', 0)
def test_rsplit(self):
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
sys.maxsize-100)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
self.checkequal(['a|a|a|a|a']+['a']*15,
('a|'*20)[:-1], 'rsplit', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
sys.maxsize-5)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'rsplit', 'test')
self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
self.checkequal([''], '', 'rsplit', 'aaa')
self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
'rsplit', 'BLAH', 18)
# with keyword args
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', sep='|')
self.checkequal(['a|b|c', 'd'],
'a|b|c|d', 'rsplit', '|', maxsplit=1)
self.checkequal(['a|b|c', 'd'],
'a|b|c|d', 'rsplit', sep='|', maxsplit=1)
self.checkequal(['a|b|c', 'd'],
'a|b|c|d', 'rsplit', maxsplit=1, sep='|')
self.checkequal(['a b c', 'd'],
'a b c d', 'rsplit', maxsplit=1)
# argument type
self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'rsplit', '')
self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
def test_replace(self):
EQ = self.checkequal
# Operations on the empty string
EQ("", "", "replace", "", "")
EQ("A", "", "replace", "", "A")
EQ("", "", "replace", "A", "")
EQ("", "", "replace", "A", "A")
EQ("", "", "replace", "", "", 100)
EQ("", "", "replace", "", "", sys.maxsize)
# interleave (from=="", 'to' gets inserted everywhere)
EQ("A", "A", "replace", "", "")
EQ("*A*", "A", "replace", "", "*")
EQ("*1A*1", "A", "replace", "", "*1")
EQ("*-#A*-#", "A", "replace", "", "*-#")
EQ("*-A*-A*-", "AA", "replace", "", "*-")
EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxsize)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
EQ("*-A*-A", "AA", "replace", "", "*-", 2)
EQ("*-AA", "AA", "replace", "", "*-", 1)
EQ("AA", "AA", "replace", "", "*-", 0)
# single character deletion (from=="A", to=="")
EQ("", "A", "replace", "A", "")
EQ("", "AAA", "replace", "A", "")
EQ("", "AAA", "replace", "A", "", -1)
EQ("", "AAA", "replace", "A", "", sys.maxsize)
EQ("", "AAA", "replace", "A", "", 4)
EQ("", "AAA", "replace", "A", "", 3)
EQ("A", "AAA", "replace", "A", "", 2)
EQ("AA", "AAA", "replace", "A", "", 1)
EQ("AAA", "AAA", "replace", "A", "", 0)
EQ("", "AAAAAAAAAA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "", -1)
EQ("BCD", "ABACADA", "replace", "A", "", sys.maxsize)
EQ("BCD", "ABACADA", "replace", "A", "", 5)
EQ("BCD", "ABACADA", "replace", "A", "", 4)
EQ("BCDA", "ABACADA", "replace", "A", "", 3)
EQ("BCADA", "ABACADA", "replace", "A", "", 2)
EQ("BACADA", "ABACADA", "replace", "A", "", 1)
EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
EQ("BCD", "ABCAD", "replace", "A", "")
EQ("BCD", "ABCADAA", "replace", "A", "")
EQ("BCD", "BCD", "replace", "A", "")
EQ("*************", "*************", "replace", "A", "")
EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
# substring deletion (from=="the", to=="")
EQ("", "the", "replace", "the", "")
EQ("ater", "theater", "replace", "the", "")
EQ("", "thethe", "replace", "the", "")
EQ("", "thethethethe", "replace", "the", "")
EQ("aaaa", "theatheatheathea", "replace", "the", "")
EQ("that", "that", "replace", "the", "")
EQ("thaet", "thaet", "replace", "the", "")
EQ("here and re", "here and there", "replace", "the", "")
EQ("here and re and re", "here and there and there",
"replace", "the", "", sys.maxsize)
EQ("here and re and re", "here and there and there",
"replace", "the", "", -1)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 3)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 2)
EQ("here and re and there", "here and there and there",
"replace", "the", "", 1)
EQ("here and there and there", "here and there and there",
"replace", "the", "", 0)
EQ("here and re and re", "here and there and there", "replace", "the", "")
EQ("abc", "abc", "replace", "the", "")
EQ("abcdefg", "abcdefg", "replace", "the", "")
# substring deletion (from=="bob", to=="")
EQ("bob", "bbobob", "replace", "bob", "")
EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
# single character replace in place (len(from)==len(to)==1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxsize)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
EQ("who goes there?", "Who goes there?", "replace", "W", "w")
EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
# substring replace in place (len(from)==len(to) > 1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxsize)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
EQ("cobob", "bobob", "replace", "bob", "cob")
EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
EQ("bobob", "bobob", "replace", "bot", "bot")
# replace single character (len(from)==1, len(to)>1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxsize)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
# issue #15534
EQ('...\u043c......<', '...\u043c......<', "replace", "<", "<")
EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
# replace substring (len(from)>1, len(to)!=len(from))
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham")
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", sys.maxsize)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", -1)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 4)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 3)
EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 2)
EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 1)
EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 0)
EQ("bobob", "bobobob", "replace", "bobob", "bob")
EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
# XXX Commented out. Is there any reason to support buffer objects
# as arguments for str.replace()? GvR
## ba = bytearray('a')
## bb = bytearray('b')
## EQ("bbc", "abc", "replace", ba, bb)
## EQ("aac", "abc", "replace", bb, ba)
#
self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
self.checkequal('abc', 'abc', 'replace', '', '-', 0)
self.checkequal('', '', 'replace', '', '')
self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
self.checkequal('abc', 'abc', 'replace', 'xy', '--')
# Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
# MemoryError due to empty result (platform malloc issue when requesting
# 0 bytes).
self.checkequal('', '123', 'replace', '123', '')
self.checkequal('', '123123', 'replace', '123', '')
self.checkequal('x', '123x123', 'replace', '123', '')
self.checkraises(TypeError, 'hello', 'replace')
self.checkraises(TypeError, 'hello', 'replace', 42)
self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
@unittest.skipIf(sys.maxsize > (1 << 32) or struct.calcsize('P') != 4,
'only applies to 32-bit platforms')
def test_replace_overflow(self):
# Check for overflow checking on 32 bit machines
A2_16 = "A" * (2**16)
self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "AA", A2_16+A2_16)
class CommonTest(BaseTest):
    # This testcase contains tests that can be used in all
    # string-like classes. Currently this is str, unicode,
    # UserString and the string module.
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
# check that titlecased chars are lowered correctly
# \u1ffc is the titlecased char
self.checkequal('\u03a9\u0399\u1ff3\u1ff3\u1ff3',
'\u1ff3\u1ff3\u1ffc\u1ffc', 'capitalize')
# check with cased non-letter chars
self.checkequal('\u24c5\u24e8\u24e3\u24d7\u24de\u24dd',
'\u24c5\u24ce\u24c9\u24bd\u24c4\u24c3', 'capitalize')
self.checkequal('\u24c5\u24e8\u24e3\u24d7\u24de\u24dd',
'\u24df\u24e8\u24e3\u24d7\u24de\u24dd', 'capitalize')
self.checkequal('\u2160\u2171\u2172',
'\u2160\u2161\u2162', 'capitalize')
self.checkequal('\u2160\u2171\u2172',
'\u2170\u2171\u2172', 'capitalize')
# check with Ll chars with no upper - nothing changes here
self.checkequal('\u019b\u1d00\u1d86\u0221\u1fb7',
'\u019b\u1d00\u1d86\u0221\u1fb7', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
def test_additional_split(self):
self.checkequal(['this', 'is', 'the', 'split', 'function'],
'this is the split function', 'split')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
sys.maxsize-1)
self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal([], ' ', 'split')
self.checkequal(['a'], ' a ', 'split')
self.checkequal(['a', 'b'], ' a b ', 'split')
self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'split')
self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
# mixed use of str and unicode
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', ' ', 2)
def test_additional_rsplit(self):
self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
'this is the rsplit function', 'rsplit')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
sys.maxsize-20)
self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal([], ' ', 'rsplit')
self.checkequal(['a'], ' a ', 'rsplit')
self.checkequal(['a', 'b'], ' a b ', 'rsplit')
self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
self.checkequal([' a b','c'], ' a b c ', 'rsplit',
None, 1)
self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'rsplit')
self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
self.checkequal([' a a'] + ['a']*18, aaa, 'rsplit', None, 18)
# mixed use of str and unicode
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', ' ', 2)
def test_strip(self):
self.checkequal('hello', ' hello ', 'strip')
self.checkequal('hello ', ' hello ', 'lstrip')
self.checkequal(' hello', ' hello ', 'rstrip')
self.checkequal('hello', 'hello', 'strip')
# strip/lstrip/rstrip with None arg
self.checkequal('hello', ' hello ', 'strip', None)
self.checkequal('hello ', ' hello ', 'lstrip', None)
self.checkequal(' hello', ' hello ', 'rstrip', None)
self.checkequal('hello', 'hello', 'strip', None)
# strip/lstrip/rstrip with str arg
self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
self.checkequal('hello', 'hello', 'strip', 'xyz')
self.checkraises(TypeError, 'hello', 'strip', 42, 42)
self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
self.checkequal('abc ', 'abc', 'ljust', 10)
self.checkequal('abc ', 'abc', 'ljust', 6)
self.checkequal('abc', 'abc', 'ljust', 3)
self.checkequal('abc', 'abc', 'ljust', 2)
self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
self.checkraises(TypeError, 'abc', 'ljust')
def test_rjust(self):
self.checkequal(' abc', 'abc', 'rjust', 10)
self.checkequal(' abc', 'abc', 'rjust', 6)
self.checkequal('abc', 'abc', 'rjust', 3)
self.checkequal('abc', 'abc', 'rjust', 2)
self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
self.checkraises(TypeError, 'abc', 'rjust')
def test_center(self):
self.checkequal(' abc ', 'abc', 'center', 10)
self.checkequal(' abc ', 'abc', 'center', 6)
self.checkequal('abc', 'abc', 'center', 3)
self.checkequal('abc', 'abc', 'center', 2)
self.checkequal('***abc****', 'abc', 'center', 10, '*')
self.checkraises(TypeError, 'abc', 'center')
def test_swapcase(self):
self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_zfill(self):
self.checkequal('123', '123', 'zfill', 2)
self.checkequal('123', '123', 'zfill', 3)
self.checkequal('0123', '123', 'zfill', 4)
self.checkequal('+123', '+123', 'zfill', 3)
self.checkequal('+123', '+123', 'zfill', 4)
self.checkequal('+0123', '+123', 'zfill', 5)
self.checkequal('-123', '-123', 'zfill', 3)
self.checkequal('-123', '-123', 'zfill', 4)
self.checkequal('-0123', '-123', 'zfill', 5)
self.checkequal('000', '', 'zfill', 3)
self.checkequal('34', '34', 'zfill', 1)
self.checkequal('0034', '34', 'zfill', 4)
self.checkraises(TypeError, '123', 'zfill')
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, unicode, UserString
# (but not the string module)
def test_islower(self):
self.checkequal(False, '', 'islower')
self.checkequal(True, 'a', 'islower')
self.checkequal(False, 'A', 'islower')
self.checkequal(False, '\n', 'islower')
self.checkequal(True, 'abc', 'islower')
self.checkequal(False, 'aBc', 'islower')
self.checkequal(True, 'abc\n', 'islower')
self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
self.checkequal(False, '', 'isupper')
self.checkequal(False, 'a', 'isupper')
self.checkequal(True, 'A', 'isupper')
self.checkequal(False, '\n', 'isupper')
self.checkequal(True, 'ABC', 'isupper')
self.checkequal(False, 'AbC', 'isupper')
self.checkequal(True, 'ABC\n', 'isupper')
self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
self.checkequal(False, '', 'istitle')
self.checkequal(False, 'a', 'istitle')
self.checkequal(True, 'A', 'istitle')
self.checkequal(False, '\n', 'istitle')
self.checkequal(True, 'A Titlecased Line', 'istitle')
self.checkequal(True, 'A\nTitlecased Line', 'istitle')
self.checkequal(True, 'A Titlecased, Line', 'istitle')
self.checkequal(False, 'Not a capitalized String', 'istitle')
self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
self.checkequal(False, 'Not--a Titlecase String', 'istitle')
self.checkequal(False, 'NOT', 'istitle')
self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
self.checkequal(False, '', 'isspace')
self.checkequal(False, 'a', 'isspace')
self.checkequal(True, ' ', 'isspace')
self.checkequal(True, '\t', 'isspace')
self.checkequal(True, '\r', 'isspace')
self.checkequal(True, '\n', 'isspace')
self.checkequal(True, ' \t\r\n', 'isspace')
self.checkequal(False, ' \t\r\na', 'isspace')
self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
self.checkequal(False, '', 'isalpha')
self.checkequal(True, 'a', 'isalpha')
self.checkequal(True, 'A', 'isalpha')
self.checkequal(False, '\n', 'isalpha')
self.checkequal(True, 'abc', 'isalpha')
self.checkequal(False, 'aBc123', 'isalpha')
self.checkequal(False, 'abc\n', 'isalpha')
self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
self.checkequal(False, '', 'isalnum')
self.checkequal(True, 'a', 'isalnum')
self.checkequal(True, 'A', 'isalnum')
self.checkequal(False, '\n', 'isalnum')
self.checkequal(True, '123abc456', 'isalnum')
self.checkequal(True, 'a1b3c', 'isalnum')
self.checkequal(False, 'aBc000 ', 'isalnum')
self.checkequal(False, 'abc\n', 'isalnum')
self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
self.checkequal(False, '', 'isdigit')
self.checkequal(False, 'a', 'isdigit')
self.checkequal(True, '0', 'isdigit')
self.checkequal(True, '0123456789', 'isdigit')
self.checkequal(False, '0123456789a', 'isdigit')
self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
self.checkequal(' Hello ', ' hello ', 'title')
self.checkequal('Hello ', 'hello ', 'title')
self.checkequal('Hello ', 'Hello ', 'title')
self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
self.checkequal('Getint', "getInt", 'title')
self.checkraises(TypeError, 'hello', 'title', 42)
def test_splitlines(self):
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['', 'abc', 'def', 'ghi', ''],
"\nabc\ndef\r\nghi\n\r", 'splitlines', False)
self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'],
"\nabc\ndef\r\nghi\n\r", 'splitlines', True)
self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r",
'splitlines', keepends=False)
self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'],
"\nabc\ndef\r\nghi\n\r", 'splitlines', keepends=True)
self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
self.checkequal(True, 'hello', 'startswith', 'he')
self.checkequal(True, 'hello', 'startswith', 'hello')
self.checkequal(False, 'hello', 'startswith', 'hello world')
self.checkequal(True, 'hello', 'startswith', '')
self.checkequal(False, 'hello', 'startswith', 'ello')
self.checkequal(True, 'hello', 'startswith', 'ello', 1)
self.checkequal(True, 'hello', 'startswith', 'o', 4)
self.checkequal(False, 'hello', 'startswith', 'o', 5)
self.checkequal(True, 'hello', 'startswith', '', 5)
self.checkequal(False, 'hello', 'startswith', 'lo', 6)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
# test negative indices
self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
self.checkequal(False, 'hello', 'startswith', 'ello', -5)
self.checkequal(True, 'hello', 'startswith', 'ello', -4)
self.checkequal(False, 'hello', 'startswith', 'o', -2)
self.checkequal(True, 'hello', 'startswith', 'o', -1)
self.checkequal(True, 'hello', 'startswith', '', -3, -3)
self.checkequal(False, 'hello', 'startswith', 'lo', -9)
self.checkraises(TypeError, 'hello', 'startswith')
self.checkraises(TypeError, 'hello', 'startswith', 42)
# test tuple arguments
self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'startswith', ())
self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
'rld'), 3)
self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
self.checkraises(TypeError, 'hello', 'startswith', (42,))
def test_endswith(self):
self.checkequal(True, 'hello', 'endswith', 'lo')
self.checkequal(False, 'hello', 'endswith', 'he')
self.checkequal(True, 'hello', 'endswith', '')
self.checkequal(False, 'hello', 'endswith', 'hello world')
self.checkequal(False, 'helloworld', 'endswith', 'worl')
self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
# test negative indices
self.checkequal(True, 'hello', 'endswith', 'lo', -2)
self.checkequal(False, 'hello', 'endswith', 'he', -2)
self.checkequal(True, 'hello', 'endswith', '', -3, -3)
self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
self.checkraises(TypeError, 'hello', 'endswith')
self.checkraises(TypeError, 'hello', 'endswith', 42)
# test tuple arguments
self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'endswith', ())
self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
'rld'), 3, -1)
self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '')
self.checkequal(True, 'abc', '__contains__', '')
self.checkequal(False, 'abc', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', '\0')
self.checkequal(True, 'abc\0', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', 'a')
self.checkequal(True, 'asdf', '__contains__', 'asdf')
self.checkequal(False, 'asd', '__contains__', 'asdf')
self.checkequal(False, '', '__contains__', 'asdf')
def test_subscript(self):
self.checkequal('a', 'abc', '__getitem__', 0)
self.checkequal('c', 'abc', '__getitem__', -1)
self.checkequal('a', 'abc', '__getitem__', 0)
self.checkequal('abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal('abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal('a', 'abc', '__getitem__', slice(0, 1))
self.checkequal('', 'abc', '__getitem__', slice(0, 0))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal('abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal('ab', 'abc', '__getitem__', slice(0, 2))
self.checkequal('bc', 'abc', '__getitem__', slice(1, 3))
self.checkequal('b', 'abc', '__getitem__', slice(1, 2))
self.checkequal('', 'abc', '__getitem__', slice(2, 2))
self.checkequal('', 'abc', '__getitem__', slice(1000, 1000))
self.checkequal('', 'abc', '__getitem__', slice(2000, 1000))
self.checkequal('', 'abc', '__getitem__', slice(2, 1))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = string.ascii_letters + string.digits
indices = (0, None, 1, 3, 41, -1, -2, -37)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
L = list(s)[start:stop:step]
self.checkequal("".join(L), s, '__getitem__',
slice(start, stop, step))
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
# XXX: on a 64-bit system, this doesn't raise an overflow error,
# but either raises a MemoryError, or succeeds (if you have 54TiB)
#self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
def test_join(self):
# join now works with any sequence type
# moved here, because the argument order is
# different in string.join
self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
self.checkequal('w x y z', ' ', 'join', Sequence())
self.checkequal('abc', 'a', 'join', ('abc',))
self.checkequal('z', 'a', 'join', UserList(['z']))
self.checkequal('a.b.c', '.', 'join', ['a', 'b', 'c'])
self.assertRaises(TypeError, '.'.join, ['a', 'b', 3])
for i in [5, 25, 125]:
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
['a' * i] * i)
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
('a' * i,) * i)
#self.checkequal(str(BadSeq1()), ' ', 'join', BadSeq1())
self.checkequal('a b c', ' ', 'join', BadSeq2())
self.checkraises(TypeError, ' ', 'join')
self.checkraises(TypeError, ' ', 'join', None)
self.checkraises(TypeError, ' ', 'join', 7)
self.checkraises(TypeError, ' ', 'join', [1, 2, bytes()])
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError as e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
def test_formatting(self):
self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
self.checkequal('+10+', '+%d+', '__mod__', 10)
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('"', "%c", '__mod__', 34)
self.checkequal('$', "%c", '__mod__', 36)
self.checkequal('10', "%d", '__mod__', 10)
self.checkequal('\x7f', "%c", '__mod__', 0x7f)
for ordinal in (-100, 0x200000):
# unicode raises ValueError, str raises OverflowError
self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
longvalue = sys.maxsize + 10
slongvalue = str(longvalue)
self.checkequal(' 42', '%3ld', '__mod__', 42)
self.checkequal('42', '%d', '__mod__', 42.0)
self.checkequal(slongvalue, '%d', '__mod__', longvalue)
self.checkcall('%d', '__mod__', float(longvalue))
self.checkequal('0042.00', '%07.2f', '__mod__', 42)
self.checkequal('0042.00', '%07.2F', '__mod__', 42)
self.checkraises(TypeError, 'abc', '__mod__')
self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
self.checkraises(TypeError, '%s%s', '__mod__', (42,))
with self.assertWarns(DeprecationWarning):
self.checkraises(TypeError, '%c', '__mod__', (None,))
self.checkraises(ValueError, '%(foo', '__mod__', {})
self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int conversion provided
# argument names with properly nested brackets are supported
self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
        # 100 is a magic number in PyUnicode_Format; this forces a resize
self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
self.checkraises(ValueError, '%10', '__mod__', (42,))
# Outrageously large width or precision should raise ValueError.
self.checkraises(ValueError, '%%%df' % (2**64), '__mod__', (3.2))
self.checkraises(ValueError, '%%.%df' % (2**64), '__mod__', (3.2))
self.checkraises(OverflowError, '%*s', '__mod__',
(sys.maxsize + 1, ''))
self.checkraises(OverflowError, '%.*f', '__mod__',
(sys.maxsize + 1, 1. / 7))
class X(object): pass
self.checkraises(TypeError, 'abc', '__mod__', X())
@support.cpython_only
def test_formatting_c_limits(self):
from _testcapi import PY_SSIZE_T_MAX, INT_MAX, UINT_MAX
SIZE_MAX = (1 << (PY_SSIZE_T_MAX.bit_length() + 1)) - 1
self.checkraises(OverflowError, '%*s', '__mod__',
(PY_SSIZE_T_MAX + 1, ''))
self.checkraises(OverflowError, '%.*f', '__mod__',
(INT_MAX + 1, 1. / 7))
# Issue 15989
self.checkraises(OverflowError, '%*s', '__mod__',
(SIZE_MAX + 1, ''))
self.checkraises(OverflowError, '%.*f', '__mod__',
(UINT_MAX + 1, 1. / 7))
def test_floatformatting(self):
# float formatting
for prec in range(100):
format = '%%.%if' % prec
value = 0.01
for x in range(60):
value = value * 3.14159265359 / 3.0 * 10.0
self.checkcall(format, "__mod__", value)
def test_inplace_rewrites(self):
# Check that strings don't copy and modify cached single-character strings
self.checkequal('a', 'A', 'lower')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'upper')
self.checkequal(True, 'a', 'islower')
self.checkequal('a', 'A', 'replace', 'A', 'a')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'capitalize')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'swapcase')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'title')
self.checkequal(True, 'a', 'islower')
def test_partition(self):
self.checkequal(('this is the par', 'ti', 'tion method'),
'this is the partition method', 'partition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'partition', '://')
self.checkequal(('http://www.python.org', '', ''), S, 'partition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'partition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'partition', 'org')
self.checkraises(ValueError, S, 'partition', '')
self.checkraises(TypeError, S, 'partition', None)
def test_rpartition(self):
self.checkequal(('this is the rparti', 'ti', 'on method'),
'this is the rpartition method', 'rpartition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'rpartition', '://')
self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'rpartition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'rpartition', 'org')
self.checkraises(ValueError, S, 'rpartition', '')
self.checkraises(TypeError, S, 'rpartition', None)
def test_none_arguments(self):
# issue 11828
s = 'hello'
self.checkequal(2, s, 'find', 'l', None)
self.checkequal(3, s, 'find', 'l', -2, None)
self.checkequal(2, s, 'find', 'l', None, -2)
self.checkequal(0, s, 'find', 'h', None, None)
self.checkequal(3, s, 'rfind', 'l', None)
self.checkequal(3, s, 'rfind', 'l', -2, None)
self.checkequal(2, s, 'rfind', 'l', None, -2)
self.checkequal(0, s, 'rfind', 'h', None, None)
self.checkequal(2, s, 'index', 'l', None)
self.checkequal(3, s, 'index', 'l', -2, None)
self.checkequal(2, s, 'index', 'l', None, -2)
self.checkequal(0, s, 'index', 'h', None, None)
self.checkequal(3, s, 'rindex', 'l', None)
self.checkequal(3, s, 'rindex', 'l', -2, None)
self.checkequal(2, s, 'rindex', 'l', None, -2)
self.checkequal(0, s, 'rindex', 'h', None, None)
self.checkequal(2, s, 'count', 'l', None)
self.checkequal(1, s, 'count', 'l', -2, None)
self.checkequal(1, s, 'count', 'l', None, -2)
self.checkequal(0, s, 'count', 'x', None, None)
self.checkequal(True, s, 'endswith', 'o', None)
self.checkequal(True, s, 'endswith', 'lo', -2, None)
self.checkequal(True, s, 'endswith', 'l', None, -2)
self.checkequal(False, s, 'endswith', 'x', None, None)
self.checkequal(True, s, 'startswith', 'h', None)
self.checkequal(True, s, 'startswith', 'l', -2, None)
self.checkequal(True, s, 'startswith', 'h', None, -2)
self.checkequal(False, s, 'startswith', 'x', None, None)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
s = 'hello'
x = 'x'
self.assertRaisesRegex(TypeError, r'^find\(', s.find,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^rfind\(', s.rfind,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^index\(', s.index,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^rindex\(', s.rindex,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^count\(', s.count,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^startswith\(', s.startswith,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'^endswith\(', s.endswith,
x, None, None, None)
# issue #15534
self.checkequal(10, "...\u043c......<", "find", "<")
class MixinStrUnicodeTest:
# Additional tests that only work with str and unicode.
def test_bug1001011(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
# Check the optimisation still occurs for standard objects.
t = self.type2test
class subclass(t):
pass
s1 = subclass("abcd")
s2 = t().join([s1])
self.assertIsNot(s1, s2)
self.assertIs(type(s2), t)
s1 = t("abcd")
s2 = t().join([s1])
self.assertIs(s1, s2)
# Should also test mixed-type join.
if t is str:
s1 = subclass("abcd")
s2 = "".join([s1])
self.assertIsNot(s1, s2)
self.assertIs(type(s2), t)
s1 = t("abcd")
s2 = "".join([s1])
self.assertIs(s1, s2)
## elif t is str8:
## s1 = subclass("abcd")
## s2 = "".join([s1])
## self.assertIsNot(s1, s2)
## self.assertIs(type(s2), str) # promotes!
## s1 = t("abcd")
## s2 = "".join([s1])
## self.assertIsNot(s1, s2)
## self.assertIs(type(s2), str) # promotes!
else:
self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
| FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/string_tests.py | Python | gpl-2.0 | 64,766 |
# Copyright 2016-2017 University of Pittsburgh
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, uuid, datetime
# query concept id and concept code for a specific vocabulary
def getConceptCodeByVocabId(conn, vocabId):
cur = conn.cursor()
qry = """
	select concept_id, concept_code, concept_name from public.concept where vocabulary_id = '%s'
""" % vocabId
cur.execute(qry)
return cur.fetchall()
# query concept id by concept code and vocabulary id
def getConceptIdByConceptCode(conn, conceptCode, vocabId):
cur = conn.cursor()
qry = """
select * from public.concept where concept_code = '%s' and vocabulary_id = '%s';
""" % (conceptCode, vocabId)
cur.execute(qry)
for row in cur.fetchall():
return row[0]
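if __name__ == '__main__':
    # Usage sketch: the connection string and the RxNorm vocabulary / concept code
    # below are illustrative assumptions, not values taken from this project.
    import psycopg2
    conn = psycopg2.connect("dbname=omop user=etl host=localhost")
    for concept_id, concept_code, concept_name in getConceptCodeByVocabId(conn, 'RxNorm'):
        print('%s %s %s' % (concept_id, concept_code, concept_name))
    print(getConceptIdByConceptCode(conn, '1191', 'RxNorm'))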
| dbmi-pitt/dbmi-annotator | translation/mp-evidence-base-ETL/postgres/omopConceptQry.py | Python | apache-2.0 | 1,265 |
from django.conf import settings
from django.http import Http404
from django.test import TestCase
from django.utils import timezone
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group import models as group_models
from devilry.devilry_group.views.admin import group_comment_history
class TestAdminCommentEditHistoryView(TestCase, cradmin_testhelpers.TestCaseMixin):
"""
    General testing of what gets rendered to the admin view.
"""
viewclass = group_comment_history.AdminGroupCommentHistoryView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def __make_admin_comment(self, feedback_set, user=None):
if not user:
user = baker.make(settings.AUTH_USER_MODEL)
return baker.make('devilry_group.GroupComment',
text='Test',
user=user,
user_role=group_models.GroupComment.USER_ROLE_ADMIN,
published_datetime=timezone.now(),
feedback_set=feedback_set)
def __make_user_admin(self, user, assignment, permissiongroup_type='period'):
if permissiongroup_type == 'period':
permissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
period=assignment.period)
else:
permissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
period=assignment.period)
baker.make('devilry_account.PermissionGroupUser',
user=user,
permissiongroup=permissiongroup.permissiongroup)
def test_missing_comment_id_in_kwargs(self):
testuser = baker.make('devilry_account.User', shortname='admin', fullname='Thor')
testperiod = baker.make_recipe('devilry.apps.core.period_active', admins=[testuser])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
self.__make_user_admin(user=testuser, assignment=testgroup.parentnode)
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=testuser)
def test_comment_does_not_exists_raises_404(self):
testuser = baker.make('devilry_account.User', shortname='admin', fullname='Thor')
testperiod = baker.make_recipe('devilry.apps.core.period_active', admins=[testuser])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
self.__make_user_admin(user=testuser, assignment=testgroup.parentnode)
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=testuser,
viewkwargs={'group_comment_id': 1})
def test_comment_no_history_no_history_items_rendered(self):
testuser = baker.make('devilry_account.User', shortname='admin', fullname='Thor')
testperiod = baker.make_recipe('devilry.apps.core.period_active', admins=[testuser])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
self.__make_user_admin(user=testuser, assignment=testgroup.parentnode)
groupcomment = baker.make('devilry_group.GroupComment',
feedback_set__group=testgroup,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=testuser,
viewkwargs={'group_comment_id': groupcomment.id})
self.assertFalse(mockresponse.selector.exists('.devilry-comment-edit-history-item'))
self.assertTrue(mockresponse.selector.exists('.devilry-comment-history-no-items'))
def test_admin_can_not_see_other_users_private_comments_history(self):
testuser = baker.make('devilry_account.User', shortname='admin', fullname='Thor')
testperiod = baker.make_recipe('devilry.apps.core.period_active', admins=[testuser])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
self.__make_user_admin(user=testuser, assignment=testgroup.parentnode)
groupcomment = baker.make('devilry_group.GroupComment',
feedback_set__group=testgroup,
visibility=group_models.GroupComment.VISIBILITY_PRIVATE)
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=testuser,
viewkwargs={'group_comment_id': groupcomment.id})
def test_admin_can_see_private_history_entries_from_their_own_comments(self):
testuser = baker.make('devilry_account.User', shortname='admin', fullname='Thor')
testperiod = baker.make_recipe('devilry.apps.core.period_active', admins=[testuser])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
self.__make_user_admin(user=testuser, assignment=testgroup.parentnode)
groupcomment = baker.make('devilry_group.GroupComment',
user=testuser,
feedback_set__group=testgroup)
baker.make('devilry_group.GroupCommentEditHistory', group_comment=groupcomment,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
edited_by=groupcomment.user, _quantity=2)
baker.make('devilry_group.GroupCommentEditHistory', group_comment=groupcomment,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
edited_by=groupcomment.user, _quantity=2)
baker.make('devilry_group.GroupCommentEditHistory', group_comment=groupcomment,
visibility=group_models.GroupComment.VISIBILITY_PRIVATE, edited_by=groupcomment.user, _quantity=3)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=testuser,
viewkwargs={'group_comment_id': groupcomment.id})
self.assertEqual(mockresponse.selector.count('.devilry-comment-edit-history-item'), 7)
def test_admin_can_not_see_private_history_entries_from_other_users(self):
testuser = baker.make('devilry_account.User', shortname='admin', fullname='Thor')
testperiod = baker.make_recipe('devilry.apps.core.period_active', admins=[testuser])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=testperiod)
self.__make_user_admin(user=testuser, assignment=testgroup.parentnode)
groupcomment = baker.make('devilry_group.GroupComment',
feedback_set__group=testgroup)
baker.make('devilry_group.GroupCommentEditHistory', group_comment=groupcomment,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EXAMINER_AND_ADMINS,
edited_by=groupcomment.user, _quantity=2)
baker.make('devilry_group.GroupCommentEditHistory', group_comment=groupcomment,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
edited_by=groupcomment.user, _quantity=2)
baker.make('devilry_group.GroupCommentEditHistory', group_comment=groupcomment,
visibility=group_models.GroupComment.VISIBILITY_PRIVATE, edited_by=groupcomment.user, _quantity=3)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
requestuser=testuser,
viewkwargs={'group_comment_id': groupcomment.id})
self.assertEqual(mockresponse.selector.count('.devilry-comment-edit-history-item'), 4)
| devilry/devilry-django | devilry/devilry_group/tests/test_feedbackfeed/admin/test_comment_edit_history.py | Python | bsd-3-clause | 8,010 |
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
# with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='bkmaker',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.5',
description='backup your schema from RDS directly to S3',
# long_description=long_description,
# The project's main homepage.
url='https://github.com/bjtox/bkmaker.git',
# Author details
author='Antonio Bitonti',
author_email='[email protected]',
packages=['bkmaker'],
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
# 'Intended Audience :: Developers',
# 'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='aws rds backup schema mysql',
install_requires=['boto3','asyncio'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
entry_points={
'console_scripts': [
'bkmaker=bkmaker:main',
],
},
)
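# Note: through the console_scripts entry point above, installing this package
# (e.g. "pip install .") exposes a "bkmaker" command that calls bkmaker.main().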
| bjtox/rds-bk-maker | setup.py | Python | mit | 2,162 |
# Copyright (c) 2014-2016 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib, Gio
import json
from lollypop.search_item import SearchItem
from lollypop.lio import Lio
class SpotifySearch:
"""
Search provider for Spotify
"""
def __init__(self):
"""
Init provider
"""
if not hasattr(self, '_cancel'):
self._cancel = Gio.Cancellable.new()
def tracks(self, name):
"""
Return tracks containing name
@param name as str
"""
try:
formated = GLib.uri_escape_string(name, None, True).replace(
' ', '+')
s = Lio.File.new_for_uri("https://api.spotify.com/v1/search?q=%s"
"&type=track" % formated)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
tracks = []
for item in decode['tracks']['items']:
if item['name'].lower() in tracks:
continue
search_item = SearchItem()
search_item.is_track = True
search_item.name = item['name']
tracks.append(search_item.name.lower())
search_item.album = item['album']['name']
search_item.tracknumber = int(item['track_number'])
search_item.discnumber = int(item['disc_number'])
search_item.duration = int(item['duration_ms']) / 1000
search_item.cover = item['album']['images'][0]['url']
search_item.smallcover = item['album']['images'][2]['url']
for artist in item['artists']:
search_item.artists.append(artist['name'])
self._items.append(search_item)
GLib.idle_add(self.emit, 'item-found')
except Exception as e:
print("SpotifySearch::tracks(): %s" % e)
def albums(self, name):
"""
Return albums containing name
@param name as str
@return albums as [SearchItem]
"""
self.__get_artists(name)
self.__get_albums(name)
def get_album_id(self, track_id):
"""
Get album id for track
@param track id as str
@return album id as str
"""
try:
s = Lio.File.new_for_uri("https://api.spotify.com/v1/"
"tracks/%s" % track_id)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
return decode['album']['id']
except Exception as e:
print("SpotifySearch::get_album_id():", e, track_id)
def get_album(self, album_id):
"""
Return spotify album as SearchItem
@param album id as str
@return SearchItem
"""
try:
s = Lio.File.new_for_uri("https://api.spotify.com/v1/"
"albums/%s" % album_id)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
album_item = SearchItem()
album_item.name = album_item.album_name = decode['name']
album_item.cover = decode['images'][0]['url']
album_item.smallcover = decode['images'][2]['url']
album_item.ex_id = album_id
for item in decode['tracks']['items']:
track_item = SearchItem()
track_item.is_track = True
track_item.name = item['name']
track_item.album = album_item.name
try:
track_item.year = decode[
'release_date'][:4]
except:
pass # May be missing
track_item.tracknumber = int(
item['track_number'])
track_item.discnumber = int(
item['disc_number'])
track_item.duration = int(
item['duration_ms']) / 1000
for artist in item['artists']:
track_item.artists.append(artist['name'])
if not album_item.artists:
album_item.artists = track_item.artists
album_item.subitems.append(track_item)
return album_item
except Exception as e:
print("SpotifySearch::get_album:", e)
return None
#######################
# PRIVATE #
#######################
def __get_artists(self, name):
"""
Get albums for artists name
@param name as str
"""
try:
# Read album list
formated = GLib.uri_escape_string(name, None, True).replace(
' ', '+')
s = Lio.File.new_for_uri("https://api.spotify.com/v1/search?q=%s"
"&type=artist" % formated)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
# For each album, get cover and tracks
artists = []
for item in decode['artists']['items']:
album_items = []
artist_id = item['id']
if item['name'].lower() in artists:
continue
artists.append(item['name'].lower())
s = Lio.File.new_for_uri("https://api.spotify.com/"
"v1/artists/%s/albums" %
artist_id)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
albums = []
for item in decode['items']:
if item['name'].lower() in albums:
continue
album_item = SearchItem()
album_item.name = album_item.album_name = item[
'name']
albums.append(album_item.name.lower())
album_item.cover = item['images'][0]['url']
album_item.smallcover = item['images'][2]['url']
album_items.append(album_item)
album_item.ex_id = item['id']
for album_item in album_items:
s = Lio.File.new_for_uri("https://api.spotify.com/v1/"
"albums/%s" %
album_item.ex_id)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
for item in decode['tracks']['items']:
track_item = SearchItem()
track_item.is_track = True
track_item.name = item['name']
track_item.album = album_item.name
try:
track_item.year = decode[
'release_date'][:4]
except:
pass # May be missing
track_item.tracknumber = int(
item['track_number'])
track_item.discnumber = int(
item['disc_number'])
track_item.duration = int(
item['duration_ms']) / 1000
for artist in item['artists']:
track_item.artists.append(artist['name'])
if not album_item.artists:
album_item.artists = track_item.artists
album_item.subitems.append(track_item)
self._items.append(album_item)
GLib.idle_add(self.emit, 'item-found')
except Exception as e:
print("SpotifySearch::albums(): %s" % e)
def __get_albums(self, name):
"""
Get albums for name
@param name as str
"""
try:
# Read album list
formated = GLib.uri_escape_string(name, None, True).replace(
' ', '+')
s = Lio.File.new_for_uri("https://api.spotify.com/v1/search?q=%s"
"&type=album" % formated)
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
# For each album, get cover and tracks
for item in decode['albums']['items']:
album_item = SearchItem()
album_item.name = album_item.album_name = item['name']
album_item.is_track = False
album_item.cover = item['images'][0]['url']
album_item.smallcover = item['images'][2]['url']
s = Lio.File.new_for_uri("https://api.spotify.com/v1/"
"albums/%s" % item['id'])
(status, data, tag) = s.load_contents(self._cancel)
if status:
decode = json.loads(data.decode('utf-8'))
for item in decode['tracks']['items']:
track_item = SearchItem()
track_item.is_track = True
try:
track_item.year = decode[
'release_date'][:4]
except:
pass # May be missing
track_item.name = item['name']
track_item.album = album_item.name
track_item.tracknumber = int(item['track_number'])
track_item.discnumber = int(item['disc_number'])
track_item.duration = int(item['duration_ms'])\
/ 1000
for artist in item['artists']:
track_item.artists.append(artist['name'])
if not album_item.artists:
album_item.artists = track_item.artists
album_item.subitems.append(track_item)
self._items.append(album_item)
GLib.idle_add(self.emit, 'item-found')
except Exception as e:
print("SpotifySearch::albums(): %s" % e)
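# Usage note: this class is written as a mix-in -- it expects the host class to
# provide self._items and to support emitting the 'item-found' GObject signal
# (see the self.emit calls above). A stand-alone call is therefore only a sketch:
#
#   search = SpotifySearch()
#   album = search.get_album(search.get_album_id('<spotify track id>'))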
| kerimlcr/ab2017-dpyo | ornek/lollypop/lollypop-0.9.229/src/search_spotify.py | Python | gpl-3.0 | 12,425 |
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
# TODO: maybe move this code into the StenoDictionary itself. The current saver
# structure is odd and awkward.
# TODO: write tests for this file
"""Common elements to all dictionary formats."""
from os.path import splitext
import shutil
import threading
import plover.dictionary.json_dict as json_dict
import plover.dictionary.rtfcre_dict as rtfcre_dict
from plover.config import JSON_EXTENSION, RTF_EXTENSION, CONFIG_DIR
from plover.exception import DictionaryLoaderException
dictionaries = {
JSON_EXTENSION.lower(): json_dict,
RTF_EXTENSION.lower(): rtfcre_dict,
}
def load_dictionary(filename):
"""Load a dictionary from a file."""
extension = splitext(filename)[1].lower()
try:
dict_type = dictionaries[extension]
except KeyError:
raise DictionaryLoaderException(
'Unsupported extension for dictionary: %s. Supported extensions: %s' %
(extension, ', '.join(dictionaries.keys())))
loader = dict_type.load_dictionary
try:
with open(filename, 'rb') as f:
d = loader(f.read())
except IOError as e:
raise DictionaryLoaderException(unicode(e))
d.set_path(filename)
d.save = ThreadedSaver(d, filename, dict_type.save_dictionary)
return d
def save_dictionary(d, filename, saver):
# Write the new file to a temp location.
tmp = filename + '.tmp'
with open(tmp, 'wb') as fp:
saver(d, fp)
# Then move the new file to the final location.
shutil.move(tmp, filename)
class ThreadedSaver(object):
"""A callable that saves a dictionary in the background.
Also makes sure that there is only one active call at a time.
"""
def __init__(self, d, filename, saver):
self.d = d
self.filename = filename
self.saver = saver
self.lock = threading.Lock()
def __call__(self):
t = threading.Thread(target=self.save)
t.start()
def save(self):
with self.lock:
save_dictionary(self.d, self.filename, self.saver)
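if __name__ == '__main__':
    # Minimal demonstration; 'user.json' is an illustrative path to an existing
    # JSON steno dictionary. d.save() is the ThreadedSaver attached above, so the
    # file is rewritten on a background thread.
    d = load_dictionary('user.json')
    d.save()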
| dragon788/plover | plover/dictionary/base.py | Python | gpl-2.0 | 2,132 |
# coding: utf-8
import logging
logger = logging.getLogger()
pool = None
def init(host, port, db, passwd):
import redis
global pool
pool = redis.ConnectionPool(host=host, port=port, db=db, password=passwd, max_connections=3)
def get_redis_client():
import redis
r = redis.Redis(connection_pool=pool)
# print r.ping()
return r
class RedisExcuter:
def __init__(self):
self.client = get_redis_client()
self.stat_log = logger
def hset(self, name, key, value):
self.client.hset(name, key, value)
def hget(self, name, key):
return self.client.hget(name, key)
def hincrby(self, name, key, amount=1):
return self.client.hincrby(name, key, amount)
def hmget(self, name, keys):
return self.client.hmget(name, keys)
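if __name__ == '__main__':
    # Minimal demonstration; the connection settings are placeholders, not values
    # taken from this project.
    init('127.0.0.1', 6379, 0, None)
    executer = RedisExcuter()
    executer.hset('stats', 'visits', 1)
    executer.hincrby('stats', 'visits')
    print(executer.hget('stats', 'visits'))  # expected: '2'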
| slin1972/unity | unity/service/redis_service.py | Python | apache-2.0 | 814 |
"""
Handle file opening for read/write
"""
from numpy.lib._iotools import _is_string_like
from statsmodels.compat.python import PY3
class EmptyContextManager(object):
"""
    This class is needed to allow a file-like object to be used as a
    context manager without being closed.
"""
def __init__(self, obj):
self._obj = obj
def __enter__(self):
'''When entering, return the embedded object'''
return self._obj
def __exit__(self, *args):
'''Don't hide anything'''
return False
def __getattr__(self, name):
return getattr(self._obj, name)
if PY3:
def _open(fname, mode, encoding):
if fname.endswith('.gz'):
import gzip
return gzip.open(fname, mode, encoding=encoding)
else:
return open(fname, mode, encoding=encoding)
else:
def _open(fname, mode, encoding):
if fname.endswith('.gz'):
import gzip
return gzip.open(fname, mode)
else:
return open(fname, mode)
def get_file_obj(fname, mode='r', encoding=None):
"""
Light wrapper to handle strings and let files (anything else) pass through.
    It also handles '.gz' files.
Parameters
==========
fname: string or file-like object
File to open / forward
mode: string
Argument passed to the 'open' or 'gzip.open' function
encoding: string
For Python 3 only, specify the encoding of the file
Returns
=======
A file-like object that is always a context-manager. If the `fname` was already a file-like object,
the returned context manager *will not close the file*.
"""
if _is_string_like(fname):
return _open(fname, mode, encoding)
try:
# Make sure the object has the write methods
if 'r' in mode:
fname.read
if 'w' in mode or 'a' in mode:
fname.write
except AttributeError:
raise ValueError('fname must be a string or a file-like object')
return EmptyContextManager(fname)
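if __name__ == '__main__':
    # Small demonstration of both paths through get_file_obj: a filename is opened
    # (and closed) here, while an existing file-like object is passed through and
    # left open. 'example.csv' is an illustrative name.
    import io
    with get_file_obj('example.csv', 'w') as fout:
        fout.write('a,b\n1,2\n')
    buf = io.StringIO(u'a,b\n1,2\n')
    with get_file_obj(buf, 'r') as fin:
        print(fin.read())
    assert not buf.closed  # EmptyContextManager does not close the object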
| kiyoto/statsmodels | statsmodels/iolib/openfile.py | Python | bsd-3-clause | 2,064 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "andys.computer",
"name": "cookietester"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| andkon/cookietester | cookietester/contrib/sites/migrations/0002_set_site_domain_and_name.py | Python | bsd-3-clause | 947 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Get MSRC documents from MSRC REST and attempt to annotate them
# against Bioportal
#
import msrcrest, restkit
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
API_KEY = '2b8a2949-2c4f-48db-a884-de9cf1e35bcc'
EMAIL = '[email protected]'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def bioportalAnnotate():
testText = "I am researching the topic of military suicide which involves sleep disorders and drug abuse. That is suicide for ya"
# Structure containing parameters
# See: http://www.bioontology.org/wiki/index.php/Annotator_User_Guide
params = {
'longestOnly':'false',
'wholeWordOnly':'true',
'withContext':'true',
'filterNumber':'true',
'stopWords':'',
'withDefaultStopWords':'true',
'isStopWordsCaseSenstive':'false',
'minTermSize':'3',
'scored':'true',
'withSynonyms':'true',
'ontologiesToExpand':'', #Empty means 'all' (mesh VirtualID is 1351)
'ontologiesToKeepInResult':'1351', #Empty means all (if useVirtualOntologyId=true, use that field)
'isVirtualOntologyId':'true',
'semanticTypes':'', #T017,T047,T191&" #T999&"
'levelMax':'0',
'mappingTypes':'null',
'textToAnnotate':testText,
'format':'xml', #Output formats (one of): xml, tabDelimited, text
'apikey':API_KEY,
}
result = restkit.request('http://rest.bioontology.org/obs/annotator/submit/' + EMAIL, method='POST', body=params)
return result.body_string()
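# A minimal way to inspect the annotator's XML response is the standard-library
# parser (a sketch only -- the response element names are not documented here):
#
#   import xml.etree.ElementTree as ET
#   root = ET.fromstring(bioportalAnnotate())
#   print(root.tag)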
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == "__main__":
    print bioportalAnnotate()
| idiginfo/scholar2text | python/bioportalmeshtest.py | Python | gpl-2.0 | 1746 |
class Solution:
# @return a list of lists of integers
    # S[i][j] = 1 if j == 0 or j == i
    # S[i][j] = S[i-1][j-1] + S[i-1][j] otherwise
def generate(self, numRows):
res = []
for i in range(numRows) :
cur = []
for j in range(i+1) :
if j == 0 or j == i :
cur.append(1)
else :
cur.append(res[i-1][j-1]+res[i-1][j])
res.append(cur)
return res
    def printTri(self, res):
        # print one row of the triangle per line, tab-separated
        for cur in res:
            print '\t'.join(str(i) for i in cur)
sol = Solution()
res = sol.generate(5)
sol.printTri(res)
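# For numRows = 5 the generated triangle is:
#   [1]
#   [1, 1]
#   [1, 2, 1]
#   [1, 3, 3, 1]
#   [1, 4, 6, 4, 1]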
| yelu/leetcode | Pascal0.py | Python | gpl-2.0 | 662 |
# Portions Copyright (C) 2015 Intel Corporation
''' Powerflow results for one Gridlab instance. '''
import sys
import shutil
import os
import datetime
import multiprocessing
import pprint
import json
import math
import traceback
import __metaModel__
import logging
from os.path import join as pJoin
from os.path import split as pSplit
from pyhdfs import HdfsFileNotFoundException
from jinja2 import Template
from __metaModel__ import renderAndShow, cancel, roundSig, getStatus
import omf
from omf.solvers import gridlabd
from omf.weather import zipCodeToClimateName
from flask import session
logger = logging.getLogger(__name__)
sys.path.append(__metaModel__._omfDir)
pp = pprint.PrettyPrinter(indent=4)
from omf import feeder
template = None
def renderTemplate(template, fs, modelDir="", absolutePaths=False, datastoreNames={}):
''' Render the model template to an HTML string.
By default render a blank one for new input.
If modelDir is valid, render results post-model-run.
If absolutePaths, the HTML can be opened without a server. '''
# Our HTML template for the interface:
with fs.open("models/gridlabMulti.html") as tempFile:
template = Template(tempFile.read())
try:
inJson = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
modelPath, modelName = pSplit(modelDir)
deepPath, user = pSplit(modelPath)
inJson["modelName"] = modelName
inJson["user"] = user
allInputData = json.dumps(inJson)
except HdfsFileNotFoundException:
allInputData = None
except IOError:
allInputData = None
try:
allOutputData = fs.open(pJoin(modelDir, "allOutputData.json")).read()
except HdfsFileNotFoundException:
allOutputData = None
except IOError:
allOutputData = None
if absolutePaths:
# Parent of current folder.
pathPrefix = __metaModel__._omfDir
else:
pathPrefix = ""
feederList = []
feederIDs = []
try:
inputDict = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
for key in inputDict:
if key.startswith("feederName"):
feederIDs.append(key)
feederList.append(inputDict[key])
except HdfsFileNotFoundException:
pass
except IOError:
pass
with open('templates/footer.html', 'r') as footer_file:
footer = footer_file.read()
return template.render(allInputData=allInputData,
allOutputData=allOutputData, modelStatus=getStatus(modelDir, fs), pathPrefix=pathPrefix,
datastoreNames=datastoreNames, feederIDs=feederIDs, feederList=feederList, footer=footer)
def run(modelDir, inputDict, fs):
''' Run the model in a separate process. web.py calls this to run the model.
This function will return fast, but results take a while to hit the file system.'''
# Check whether model exist or not
logging.info("Running gridlabMulti model... modelDir: %s; inputDict: %s", modelDir, inputDict)
if not os.path.isdir(modelDir):
os.makedirs(modelDir)
inputDict["created"] = str(datetime.datetime.now())
# MAYBEFIX: remove this data dump. Check showModel in web.py and
# renderTemplate()
with open(pJoin(modelDir, "allInputData.json"), "w") as inputFile:
json.dump(inputDict, inputFile, indent=4)
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
# If we are re-running, remove output:
try:
os.remove(pJoin(modelDir, "allOutputData.json"))
except:
pass
backProc = multiprocessing.Process(
target=runForeground, args=(modelDir, inputDict, fs))
backProc.start()
print "SENT TO BACKGROUND", modelDir
with open(pJoin(modelDir, "PPID.txt"), "w+") as pPidFile:
pPidFile.write(str(backProc.pid))
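# For reference, runForeground() below reads at least these keys from inputDict
# (the values shown are illustrative placeholders, not project defaults):
#
#   "feederName1":    "<feederDir>___<feederName>"
#   "zipCode":        "64735"
#   "simLength":      "24"
#   "simLengthUnits": "hours"
#   "simStartDate":   "2012-04-01"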
def runForeground(modelDir, inputDict, fs):
''' Run the model in its directory. WARNING: GRIDLAB CAN TAKE HOURS TO COMPLETE. '''
print "STARTING TO RUN", modelDir
beginTime = datetime.datetime.now()
feederList = []
    # Prepare the data and clean the workspace; if this is a re-run, remove all
    # data in the subfolders.
for dirs in os.listdir(modelDir):
if os.path.isdir(pJoin(modelDir, dirs)):
shutil.rmtree(pJoin(modelDir, dirs))
# Get each feeder, prepare data in separate folders, and run there.
for key in sorted(inputDict, key=inputDict.get):
if key.startswith("feederName"):
feederDir, feederName = inputDict[key].split("___")
feederList.append(feederName)
try:
os.remove(pJoin(modelDir, feederName, "allOutputData.json"))
fs.remove(pJoin(modelDir, feederName, "allOutputData.json"))
except Exception, e:
pass
if not os.path.isdir(pJoin(modelDir, feederName)):
# create subfolders for feeders
os.makedirs(pJoin(modelDir, feederName))
fs.export_from_fs_to_local(pJoin("data", "Feeder", feederDir, feederName + ".json"),
pJoin(modelDir, feederName, "feeder.json"))
inputDict["climateName"], latforpvwatts = zipCodeToClimateName(
inputDict["zipCode"], fs)
fs.export_from_fs_to_local(pJoin("data", "Climate", inputDict["climateName"] + ".tmy2"),
pJoin(modelDir, feederName, "climate.tmy2"))
try:
startTime = datetime.datetime.now()
feederJson = json.load(
open(pJoin(modelDir, feederName, "feeder.json")))
tree = feederJson["tree"]
# Set up GLM with correct time and recorders:
feeder.attachRecorders(
tree, "Regulator", "object", "regulator")
feeder.attachRecorders(
tree, "Capacitor", "object", "capacitor")
feeder.attachRecorders(tree, "Inverter", "object", "inverter")
feeder.attachRecorders(
tree, "Windmill", "object", "windturb_dg")
feeder.attachRecorders(tree, "CollectorVoltage", None, None)
feeder.attachRecorders(tree, "Climate", "object", "climate")
feeder.attachRecorders(tree, "OverheadLosses", None, None)
feeder.attachRecorders(tree, "UndergroundLosses", None, None)
feeder.attachRecorders(tree, "TriplexLosses", None, None)
feeder.attachRecorders(tree, "TransformerLosses", None, None)
feeder.groupSwingKids(tree)
feeder.adjustTime(tree=tree, simLength=float(inputDict["simLength"]),
simLengthUnits=inputDict["simLengthUnits"], simStartDate=inputDict["simStartDate"])
if "attachments" in feederJson:
attachments = feederJson["attachments"]
else:
attachments = []
# RUN GRIDLABD IN FILESYSTEM (EXPENSIVE!)
rawOut = gridlabd.runInFilesystem(tree, attachments=attachments,
keepFiles=True, workDir=pJoin(modelDir, feederName))
cleanOut = {}
# Std Err and Std Out
cleanOut['stderr'] = rawOut['stderr']
cleanOut['stdout'] = rawOut['stdout']
# Time Stamps
for key in rawOut:
if '# timestamp' in rawOut[key]:
cleanOut['timeStamps'] = rawOut[key]['# timestamp']
break
elif '# property.. timestamp' in rawOut[key]:
cleanOut['timeStamps'] = rawOut[
key]['# property.. timestamp']
else:
cleanOut['timeStamps'] = []
# Day/Month Aggregation Setup:
stamps = cleanOut.get('timeStamps', [])
level = inputDict.get('simLengthUnits', 'hours')
# Climate
for key in rawOut:
if key.startswith('Climate_') and key.endswith('.csv'):
cleanOut['climate'] = {}
cleanOut['climate'][
'Rain Fall (in/h)'] = hdmAgg(rawOut[key].get('rainfall'), sum, level, stamps)
cleanOut['climate'][
'Wind Speed (m/s)'] = hdmAgg(rawOut[key].get('wind_speed'), avg, level, stamps)
cleanOut['climate']['Temperature (F)'] = hdmAgg(
rawOut[key].get('temperature'), max, level, stamps)
cleanOut['climate']['Snow Depth (in)'] = hdmAgg(
rawOut[key].get('snowdepth'), max, level, stamps)
cleanOut['climate'][
'Direct Insolation (W/m^2)'] = hdmAgg(rawOut[key].get('solar_direct'), sum, level, stamps)
# Voltage Band
if 'VoltageJiggle.csv' in rawOut:
cleanOut['allMeterVoltages'] = {}
cleanOut['allMeterVoltages']['Min'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['min(voltage_12.mag)']], min, level, stamps)
cleanOut['allMeterVoltages']['Mean'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['mean(voltage_12.mag)']], avg, level, stamps)
cleanOut['allMeterVoltages']['StdDev'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['std(voltage_12.mag)']], avg, level, stamps)
cleanOut['allMeterVoltages']['Max'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['max(voltage_12.mag)']], max, level, stamps)
# Power Consumption
cleanOut['Consumption'] = {}
# Set default value to be 0, avoiding missing value when
# computing Loads
cleanOut['Consumption']['Power'] = [
0] * int(inputDict["simLength"])
cleanOut['Consumption']['Losses'] = [
0] * int(inputDict["simLength"])
cleanOut['Consumption']['DG'] = [
0] * int(inputDict["simLength"])
for key in rawOut:
if key.startswith('SwingKids_') and key.endswith('.csv'):
oneSwingPower = hdmAgg(vecPyth(
rawOut[key]['sum(power_in.real)'], rawOut[key]['sum(power_in.imag)']), avg, level, stamps)
if 'Power' not in cleanOut['Consumption']:
cleanOut['Consumption']['Power'] = oneSwingPower
else:
cleanOut['Consumption']['Power'] = vecSum(
oneSwingPower, cleanOut['Consumption']['Power'])
elif key.startswith('Inverter_') and key.endswith('.csv'):
realA = rawOut[key]['power_A.real']
realB = rawOut[key]['power_B.real']
realC = rawOut[key]['power_C.real']
imagA = rawOut[key]['power_A.imag']
imagB = rawOut[key]['power_B.imag']
imagC = rawOut[key]['power_C.imag']
oneDgPower = hdmAgg(vecSum(vecPyth(realA, imagA), vecPyth(
realB, imagB), vecPyth(realC, imagC)), avg, level, stamps)
if 'DG' not in cleanOut['Consumption']:
cleanOut['Consumption']['DG'] = oneDgPower
else:
cleanOut['Consumption']['DG'] = vecSum(
oneDgPower, cleanOut['Consumption']['DG'])
elif key.startswith('Windmill_') and key.endswith('.csv'):
vrA = rawOut[key]['voltage_A.real']
vrB = rawOut[key]['voltage_B.real']
vrC = rawOut[key]['voltage_C.real']
viA = rawOut[key]['voltage_A.imag']
viB = rawOut[key]['voltage_B.imag']
viC = rawOut[key]['voltage_C.imag']
crB = rawOut[key]['current_B.real']
crA = rawOut[key]['current_A.real']
crC = rawOut[key]['current_C.real']
ciA = rawOut[key]['current_A.imag']
ciB = rawOut[key]['current_B.imag']
ciC = rawOut[key]['current_C.imag']
powerA = vecProd(vecPyth(vrA, viA), vecPyth(crA, ciA))
powerB = vecProd(vecPyth(vrB, viB), vecPyth(crB, ciB))
powerC = vecProd(vecPyth(vrC, viC), vecPyth(crC, ciC))
# HACK: multiply by negative one because turbine power
# sign is opposite all other DG:
oneDgPower = [-1.0 *
x for x in hdmAgg(vecSum(powerA, powerB, powerC), avg, level, stamps)]
if 'DG' not in cleanOut['Consumption']:
cleanOut['Consumption']['DG'] = oneDgPower
else:
cleanOut['Consumption']['DG'] = vecSum(
oneDgPower, cleanOut['Consumption']['DG'])
elif key in ['OverheadLosses.csv', 'UndergroundLosses.csv', 'TriplexLosses.csv', 'TransformerLosses.csv']:
realA = rawOut[key]['sum(power_losses_A.real)']
imagA = rawOut[key]['sum(power_losses_A.imag)']
realB = rawOut[key]['sum(power_losses_B.real)']
imagB = rawOut[key]['sum(power_losses_B.imag)']
realC = rawOut[key]['sum(power_losses_C.real)']
imagC = rawOut[key]['sum(power_losses_C.imag)']
oneLoss = hdmAgg(vecSum(vecPyth(realA, imagA), vecPyth(
realB, imagB), vecPyth(realC, imagC)), avg, level, stamps)
if 'Losses' not in cleanOut['Consumption']:
cleanOut['Consumption']['Losses'] = oneLoss
else:
cleanOut['Consumption']['Losses'] = vecSum(
oneLoss, cleanOut['Consumption']['Losses'])
# Aggregate up the timestamps:
if level == 'days':
cleanOut['timeStamps'] = aggSeries(
stamps, stamps, lambda x: x[0][0:10], 'days')
elif level == 'months':
cleanOut['timeStamps'] = aggSeries(
stamps, stamps, lambda x: x[0][0:7], 'months')
# Write the output.
with open(pJoin(modelDir, feederName, "allOutputData.json"), "w") as outFile:
json.dump(cleanOut, outFile, indent=4)
# Update the runTime in the input file.
endTime = datetime.datetime.now()
inputDict["runTime"] = str(
datetime.timedelta(seconds=int((endTime - startTime).total_seconds())))
with open(pJoin(modelDir, feederName, "allInputData.json"), "w") as inFile:
json.dump(inputDict, inFile, indent=4)
# Clean up the PID file.
os.remove(pJoin(modelDir, feederName, "PID.txt"))
print "DONE RUNNING GRIDLABMULTI", modelDir, feederName
except Exception as e:
print "MODEL CRASHED GRIDLABMULTI", e, modelDir, feederName
cancel(pJoin(modelDir, feederName))
with open(pJoin(modelDir, feederName, "stderr.txt"), "a+") as stderrFile:
traceback.print_exc(file=stderrFile)
finishTime = datetime.datetime.now()
inputDict["runTime"] = str(
datetime.timedelta(seconds=int((finishTime - beginTime).total_seconds())))
with open(pJoin(modelDir, "allInputData.json"), "w") as inFile:
json.dump(inputDict, inFile, indent=4)
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
# Integrate data into allOutputData.json, if error happens, cancel it
try:
output = {}
output["failures"] = {}
numOfFeeders = 0
files = []
for froot, _, fname in os.walk(modelDir):
files.extend(
[os.path.relpath(os.path.join(froot, f), modelDir) for f in fname])
logger.info('GridlabD outputs in %s:\n%s', modelDir, pp.pformat(files))
for root, dirs, files in os.walk(modelDir):
# dump error info into dict
if "stderr.txt" in files:
with open(pJoin(root, "stderr.txt"), "r") as stderrFile:
tempString = stderrFile.read()
if "ERROR" in tempString or "FATAL" in tempString or "Traceback" in tempString:
output["failures"][
"feeder_" + str(os.path.split(root)[-1])] = {"stderr": tempString}
continue
# dump simulated data into dict
if "allOutputData.json" in files:
with open(pJoin(root, "allOutputData.json"), "r") as feederOutputData:
numOfFeeders += 1
feederOutput = json.load(feederOutputData)
print "Feeder output: " + json.dumps(feederOutput)
# TODO: a better feeder name
output["feeder_" + str(os.path.split(root)[-1])] = {}
output[
"feeder_" + str(os.path.split(root)[-1])]["Consumption"] = feederOutput["Consumption"]
output[
"feeder_" + str(os.path.split(root)[-1])]["allMeterVoltages"] = feederOutput["allMeterVoltages"]
output[
"feeder_" + str(os.path.split(root)[-1])]["stderr"] = feederOutput["stderr"]
output[
"feeder_" + str(os.path.split(root)[-1])]["stdout"] = feederOutput["stdout"]
# output[root] = {feederOutput["Consumption"], feederOutput["allMeterVoltages"], feederOutput["stdout"], feederOutput["stderr"]}
output["numOfFeeders"] = numOfFeeders
output["timeStamps"] = feederOutput.get("timeStamps", [])
output["climate"] = feederOutput.get("climate", [])
fs.save(pJoin(modelDir, "allOutputData.json"), json.dumps(output, indent=4))
try:
os.remove(pJoin(modelDir, "PPID.txt"))
except:
pass
# Send email to user on successfully run status of model
emailStatus = inputDict.get('emailStatus', 0)
if (emailStatus == "on"):
print "\n EMAIL ALERT ON"
email = session['user_id']
try:
user = json.load(fs.open("data/User/" + email + ".json"))
modelPath, modelName = pSplit(modelDir)
message = "The model " + "<i>" + str(modelName) + "</i>" + " has successfully completed running. It ran for a total of " + str(
inputDict["runTime"]) + " seconds from " + str(beginTime) + ", to " + str(finishTime) + "."
from omf.web import send_link
return send_link(email, message, user)
except Exception, e:
logger.exception(
'ERROR: failed to send model completed running email to user %s. Exception', email)
# print "ERROR: failed to send model completed running email to
# user", email, "with exception", e
else:
print "\n EMAIL ALERT NOT ON"
except Exception, e:
logger.exception("Gridlab-D model crashed")
print "MODEL CRASHED GRIDLABMULTI", e, modelDir
try:
os.remove(pJoin(modelDir, "PPID.txt"))
except:
pass
# Send email to user on failed running status of model
email = session['user_id']
try:
user = json.load(fs.open("data/User/" + email + ".json"))
modelPath, modelName = pSplit(modelDir)
message = "The model " + "<i>" + str(modelName) + "</i>" + " has failed to complete running. It ran for a total of " + str(
inputDict["runTime"]) + " seconds from " + str(beginTime) + ", to " + str(finishTime) + "."
return omf.web.send_link(email, message, user)
except Exception, e:
logger.exception(
'ERROR: failed to send model completed running email to user %s. Exception', email)
print "ERROR: failed to send model failed running email to user", email, "with exception", e
cancel(modelDir)
def avg(inList):
''' Average a list. Really wish this was built-in. '''
return sum(inList) / len(inList)
def hdmAgg(series, func, level, stamps):
''' Simple hour/day/month aggregation for Gridlab. '''
if level in ['days', 'months']:
return aggSeries(stamps, series, func, level)
else:
return series
def aggSeries(timeStamps, timeSeries, func, level):
''' Aggregate a list + timeStamps up to the required time level. '''
# Different substring depending on what level we aggregate to:
if level == 'months':
endPos = 7
elif level == 'days':
endPos = 10
combo = zip(timeStamps, timeSeries)
# Group by level:
groupedCombo = _groupBy(
combo, lambda x1, x2: x1[0][0:endPos] == x2[0][0:endPos])
# Get rid of the timestamps:
groupedRaw = [[pair[1] for pair in group] for group in groupedCombo]
return map(func, groupedRaw)
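# Illustrative sketch (hypothetical values): aggregating hourly samples up to
# days groups entries whose timestamps share the same YYYY-MM-DD prefix and
# applies func to each group, e.g.
#   aggSeries(['2012-04-01 00:00', '2012-04-01 01:00', '2012-04-02 00:00'],
#             [1, 3, 5], avg, 'days')
# returns [2, 5] (average of the two April 1st samples, then the lone April 2nd one).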
def _pyth(x, y):
''' Compute the third side of a triangle--BUT KEEP SIGNS THE SAME FOR DG. '''
sign = lambda z: (-1 if z < 0 else 1)
fullSign = sign(sign(x) * x * x + sign(y) * y * y)
return fullSign * math.sqrt(x * x + y * y)
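# Worked examples for _pyth (illustrative values only): the magnitude is always
# sqrt(x^2 + y^2), but the sign follows the dominant component so that DG power
# flowing "backwards" stays negative:
#   _pyth(3, 4)   ->  5.0
#   _pyth(-3, -4) -> -5.0
#   _pyth(-4, 3)  -> -5.0  (the larger-magnitude term is negative)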
def vecPyth(vx, vy):
''' Pythagorean theorem for pairwise elements from two vectors. '''
rows = zip(vx, vy)
return map(lambda x: _pyth(*x), rows)
def vecSum(*args):
''' Add n vectors. '''
return map(sum, zip(*args))
def _prod(inList):
''' Product of all values in a list. '''
return reduce(lambda x, y: x * y, inList, 1)
def vecProd(*args):
''' Multiply n vectors. '''
return map(_prod, zip(*args))
def threePhasePowFac(ra, rb, rc, ia, ib, ic):
''' Get power factor for a row of threephase volts and amps. Gridlab-specific. '''
pfRow = lambda row: math.cos(
math.atan((row[0] + row[1] + row[2]) / (row[3] + row[4] + row[5])))
rows = zip(ra, rb, rc, ia, ib, ic)
return map(pfRow, rows)
def roundSeries(ser):
''' Round everything in a vector to 4 sig figs. '''
return map(lambda x: roundSig(x, 4), ser)
def _groupBy(inL, func):
''' Take a list and func, and group items in place comparing with func. Make sure the func is an equivalence relation, or your brain will hurt. '''
if inL == []:
return inL
if len(inL) == 1:
return [inL]
newL = [[inL[0]]]
for item in inL[1:]:
if func(item, newL[-1][0]):
newL[-1].append(item)
else:
newL.append([item])
return newL
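# Illustrative example (hypothetical values): grouping adjacent items that are
# "equal" under func keeps the original order, e.g.
#   _groupBy([1, 1, 2, 2, 2, 1], lambda a, b: a == b)
# returns [[1, 1], [2, 2, 2], [1]] -- the trailing 1 starts a new group because
# only neighbouring runs are merged.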
def _tests():
# Variables
from .. import filesystem
fs = filesystem.Filesystem().fs
workDir = pJoin(__metaModel__._omfDir, "data", "Model")
inData = {"simStartDate": "2012-04-01",
"simLengthUnits": "hours",
# "feederName": "admin___Simple Market System",
# "feederName2": "admin___Simple Market System BROKEN", # configure error
# "feederName3": "public___13 Node Embedded DO NOT SAVE", # feeder error
# "feederName4": "public___13 Node Ref Feeder Flat",
# "feederName5": "public___13 Node Ref Feeder Laid Out ZERO CVR",
# "feederName6": "public___13 Node Ref Feeder Laid Out",
# "feederName7": "public___ABEC Columbia",
# "feederName8": "public___ABEC Frank LO Houses", # feeder error
# "feederName9": "public___ABEC Frank LO",
# "feederName10": "public___ACEC Geo",
# "feederName11": "public___Battery 13 Node Centralized",
# "feederName12": "public___Battery 13 Node Distributed",
# "feederName13": "public___DEC Red Base",
# "feederName14": "public___DEC Red Battery",
# "feederName15": "public___DEC Red CVR",
# "feederName16": "public___DEC Red DG",
# "feederName17": "public___INEC Renoir",
# "feederName18": "public___Olin Barre CVR Base",
# "feederName19": "public___Olin Barre Geo",
# "feederName20": "public___Olin Barre Housed 05Perc Solar",
# "feederName21": "public___Olin Barre Housed 20Perc Solar",
# "feederName22": "public___Olin Barre Housed 50Perc Solar",
# "feederName23": "public___Olin Barre Housed 90Perc Solar",
# "feederName24": "public___Olin Barre Housed Battery",
# "feederName25": "public___Olin Barre Housed Wind",
# "feederName26": "public___Olin Barre Housed",
# "feederName27": "public___Olin Barre", # feeder error
# "feederName28": "public___PNNL Taxonomy Feeder 1",
# "feederName29": "public___Simple Market System Comm Solar",
# "feederName30": "public___Simple Market System Indy Solar",
"feederName31": "public___Simple Market System",
# "feederName": "public___Battery 13 Node Distributed",
"modelType": "gridlabMulti",
"zipCode": "64735",
"simLength": "24",
"runTime": ""}
modelLoc = pJoin(workDir, "admin", "Automated Multiple GridlabD Testing")
# Blow away old test results if necessary.
try:
shutil.rmtree(modelLoc)
except:
# No previous test results.
pass
# No-input template.
renderAndShow(template, fs)
# Run the model.
run(modelLoc, inData, fs)
# Cancel the model.
# time.sleep(2)
# cancel(modelLoc)
# Show the output.
renderAndShow(template, fs, modelDir=modelLoc)
# Delete the model.
# shutil.rmtree(modelLoc)
if __name__ == '__main__':
_tests()
| geomf/omf-fork | omf/models/gridlabMulti.py | Python | gpl-2.0 | 26,721 |
""" GitDoUtils contain functions for use in GitDo"""
from colorama import Fore
import datetime
import os
import os.path
import re
import subprocess
from colorama import init # Back,
init()
from glt.PrintUtils import print_error, print_list
from glt.PrintUtils import get_logger
logger = get_logger(__name__)
def run_command_capture_output(cmd, redirect_stderr_to_stdout = False):
"""Runs the given <cmd>, returns a string of
string stdout, string stderr, int return code
if <redirect_stderr_to_stdout> is True then stderr will be empty"""
if redirect_stderr_to_stdout:
stderr_is = subprocess.STDOUT
else:
stderr_is = subprocess.PIPE
    cmd_exploded = cmd.split()
rgCmd = []
accumulator = ""
for part in cmd_exploded:
# If we're rebuilding a string parameter:
if part[0] == "\"":
accumulator = part
continue
if part[-1:] == "\"":
accumulator = accumulator + " " + part
rgCmd.append(accumulator)
accumulator = ""
continue
if accumulator != "":
accumulator = accumulator + " " + part
continue
# otherwise, just put the next part on:
rgCmd.append(part)
p=subprocess.Popen(rgCmd, # shell=True,\
bufsize = 1,\
stderr=stderr_is,\
stdout=subprocess.PIPE,
universal_newlines=True)
tuple = p.communicate()
p.wait()
logger.debug( "StdOut:\n" + tuple[0] )
if stderr_is is subprocess.PIPE:
logger.debug( "StdErr:\n" + tuple[1] )
logger.debug("return code: " + str(p.returncode))
return tuple[0], tuple[1], p.returncode
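# Minimal usage sketch (assumes git is on PATH; output values are illustrative):
#   sz_stdout, sz_stderr, ret = run_command_capture_output("git status --porcelain")
#   if ret != 0:
#       print "git failed:", sz_stderr
# Quoted arguments such as: git commit -m "some message"
# are re-assembled into a single list element before being handed to Popen.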
def call_shell(cmd, exit_on_fail = True):
"""Invokes git in a command-line shell"""
logger.info( 'About to do:' + cmd)
sz_stdout, sz_stderr, ret = run_command_capture_output(cmd)
# git sometimes uses '1' to indicate that something didn't have
# a problem, but didn't do anything, either
if ret != 0 and ret != 1:
print_error("Problem executing '"+cmd+"'\n\tIn: " + os.getcwd() +\
"\n\tReturn code:"+str(ret))
if exit_on_fail:
exit()
else:
logger.debug('\nGit Command:\n\t' + cmd + ' - SUCCEEDED\n')
def git_clone_repo(ip_addr, project, dest_dir):
# NOTE: I'm testing GLT with a VM (VirtualBox+Ubuntu server)
# The VM thinks it's servername is "ubuntu". By using
# the environment's server IP addr setting we can get around this.
ssh_str = "git@" + ip_addr +":"+ project.path_with_namespace+".git"
cwd_prev = os.getcwd()
os.chdir(dest_dir)
# clone the newly-created project locally using
# normal command-line git tools
# The '--progress' is important for actually capturing the output
# http://stackoverflow.com/questions/39564455/pythons-popen-communicate-only-returning-the-first-line-of-stdout/39565119#39565119
call_shell("git clone --progress " + ssh_str)
os.chdir(cwd_prev)
# NOTES:
#
# 1) functions must leave the current working dir where it started
# They can change it, they just have to change it back before
# exiting
def upload_to_server():
print 'upload_to_server'
pass
class commit_feedback_collector(object):
"""A class to try and commit any files that match, and
to collect up lists of results for the instructor"""
def __init__(self):
"""Set up the empty lists"""
self.ungraded = list()
self.new_student_work_since_grading = list()
self.graded = list()
def renumber_current_tag(target_tag):
sz_stdout, sz_stderr, ret = run_command_capture_output(\
"git for-each-ref refs/tags/"+target_tag)
if sz_stdout:
logger.debug( "Existing tag already found for " + target_tag \
+ " in " + os.getcwd() )
# Get the SHA of the current tag (the one without the numbers)
# Remember that this is the SHA of the tag itself,
# NOT the commit that it's attached to
tags = sz_stdout.strip().split("\n")
if len(tags) > 1:
logger.error("Found more than 1 matching tag: " + sz_stdout)
current_tag = tags[0]
loc = current_tag.find(" ")
sha_tag = current_tag[:loc]
# already filtered list for the desired tag
# in the 'git for-each-ref' step
sz_stdout, sz_stderr, ret = run_command_capture_output(\
"git for-each-ref refs/tags/"+target_tag+"*")
# get the highest number prior tag
# by going through all of them
tags = sz_stdout.strip().split("\n")
highest_suffix = 0
for next_tag in tags:
loc = next_tag.find(target_tag)
sz_last_tag = next_tag[loc:] #get the whole tag, whatever it is
suffix = next_tag[loc+len(target_tag):] #grab the number
if suffix and int(suffix) > highest_suffix:
highest_suffix = int(suffix)
        new_prior_tag = target_tag + str(highest_suffix + 1)
sha_actual_commit, dt_tag = extract_commit_datetime(sha_tag)
# rename the current commit to be the tag with the number
# after it:
git_cmd = "git tag -a -m INSTRUCTOR_FEEDBACK "+ \
new_prior_tag + " " + sha_actual_commit
print git_cmd
sz_stdout, sz_stderr, ret = run_command_capture_output( git_cmd )
# remove existing tag:
git_cmd = "git tag -d " + target_tag
sz_stdout, sz_stderr, ret = run_command_capture_output( git_cmd )
# now ready to tag the current commit
else:
logger.info( "Called renumber_current_tag, but no current tag")
class commit_feedback_collector:
def __init__(self):
self.no_feedback_ever = list()
self.new_feedback = list()
self.current_feedback_not_changed = list()
self.current_feedback_updated = list()
def generate_commit_feedback(self, pattern, tag, assign_dir):
""" returns a closure that enables us to commit
instructor feedback """
def commit_feedback():
""" Go through all the directories and if we find
a file that matches the pattern try to commit it and
tag it. """
# The expectation is that there's a single file that
# matches and either it's already been committed & tagged,
# or else that it's not yet in the repo (in which case,
# commit and tag it)
#
# The full outline of what happens when is listed after
# the code to determine if the tag exists and if any
# matching files still need to be committed
#
git_tag_cmd = "git tag -a -m INSTRUCTOR_FEEDBACK "+ tag
path_to_repo = os.getcwd()
regex = re.compile(pattern, flags=re.IGNORECASE)
# First figure out if the tag already exists:
logger.debug("Looking for tag \"" + tag + "\" in " + os.getcwd() )
git_cmd = "git tag -l " + tag
sz_stdout, sz_stderr, ret = run_command_capture_output(git_cmd, True)
if sz_stdout == "":
tagged = False
else:
tagged = True
# Next, figure out if any matching files need to be committed:
logger.debug("Looking for untracked and/or committed, modified files")
git_cmd = "git status --porcelain"
sz_stdout, sz_stderr, ret = run_command_capture_output(git_cmd, True)
modified_staged = list()
modified_not_staged = list()
untracked = list()
untracked_subdirs = list()
for line in sz_stdout.splitlines():
# line format: file:///C:/Program%20Files/Git/mingw64/share/doc/git-doc/git-status.html#_short_format
# [index][working tree]<space>filename
# examples of lines:
# M File.txt # present in repo, but not staged
#M NewFile.txt # modified, added to index
#A SubDir/FooFile.txt # added to index
#?? ExtraFile.txt # untracked
#
# Note that git does NOT include the contents of untracked
# subdirs in this output, so if a new file is put into a new
# subdir (say, SubDir2\Grade.txt) git status will list
#?? SubDir2 # note that Grade.txt is NOT listed
# Thus, we actually do need to traverse the file system to
# find new files
# does this line's file match the pattern?
both_codes = line[0:2]
filename = line[3:]
match = re.search(regex, filename)
# If there's a new, untracked subdir
# then we'll need to os.walk it to find
# any matching files
# (otherwise we can skip that)
if both_codes == "??" and \
filename[len(filename)-1:] == '/':
untracked_subdirs.append(os.path.join(path_to_repo, filename) )
if match:
code_index = line[0]
code_working = line[1]
if both_codes == "??":
untracked.append(filename)
continue
if both_codes == "!!":
print_error(filename + " (in "+os.getcwd()+"):"\
"\n\tWARNIG: This matched the pattern but it"\
" also matches something in .gitignore\n"\
"(This will NOT be committed now)\n")
continue
codes_changed = "M ARC"
if codes_changed.find(code_index) != -1:
# changed in the index
if code_working == " ":
modified_staged.append(filename)
# code_working & _index will never both be blank
# (that would mean no changes)
elif code_working == "M":
modified_not_staged.append(filename)
# find matching file(s) in untracked subdirs:
# Skip this unless there's an untracked directory
# (these can contain more stuff, and git doesn't scan through
# the untracked dir)
if untracked_subdirs:
for subdir in untracked_subdirs:
# walk through the subdir
# (starting the walk here avoids any of the
# files that git told us about)
for root, dirs, files in os.walk(subdir):
for name in files:
match = re.search(regex, name)
if match:
path = os.path.join(root, name)
local_dir = path.replace(path_to_repo, "")
# remove the leading /
if local_dir[0] == os.sep:
local_dir = local_dir[1:]
logger.debug( "found a match at " + local_dir )
untracked.append( local_dir )
#print_list(path_to_repo, modified_staged, Fore.CYAN, "modified, staged files:")
#print_list(path_to_repo, modified_not_staged, Fore.YELLOW, "modified, unstaged files:")
#print_list(path_to_repo, untracked, Fore.RED, "untracked files:")
if modified_staged:
need_commit = True
else:
need_commit = False
files_to_add = modified_not_staged + untracked
# The two 'expected' cases are listed at the top
# Here's the full outline:
# if not tagged:
# if file absent:
# note and skip
# if file present but untracked:
# add, commit, tag, done
# if file committed and unchanged:
# tag it? <ERROR>
#
# if tagged:
# file should be in repo (else error)
# if file not updated:
# note and skip
# if file has been updated:
# update existing tag to have number after it
# commit changes
# tag the current commit with the desired tag
if not tagged:
# if file absent:
if not need_commit and not files_to_add:
# note and skip
self.no_feedback_ever.append(os.getcwd() )
return
# if file present but untracked:
else:
# add, commit, tag, done
git_cmd = "git add " + " ".join(files_to_add)
call_shell(git_cmd)
call_shell("git commit -m Adding_Instructor_Feedback")
sz_stdout, sz_stderr, ret = run_command_capture_output( git_tag_cmd )
self.new_feedback.append(os.getcwd())
return
# if file committed and unchanged:
# tag it? <ERROR>
# we're not checking for previously committed files
# so we don't handle this case
# It *shouldn't* happen, anyways, so hopefully it won't
# (It might happen if the teacher commits their
# feedback manually)
if tagged:
# file should be in repo (else error)
# if file not updated:
if not need_commit and not files_to_add:
# note and skip
self.current_feedback_not_changed.append(os.getcwd() )
# if file has been updated:
else:
                    # update existing tag to have a number after it
renumber_current_tag(tag)
git_cmd = "git add " + " ".join(files_to_add)
call_shell(git_cmd)
# commit changes
call_shell("git commit -m Adding_Instructor_Feedback")
# tag the current commit with the desired tag:
sz_stdout, sz_stderr, ret = run_command_capture_output( git_tag_cmd )
self.current_feedback_updated.append(os.getcwd())
#if files_to_add:
# # modified_staged are already in the index, ready to be committed
# files_to_add = modified_not_staged + untracked
# git_cmd = "git add " + " ".join(files_to_add)
# call_shell(git_cmd)
# call_shell("git commit -m Adding_Instructor_Feedback")
# # TODO: we can use a string with spaces for the -m message,
# # but only if we pass it as a single string object in the list
# # of strings (i.e., call_shell can't just call .split() )
# call_shell("git tag -a " + tag + " -m INSTRUCTOR_FEEDBACK_ADDED")
# logger.debug( "Added " + " ".join(files_to_add) )
# return True
#else:
# print_error( "Found NO feedback to add in " + \
# path_to_repo.replace(assign_dir, ""))
# return False
# to test:
# run the following in a repo, then run this code
# E:\Work\Tech_Research\Git\Tests\Batch_Files\commitFeedback.bat
return commit_feedback
def print_dir():
print "print_dir called in " + os.getcwd()
def extract_commit_datetime(tagOrCommit):
"""Given a Git stdout message for a tag or commit, extract
the SHA-1 ID for the commit and the date of the underlying commit"""
# ~0 asks for the 0th last commit (i.e., this one)
# for tags it'll get past the tag and talk about the commit itself
# for 'head' it'll no-op
rgCmdLine = ["git","show", "-s", "--format=\"%H %cI\"", tagOrCommit+"~0"]
# if things go ok then there will be no output on stderr, and any
# readline()'s would block.
# instead, check the first line for the word 'fatal' to detect error
p=subprocess.Popen(rgCmdLine,\
stderr=subprocess.STDOUT,\
stdout=subprocess.PIPE)
try:
# Did git report a fatal error?
output = p.stdout.readline().strip()
loc = output.lower().find("fatal")
if loc != -1:
logger.error( "Fatal error - found 'fatal' in tag/commit message" )
logger.debug( "tag/commit message:\n" + output )
return None, None
# otherwise we're expecting <SHA-1> <date>
loc = output.find(" ")
SHA_commit = output[:loc]
logger.debug( 'Found commit, SHA-1=' + SHA_commit )
# The remainder is the time, minus the '-0700'/'+0800' at the end:
date_str = output[loc+1:-7].strip()
logger.debug('Found date for commit:' + date_str)
# Thu Sep 15 22:18:40 2016 -0700
#dt = datetime.datetime.strptime(date_str, "%b %d %H:%M:%S %Y")
dt = datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S")
logger.debug('Resulting date object::' + str(dt) )
        # At this point both the commit SHA and its date were parsed
        # successfully, so hand them back to the caller.
return SHA_commit, dt
finally:
p.terminate()
class grade_list_collector(object):
"""A class to collect up the 'what needs to be graded' info
for the instructor"""
def __init__(self):
"""Set up the empty lists"""
self.ungraded = list()
self.new_student_work_since_grading = list()
self.graded = list()
def generate_grading_list_collector(self, tag):
def grading_list_collector():
sha_tag, dt_tag = extract_commit_datetime(tag)
if sha_tag is None:
logger.debug( "This assignment hasn't been graded yet" )
self.ungraded.append(os.getcwd())
return True
sha_head, dt_head = extract_commit_datetime("head")
if sha_head == sha_tag:
logger.debug( "SHA's for commits matched\n\tGRADED MOST RECENT SUBMISSION" )
self.graded.append(os.getcwd())
elif dt_tag < dt_head:
logger.debug( "Instructor feedback was tagged then more work was submitted")
self.new_student_work_since_grading.append(os.getcwd())
else:
print_error("This directory has graded feedback, "\
"but the most recent commit is prior to the instructor's"\
" feedback commit & tag. This might indicate a problem"\
" with a timezone on the server\n\t"+\
os.getcwd())
return True
return grading_list_collector
class upload_list_collector(object):
"""A class to collect up the info about which projects were uploaded
for the instructor"""
def __init__(self):
"""Set up the empty lists"""
self.unchanged = list()
self.uploaded = list()
def generate_upload_list_collector(self):
def upload_list_collector():
p=subprocess.Popen("git push --progress".split(),\
stderr=subprocess.STDOUT,\
stdout=subprocess.PIPE,
universal_newlines=True)
sz_stdout, sz_stderr = p.communicate()
p.wait()
logger.debug("In response to 'git push', got: " + sz_stdout)
if sz_stdout.find("Everything up-to-date") != -1:
self.unchanged.append(os.getcwd() )
else:
if sz_stdout.find("To git@") == -1:
logger.error("Expected to find \"Writing objects:\" in output but didn't")
self.uploaded.append(os.getcwd() )
# the tags don't automatically upload,
# so push them separately:
p=subprocess.Popen("git push origin --tags --progress".split(),\
stderr=subprocess.STDOUT,\
stdout=subprocess.PIPE,
universal_newlines=True)
sz_stdout, sz_stderr = p.communicate()
p.wait()
logger.debug("In response to 'git push origin --tags', got: " + sz_stdout)
return True
return upload_list_collector | MikeTheGreat/GLT | glt/GitLocalUtils.py | Python | gpl-3.0 | 20,763 |
# -*- coding: utf-8 -*-
"""
Elo
~~~
An implementation of the Elo algorithm for Python. Elo is a rating system for
game players and it is used in many chess tournaments for ranking.
.. sourcecode:: pycon
>>> from elo import rate_1vs1
>>> rate_1vs1(800, 1200)
(809.091, 1190.909)
Links
`````
- `GitHub repository <http://github.com/sublee/elo/>`_
- `development version
<http://github.com/sublee/elo/zipball/master#egg=elo-dev>`_
See Also
````````
- `Multiplayer Elo Calculator <http://elo.divergentinformatics.com/>`_
- `TrueSkill for Python <http://trueskill.org/>`_
"""
from __future__ import with_statement
import re
from setuptools import setup
from setuptools.command.test import test
import sys
# detect the current version
with open('elo.py') as f:
version = re.search(r'__version__\s*=\s*\'(.+?)\'', f.read()).group(1)
assert version
# use pytest instead
def run_tests(self):
pyc = re.compile(r'\.pyc|\$py\.class')
test_file = pyc.sub('.py', __import__(self.test_suite).__file__)
raise SystemExit(__import__('pytest').main([test_file]))
test.run_tests = run_tests
setup(
name='elo',
version=version,
license='BSD',
author='Heungsub Lee',
author_email=re.sub('((sub).)(.*)', r'\2@\1.\3', 'sublee'),
url='http://github.com/sublee/elo',
description='A rating system for chess tournaments',
long_description=__doc__,
platforms='any',
py_modules=['elo'],
classifiers=['Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Games/Entertainment'],
test_suite='elotests',
tests_require=['pytest', 'almost'],
use_2to3=(sys.version_info[0] >= 3),
)
| sublee/elo | setup.py | Python | bsd-3-clause | 2,485 |
import prett
from .. import QTimeEdit, QDate
from .. import QDateEdit
from .. import ui_extension
from .. import BaseInterface
@ui_extension
class TimeEdit(QTimeEdit, BaseInterface, prett.WidgetStringInterface):
class StringItem(prett.WidgetStringItem):
def __init__(self, parent: 'TimeEdit'):
self.parent = parent
def get_value(self):
return self.parent.text()
def set_value(self, value):
value = value or ''
if value != self.get_value():
date = value.split('-')
if len(date) == 3:
raise ValueError('Date format is invalid')
self.parent.setDate(QDate(int(date[0]), int(date[1]), int(date[2])))
def set_changed_connection(self):
# noinspection PyUnresolvedReferences
self.parent.dateChanged.connect(self.string.check_change)
| SF-Zhou/quite | quite/gui/widgets/time_edit.py | Python | mit | 906 |
"""
Test cases for tabs.
"""
from mock import MagicMock, Mock, patch
from courseware.courses import get_course_by_id
from courseware.views import get_static_tab_contents
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from student.tests.factories import UserFactory
from xmodule.tabs import CourseTabList
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from courseware.tests.helpers import get_request_for_user, LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""Test cases for Static Tab Dates."""
def setUp(self):
self.course = CourseFactory.create()
self.page = ItemFactory.create(
category="static_tab", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="new_tab"
)
self.toy_course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_logged_in(self):
self.setup_user()
url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_anonymous_user(self):
url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
def test_get_static_tab_contents(self):
course = get_course_by_id(self.toy_course_key)
request = get_request_for_user(UserFactory.create())
tab = CourseTabList.get_tab_by_slug(course.tabs, 'resources')
# Test render works okay
tab_content = get_static_tab_contents(request, course, tab)
self.assertIn(self.toy_course_key.to_deprecated_string(), tab_content)
self.assertIn('static_tab', tab_content)
# Test when render raises an exception
with patch('courseware.views.get_module') as mock_module_render:
mock_module_render.return_value = MagicMock(
render=Mock(side_effect=Exception('Render failed!'))
)
static_tab = get_static_tab_contents(request, course, tab)
self.assertIn("this module is temporarily unavailable", static_tab)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
# The following XML test course (which lives at common/test/data/2014)
# is closed; we're testing that tabs still appear when
# the course is already closed
xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')
# this text appears in the test course's tab
# common/test/data/2014/tabs/8e4cce2b4aaf4ba28b1220804619e41f.html
xml_data = "static 463139"
xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
| huchoi/edx-platform | lms/djangoapps/courseware/tests/test_tabs.py | Python | agpl-3.0 | 4,042 |
###
# Copyright (c) 2012, Frumious Bandersnatch
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Survey', True)
Survey = conf.registerPlugin('Survey')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Survey, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
conf.registerGlobalValue(Survey, 'surveyDir',
registry.String('', """The directory holding .survey files."""))
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| kg-bot/SupyBot | plugins/Survey/config.py | Python | gpl-3.0 | 2,482 |
##
# Generate symbal for memory profile info.
#
# This tool depends on DIA2Dump.exe (VS) or nm (gcc) to parse debug entry.
#
# Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials are licensed and made available under
# the terms and conditions of the BSD License that accompanies this distribution.
# The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
import os
import re
import sys
from optparse import OptionParser
versionNumber = "1.1"
__copyright__ = "Copyright (c) 2016, Intel Corporation. All rights reserved."
class Symbols:
def __init__(self):
self.listLineAddress = []
self.pdbName = ""
# Cache for function
self.functionName = ""
# Cache for line
self.sourceName = ""
def getSymbol (self, rva):
index = 0
lineName = 0
sourceName = "??"
while index + 1 < self.lineCount :
if self.listLineAddress[index][0] <= rva and self.listLineAddress[index + 1][0] > rva :
offset = rva - self.listLineAddress[index][0]
functionName = self.listLineAddress[index][1]
lineName = self.listLineAddress[index][2]
sourceName = self.listLineAddress[index][3]
if lineName == 0 :
return " (" + self.listLineAddress[index][1] + "() - " + ")"
else :
return " (" + self.listLineAddress[index][1] + "() - " + sourceName + ":" + str(lineName) + ")"
index += 1
return " (unknown)"
def parse_debug_file(self, driverName, pdbName):
if cmp (pdbName, "") == 0 :
return
self.pdbName = pdbName;
try:
nmCommand = "nm"
nmLineOption = "-l"
print "parsing (debug) - " + pdbName
os.system ('%s %s %s > nmDump.line.log' % (nmCommand, nmLineOption, pdbName))
except :
print 'ERROR: nm command not available. Please verify PATH'
return
#
# parse line
#
linefile = open("nmDump.line.log")
reportLines = linefile.readlines()
linefile.close()
# 000113ca T AllocatePool c:\home\edk-ii\MdePkg\Library\UefiMemoryAllocationLib\MemoryAllocationLib.c:399
patchLineFileMatchString = "([0-9a-fA-F]*)\s+[T|D|t|d]\s+(\w+)\s*((?:[a-zA-Z]:)?[\w+\-./_a-zA-Z0-9\\\\]*):?([0-9]*)"
for reportLine in reportLines:
#print "check - " + reportLine
match = re.match(patchLineFileMatchString, reportLine)
if match is not None:
#print "match - " + reportLine[:-1]
#print "0 - " + match.group(0)
#print "1 - " + match.group(1)
#print "2 - " + match.group(2)
#print "3 - " + match.group(3)
#print "4 - " + match.group(4)
rva = int (match.group(1), 16)
functionName = match.group(2)
sourceName = match.group(3)
if cmp (match.group(4), "") != 0 :
lineName = int (match.group(4))
else :
lineName = 0
self.listLineAddress.append ([rva, functionName, lineName, sourceName])
self.lineCount = len (self.listLineAddress)
self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress:symbolAddress[0])
#for key in self.listLineAddress :
#print "rva - " + "%x"%(key[0]) + ", func - " + key[1] + ", line - " + str(key[2]) + ", source - " + key[3]
def parse_pdb_file(self, driverName, pdbName):
if cmp (pdbName, "") == 0 :
return
self.pdbName = pdbName;
try:
#DIA2DumpCommand = "\"C:\\Program Files (x86)\Microsoft Visual Studio 14.0\\DIA SDK\\Samples\\DIA2Dump\\x64\\Debug\\Dia2Dump.exe\""
DIA2DumpCommand = "Dia2Dump.exe"
#DIA2SymbolOption = "-p"
DIA2LinesOption = "-l"
print "parsing (pdb) - " + pdbName
#os.system ('%s %s %s > DIA2Dump.symbol.log' % (DIA2DumpCommand, DIA2SymbolOption, pdbName))
os.system ('%s %s %s > DIA2Dump.line.log' % (DIA2DumpCommand, DIA2LinesOption, pdbName))
except :
print 'ERROR: DIA2Dump command not available. Please verify PATH'
return
#
# parse line
#
linefile = open("DIA2Dump.line.log")
reportLines = linefile.readlines()
linefile.close()
# ** GetDebugPrintErrorLevel
# line 32 at [0000C790][0001:0000B790], len = 0x3 c:\home\edk-ii\mdepkg\library\basedebugprinterrorlevellib\basedebugprinterrorlevellib.c (MD5: 687C0AE564079D35D56ED5D84A6164CC)
# line 36 at [0000C793][0001:0000B793], len = 0x5
# line 37 at [0000C798][0001:0000B798], len = 0x2
patchLineFileMatchString = "\s+line ([0-9]+) at \[([0-9a-fA-F]{8})\]\[[0-9a-fA-F]{4}\:[0-9a-fA-F]{8}\], len = 0x[0-9a-fA-F]+\s*([\w+\-\:./_a-zA-Z0-9\\\\]*)\s*"
patchLineFileMatchStringFunc = "\*\*\s+(\w+)\s*"
for reportLine in reportLines:
#print "check line - " + reportLine
match = re.match(patchLineFileMatchString, reportLine)
if match is not None:
#print "match - " + reportLine[:-1]
#print "0 - " + match.group(0)
#print "1 - " + match.group(1)
#print "2 - " + match.group(2)
if cmp (match.group(3), "") != 0 :
self.sourceName = match.group(3)
sourceName = self.sourceName
functionName = self.functionName
rva = int (match.group(2), 16)
lineName = int (match.group(1))
self.listLineAddress.append ([rva, functionName, lineName, sourceName])
else :
match = re.match(patchLineFileMatchStringFunc, reportLine)
if match is not None:
self.functionName = match.group(1)
self.lineCount = len (self.listLineAddress)
self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress:symbolAddress[0])
#for key in self.listLineAddress :
#print "rva - " + "%x"%(key[0]) + ", func - " + key[1] + ", line - " + str(key[2]) + ", source - " + key[3]
class SymbolsFile:
def __init__(self):
self.symbolsTable = {}
symbolsFile = ""
driverName = ""
rvaName = ""
symbolName = ""
def getSymbolName(driverName, rva):
global symbolsFile
#print "driverName - " + driverName
try :
symbolList = symbolsFile.symbolsTable[driverName]
if symbolList is not None:
return symbolList.getSymbol (rva)
else:
return " (???)"
except Exception:
return " (???)"
def processLine(newline):
global driverName
global rvaName
driverPrefixLen = len("Driver - ")
# get driver name
if cmp(newline[0:driverPrefixLen],"Driver - ") == 0 :
driverlineList = newline.split(" ")
driverName = driverlineList[2]
#print "Checking : ", driverName
# EDKII application output
pdbMatchString = "Driver - \w* \(Usage - 0x[0-9a-fA-F]+\) \(Pdb - ([:\-.\w\\\\/]*)\)\s*"
pdbName = ""
match = re.match(pdbMatchString, newline)
if match is not None:
#print "match - " + newline
#print "0 - " + match.group(0)
#print "1 - " + match.group(1)
pdbName = match.group(1)
#print "PDB - " + pdbName
symbolsFile.symbolsTable[driverName] = Symbols()
if cmp (pdbName[-3:], "pdb") == 0 :
symbolsFile.symbolsTable[driverName].parse_pdb_file (driverName, pdbName)
else :
symbolsFile.symbolsTable[driverName].parse_debug_file (driverName, pdbName)
elif cmp(newline,"") == 0 :
driverName = ""
# check entry line
if newline.find ("<==") != -1 :
entry_list = newline.split(" ")
rvaName = entry_list[4]
#print "rva : ", rvaName
symbolName = getSymbolName (driverName, int(rvaName, 16))
else :
rvaName = ""
symbolName = ""
if cmp(rvaName,"") == 0 :
return newline
else :
return newline + symbolName
def myOptionParser():
usage = "%prog [--version] [-h] [--help] [-i inputfile [-o outputfile]]"
Parser = OptionParser(usage=usage, description=__copyright__, version="%prog " + str(versionNumber))
Parser.add_option("-i", "--inputfile", dest="inputfilename", type="string", help="The input memory profile info file output from MemoryProfileInfo application in MdeModulePkg")
Parser.add_option("-o", "--outputfile", dest="outputfilename", type="string", help="The output memory profile info file with symbol, MemoryProfileInfoSymbol.txt will be used if it is not specified")
(Options, args) = Parser.parse_args()
if Options.inputfilename is None:
Parser.error("no input file specified")
if Options.outputfilename is None:
Options.outputfilename = "MemoryProfileInfoSymbol.txt"
return Options
def main():
global symbolsFile
global Options
Options = myOptionParser()
symbolsFile = SymbolsFile()
try :
file = open(Options.inputfilename)
except Exception:
print "fail to open " + Options.inputfilename
return 1
try :
newfile = open(Options.outputfilename, "w")
except Exception:
print "fail to open " + Options.outputfilename
return 1
try:
while 1:
line = file.readline()
if not line:
break
newline = line[:-1]
newline = processLine(newline)
newfile.write(newline)
newfile.write("\n")
finally:
file.close()
newfile.close()
if __name__ == '__main__':
sys.exit(main())
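# Typical invocation (illustrative file names):
#   python MemoryProfileSymbolGen.py -i MemoryProfileInfo.txt -o MemoryProfileInfoSymbol.txt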
| intel/ipmctl | BaseTools/Scripts/MemoryProfileSymbolGen.py | Python | bsd-3-clause | 10,174 |
# -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
from werkzeug._internal import _iter_modules, _DictAccessorProperty, \
_parse_signature, _missing
_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
_entity_re = re.compile(r'&([^;]+);')
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
'LPT2', 'LPT3', 'PRN', 'NUL')
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
.. versionchanged:: 0.6
the `writeable` attribute and parameter was deprecated. If a
cached property is writeable or not has to be documented now.
For performance reasons the implementation does not honor the
writeable setting and will always make the property writeable.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
# overhead. If one choses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None, writeable=False):
if writeable:
from warnings import warn
warn(DeprecationWarning('the writeable argument to the '
'cached property is a noop since 0.6 '
'because the property is writeable '
'by default for performance reasons'))
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class environ_property(_DictAccessorProperty):
"""Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
environ attribute:
>>> class Test(object):
... environ = {'key': 'value'}
... test = environ_property('key')
>>> var = Test()
>>> var.test
'value'
If you pass it a second value it's used as default if the key does not
exist, the third one can be a converter that takes a value and converts
it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
is used. If no default value is provided `None` is used.
Per default the property is read only. You have to explicitly enable it
by passing ``read_only=False`` to the constructor.
"""
read_only = True
def lookup(self, obj):
return obj.environ
class header_property(_DictAccessorProperty):
"""Like `environ_property` but for headers."""
def lookup(self, obj):
return obj.headers
class HTMLBuilder(object):
"""Helper object for HTML generation.
Per default there are two instances of that class. The `html` one, and
the `xhtml` one for those two dialects. The class uses keyword parameters
and positional parameters to generate small snippets of HTML.
Keyword parameters are converted to XML/SGML attributes, positional
arguments are used as children. Because Python accepts positional
arguments before keyword arguments it's a good idea to use a list with the
star-syntax for some children:
>>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
... html.a('bar', href='bar.html')])
u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
This class works around some browser limitations and can not be used for
arbitrary SGML/XML generation. For that purpose lxml and similar
libraries exist.
Calling the builder escapes the string passed:
>>> html.p(html("<foo>"))
u'<p><foo></p>'
"""
from htmlentitydefs import name2codepoint
_entity_re = re.compile(r'&([^;]+);')
_entities = name2codepoint.copy()
_entities['apos'] = 39
_empty_elements = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param',
'source', 'wbr'
])
_boolean_attributes = set([
'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
])
_plaintext_elements = set(['textarea'])
_c_like_cdata = set(['script', 'style'])
del name2codepoint
def __init__(self, dialect):
self._dialect = dialect
def __call__(self, s):
return escape(s)
def __getattr__(self, tag):
if tag[:2] == '__':
raise AttributeError(tag)
def proxy(*children, **arguments):
buffer = '<' + tag
for key, value in arguments.iteritems():
if value is None:
continue
if key[-1] == '_':
key = key[:-1]
if key in self._boolean_attributes:
if not value:
continue
if self._dialect == 'xhtml':
value = '="' + key + '"'
else:
value = ''
else:
value = '="' + escape(value, True) + '"'
buffer += ' ' + key + value
if not children and tag in self._empty_elements:
if self._dialect == 'xhtml':
buffer += ' />'
else:
buffer += '>'
return buffer
buffer += '>'
children_as_string = ''.join([unicode(x) for x in children
if x is not None])
if children_as_string:
if tag in self._plaintext_elements:
children_as_string = escape(children_as_string)
elif tag in self._c_like_cdata and self._dialect == 'xhtml':
children_as_string = '/*<![CDATA[*/' + \
children_as_string + '/*]]>*/'
buffer += children_as_string + '</' + tag + '>'
return buffer
return proxy
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__,
self._dialect
)
html = HTMLBuilder('html')
xhtml = HTMLBuilder('xhtml')
def get_content_type(mimetype, charset):
"""Return the full content type string with charset for a mimetype.
If the mimetype represents text the charset will be appended as charset
parameter, otherwise the mimetype is returned unchanged.
:param mimetype: the mimetype to be used as content type.
:param charset: the charset to be appended in case it was a text mimetype.
:return: the content type.
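    For example (illustrative values):
    >>> get_content_type('text/html', 'utf-8')
    'text/html; charset=utf-8'
    >>> get_content_type('application/octet-stream', 'utf-8')
    'application/octet-stream'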
"""
if mimetype.startswith('text/') or \
mimetype == 'application/xml' or \
(mimetype.startswith('application/') and
mimetype.endswith('+xml')):
mimetype += '; charset=' + charset
return mimetype
def format_string(string, context):
"""String-template format a string:
>>> format_string('$foo and ${foo}s', dict(foo=42))
'42 and 42s'
This does not do any attribute lookup etc. For more advanced string
formattings have a look at the `werkzeug.template` module.
:param string: the format string.
:param context: a dict with the variables to insert.
"""
def lookup_arg(match):
x = context[match.group(1) or match.group(2)]
if not isinstance(x, basestring):
x = type(string)(x)
return x
return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows system the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
.. versionadded:: 0.5
:param filename: the filename to secure
"""
if isinstance(filename, unicode):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def escape(s, quote=False):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character (") is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
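    For example (illustrative values):
    >>> escape(u'<em>"5 & 6"</em>', quote=True)
    u'&lt;em&gt;&quot;5 &amp; 6&quot;&lt;/em&gt;'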
"""
if s is None:
return ''
elif hasattr(s, '__html__'):
return s.__html__()
elif not isinstance(s, basestring):
s = unicode(s)
s = s.replace('&', '&').replace('<', '<').replace('>', '>')
if quote:
s = s.replace('"', """)
return s
def unescape(s):
"""The reverse function of `escape`. This unescapes all the HTML
entities, not only the XML entities inserted by `escape`.
:param s: the string to unescape.
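    For example (illustrative values):
    >>> unescape(u'1 &lt; 2 &amp; 3')
    u'1 < 2 & 3'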
"""
def handle_match(m):
name = m.group(1)
if name in HTMLBuilder._entities:
return unichr(HTMLBuilder._entities[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, s)
def redirect(location, code=302):
"""Return a response object (a WSGI application) that, if called,
redirects the client to the target location. Supported codes are 301,
302, 303, 305, and 307. 300 is not supported because it's not a real
    redirect and 304 because it's the answer for a request with defined
    If-Modified-Since headers.
.. versionadded:: 0.6
The location can now be a unicode string that is encoded using
the :func:`iri_to_uri` function.
:param location: the location the response should redirect to.
:param code: the redirect status code. defaults to 302.
"""
assert code in (201, 301, 302, 303, 305, 307), 'invalid code'
from werkzeug.wrappers import BaseResponse
display_location = location
if isinstance(location, unicode):
from werkzeug.urls import iri_to_uri
location = iri_to_uri(location)
response = BaseResponse(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(location, display_location), code, mimetype='text/html')
response.headers['Location'] = location
return response
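# Illustrative sketch (not part of the original module): wiring redirect()
# into a minimal WSGI callable.  The URL paths used here are hypothetical.
def _example_redirect_app(environ, start_response):
    if environ.get('PATH_INFO') == '/old-page':
        # redirect() returns a response object that is itself a WSGI app.
        return redirect('/new-page', code=301)(environ, start_response)
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello']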
def append_slash_redirect(environ, code=301):
"""Redirect to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already.
:param environ: the WSGI environment for the request that triggers
the redirect.
:param code: the status code for the redirect.
"""
new_path = environ['PATH_INFO'].strip('/') + '/'
query_string = environ.get('QUERY_STRING')
if query_string:
new_path += '?' + query_string
return redirect(new_path, code)
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
if isinstance(import_name, unicode):
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError, e:
if not silent:
raise ImportStringError(import_name, e), None, sys.exc_info()[2]
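# Illustrative sketch (not part of the original module): both notations
# accepted by import_string() resolve to the same object.  The stdlib
# target below is only an example.
def _example_import_string():
    escape_dotted = import_string('xml.sax.saxutils.escape')
    escape_colon = import_string('xml.sax.saxutils:escape')
    assert escape_dotted is escape_colon
    return escape_dotted('<tag>')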
def find_modules(import_path, include_packages=False, recursive=False):
"""Find all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
    :param import_path: the dotted name of the package to find child modules in.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, '__path__', None)
if path is None:
raise ValueError('%r is not a package' % import_path)
basename = module.__name__ + '.'
for modname, ispkg in _iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
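# Illustrative sketch (not part of the original module): importing every
# module below a package so decorators and metaclasses can register
# themselves, as the find_modules() docstring describes.  The package name
# 'myapp.views' is purely hypothetical.
def _example_autoimport_views(package='myapp.views'):
    for name in find_modules(package, include_packages=False, recursive=True):
        import_string(name)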
def validate_arguments(func, args, kwargs, drop_extra=True):
"""Check if the function accepts the arguments and keyword arguments.
Returns a new ``(args, kwargs)`` tuple that can safely be passed to
the function without causing a `TypeError` because the function signature
is incompatible. If `drop_extra` is set to `True` (which is the default)
any extra positional or keyword arguments are dropped automatically.
The exception raised provides three attributes:
`missing`
        A set of argument names that the function expected but were
missing.
`extra`
        A dict of keyword arguments that the function cannot handle but
        were provided.
`extra_positional`
        A list of values that were given as positional arguments but the
function cannot accept.
This can be useful for decorators that forward user submitted data to
a view function::
from werkzeug.utils import ArgumentValidationError, validate_arguments
def sanitize(f):
def proxy(request):
data = request.values.to_dict()
try:
args, kwargs = validate_arguments(f, (request,), data)
except ArgumentValidationError:
raise BadRequest('The browser failed to transmit all '
'the data expected.')
return f(*args, **kwargs)
return proxy
:param func: the function the validation is performed against.
:param args: a tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:param drop_extra: set to `False` if you don't want extra arguments
to be silently dropped.
:return: tuple in the form ``(args, kwargs)``.
"""
parser = _parse_signature(func)
args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
if missing:
raise ArgumentValidationError(tuple(missing))
elif (extra or extra_positional) and not drop_extra:
raise ArgumentValidationError(None, extra, extra_positional)
return tuple(args), kwargs
def bind_arguments(func, args, kwargs):
"""Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
"""
args, kwargs, missing, extra, extra_positional, \
arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
values = {}
for (name, has_default, default), value in zip(arg_spec, args):
values[name] = value
if vararg_var is not None:
values[vararg_var] = tuple(extra_positional)
elif extra_positional:
raise TypeError('too many positional arguments')
if kwarg_var is not None:
multikw = set(extra) & set([x[0] for x in arg_spec])
if multikw:
raise TypeError('got multiple values for keyword argument ' +
repr(iter(multikw).next()))
values[kwarg_var] = extra
elif extra:
raise TypeError('got unexpected keyword argument ' +
repr(iter(extra).next()))
return values
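# Illustrative sketch (not part of the original module): the cache
# decorator use case mentioned in the bind_arguments() docstring.  The
# in-memory dict and the key layout are assumptions of this example and
# require the argument values to be hashable.
def _example_cache_by_arguments(f):
    cache = {}
    def wrapper(*args, **kwargs):
        key = tuple(sorted(bind_arguments(f, args, kwargs).items()))
        if key not in cache:
            cache[key] = f(*args, **kwargs)
        return cache[key]
    return wrapper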
class ArgumentValidationError(ValueError):
"""Raised if :func:`validate_arguments` fails to validate"""
def __init__(self, missing=None, extra=None, extra_positional=None):
self.missing = set(missing or ())
self.extra = extra or {}
self.extra_positional = extra_positional or []
ValueError.__init__(self, 'function arguments invalid. ('
'%d missing, %d additional)' % (
len(self.missing),
len(self.extra) + len(self.extra_positional)
))
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, imported.__file__))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
# circular dependencies
from werkzeug.http import quote_header_value, unquote_header_value, \
cookie_date
# DEPRECATED
# these objects were previously in this module as well. we import
# them here for backwards compatibility with old pickles.
from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
Headers, EnvironHeaders
from werkzeug.http import parse_cookie, dump_cookie
| r-kitaev/lucid-python-werkzeug | werkzeug/utils.py | Python | bsd-3-clause | 23,011 |
# -*- coding: utf-8 -*-
first_line = raw_input().split()
N = int(first_line[0])  # sequence length
M = int(first_line[1])  # subsequence length
K = int(first_line[2])
n = raw_input().split()  # sequence
menor = 2147483647  # largest possible value (2**31 - 1)
for i in range(N):
num = int(n[i])
if num < menor:
menor = num
print menor
| ieeeugrsb/ieeextreme8 | Teams/MineCoders/01_IEEE Electronic Devices Society/01_alternativo.py | Python | gpl-3.0 | 342 |
import copy
from SDWLE.cards.base import SpellCard
from SDWLE.tags.action import Damage, Draw, RemoveFromHand
from SDWLE.tags.base import AuraUntil, Buff, Effect, ActionTag
from SDWLE.tags.card_source import Same
from SDWLE.tags.condition import GreaterThan, IsDamaged
from SDWLE.tags.event import TurnEnded, Drawn
from SDWLE.tags.selector import MinionSelector, HeroSelector, PlayerSelector, Count
from SDWLE.tags.status import Charge as _Charge, MinimumHealth, ManaChange
import SDWLE.targeting
import SDWLE.tags.action
from SDWLE.constants import CHARACTER_CLASS, CARD_RARITY
class BattleRage(SpellCard):
def __init__(self):
super().__init__("Battle Rage", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
def use(self, player, game):
def damaged_character(character):
return character.health < character.calculate_max_health()
super().use(player, game)
characters = copy.copy(player.minions)
characters.append(player.hero)
characters = [character for character in characters if damaged_character(character)]
for i in range(0, len(characters)):
player.draw()
class Brawl(SpellCard):
def __init__(self):
super().__init__("Brawl", 5, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC)
def can_use(self, player, game):
return super().can_use(player, game) and len(player.minions) + len(player.opponent.minions) >= 2
def use(self, player, game):
super().use(player, game)
minions = copy.copy(player.minions)
minions.extend(game.other_player.minions)
if len(minions) > 1:
survivor = game.random_choice(minions)
for minion in minions:
if minion is not survivor:
minion.die(self)
class Charge(SpellCard):
def __init__(self):
super().__init__("Charge", 3, CHARACTER_CLASS.WARRIOR, CARD_RARITY.FREE,
target_func=SDWLE.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.change_attack(2)
self.target.add_buff(Buff(_Charge()))
class Cleave(SpellCard):
def __init__(self):
super().__init__("Cleave", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
minions = copy.copy(game.other_player.minions)
for i in range(0, 2):
minion = game.random_choice(minions)
minions.remove(minion)
minion.damage(player.effective_spell_damage(2), self)
def can_use(self, player, game):
return super().can_use(player, game) and len(game.other_player.minions) >= 2
class CommandingShout(SpellCard):
def __init__(self):
super().__init__("Commanding Shout", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
player.add_aura(AuraUntil(MinimumHealth(1), MinionSelector(), TurnEnded()))
player.draw()
class Execute(SpellCard):
def __init__(self):
super().__init__("Execute", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.FREE,
target_func=SDWLE.targeting.find_enemy_minion_spell_target,
filter_func=lambda target: target.health != target.calculate_max_health() and
target.spell_targetable())
def use(self, player, game):
super().use(player, game)
self.target.die(self)
class HeroicStrike(SpellCard):
def __init__(self):
super().__init__("Heroic Strike", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
player.hero.change_temp_attack(4)
class InnerRage(SpellCard):
def __init__(self):
super().__init__("Inner Rage", 0, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON,
target_func=SDWLE.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(1, self)
self.target.change_attack(2)
class MortalStrike(SpellCard):
def __init__(self):
super().__init__("Mortal Strike", 4, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE,
target_func=SDWLE.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
if player.hero.health <= 12:
self.target.damage(player.effective_spell_damage(6), self)
else:
self.target.damage(player.effective_spell_damage(4), self)
class Rampage(SpellCard):
def __init__(self):
super().__init__("Rampage", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON,
target_func=SDWLE.targeting.find_minion_spell_target,
filter_func=lambda target: target.health != target.calculate_max_health() and
target.spell_targetable())
def use(self, player, game):
super().use(player, game)
self.target.change_attack(3)
self.target.increase_health(3)
class ShieldBlock(SpellCard):
def __init__(self):
super().__init__("Shield Block", 3, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
player.hero.increase_armor(5)
player.draw()
class ShieldSlam(SpellCard):
def __init__(self):
super().__init__("Shield Slam", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC,
target_func=SDWLE.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(player.hero.armor), self)
class Slam(SpellCard):
def __init__(self):
super().__init__("Slam", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON,
target_func=SDWLE.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
if self.target.health > player.effective_spell_damage(2) or self.target.divine_shield:
self.target.damage(player.effective_spell_damage(2), self)
player.draw()
else:
self.target.damage(player.effective_spell_damage(2), self)
class Upgrade(SpellCard):
def __init__(self):
super().__init__("Upgrade!", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
from SDWLE.cards.weapons.warrior import HeavyAxe
if player.weapon:
player.weapon.durability += 1
player.weapon.base_attack += 1
else:
heavy_axe = HeavyAxe().create_weapon(player)
heavy_axe.equip(player)
class Whirlwind(SpellCard):
def __init__(self):
super().__init__("Whirlwind", 1, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
targets = copy.copy(game.other_player.minions)
targets.extend(game.current_player.minions)
for minion in targets:
minion.damage(player.effective_spell_damage(1), self)
class BouncingBlade(SpellCard):
def __init__(self):
super().__init__("Bouncing Blade", 3, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC)
def can_use(self, player, game):
return super().can_use(player, game) and len(player.minions) + len(player.opponent.minions) >= 1
def use(self, player, game):
super().use(player, game)
# According to https://www.youtube.com/watch?v=7ij_6_Dx47g, Bouncing Blade bounces at most 80 times
# TODO Bouncing blade should only target those minions whose health is above minimum
# See http://us.battle.net/hearthstone/en/forum/topic/15142084659
targets = player.minions[:] + player.opponent.minions[:]
if len(targets):
for bounces in range(80):
target = game.random_choice(targets)
target.damage(player.effective_spell_damage(1), self)
if target.dead:
break
class Crush(SpellCard):
def __init__(self):
super().__init__("Crush", 7, CHARACTER_CLASS.WARRIOR, CARD_RARITY.EPIC,
target_func=SDWLE.targeting.find_minion_spell_target,
buffs=[Buff(ManaChange(-4), GreaterThan(Count(MinionSelector(IsDamaged())), value=0))])
def use(self, player, game):
super().use(player, game)
self.target.die(self)
class BurrowingMine(SpellCard):
def __init__(self):
super().__init__("Burrowing Mine", 0, CHARACTER_CLASS.WARRIOR, CARD_RARITY.COMMON, False,
effects=[Effect(Drawn(), ActionTag(Damage(10), HeroSelector())),
Effect(Drawn(), ActionTag(RemoveFromHand(Same()),
PlayerSelector())),
Effect(Drawn(), ActionTag(Draw(), PlayerSelector()))])
def use(self, player, game):
super().use(player, game)
class Revenge(SpellCard):
def __init__(self):
super().__init__("Revenge", 2, CHARACTER_CLASS.WARRIOR, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
targets = copy.copy(game.other_player.minions)
targets.extend(game.current_player.minions)
if player.hero.health <= 12:
for minion in targets:
minion.damage(player.effective_spell_damage(3), self)
else:
for minion in targets:
minion.damage(player.effective_spell_damage(1), self)
| jomyhuang/sdwle | SDWLE/cards_copy/spells/warrior.py | Python | mit | 9,699 |
"""Base class for Bundle and Partition databases. This module also includes
interfaces for temporary CSV files and HDF files.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
import h5py
import os.path
from numpy import *
class Hdf5File(h5py.File):
def __init__(self, partition):
self.partition = partition
source, name_parts, partition_path = self.partition._path_parts() #@UnusedVariable
self._path = os.path.join(self.partition.bundle.database.base_path, *partition_path)
def open(self):
dir_ = os.path.dirname(self._path)
if not os.path.exists(dir_):
os.makedirs(dir_)
super(Hdf5File, self).__init__(self._path)
def exists(self):
import os.path
return os.path.exists(self._path)
@property
def path(self):
return self._path
def put_geo(self,name, a, aa):
        '''Store array `a` in the geo group under `name`, together with its analysis area `aa`.'''
import json
group = self.require_group("geo")
if name in group:
del group[name]
ds = group.create_dataset(name, data=a, compression=9)
ds.attrs['analysis-area'] = json.dumps(aa.__dict__)
try:
if a.mask is not ma.nomask:
ds.attrs['nodata'] = a.fill_value
except:
pass
def get_geo(self, name):
import json
from databundles.geo.analysisarea import AnalysisArea
group = self.require_group("geo")
try:
ds = group[name]
except KeyError:
raise KeyError("Geo group doesn't have dataset named '{}'".format(name))
aa = AnalysisArea(**(json.loads(ds.attrs['analysis-area'])))
return ds,aa
def list_geo(self):
return self.require_group("geo").keys()
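    def _example_geo_roundtrip(self, aa):
        '''Illustrative sketch (not part of the original module): store a
        masked array with put_geo() and read it back with get_geo().  The
        analysis-area object `aa` is assumed to come from the bundle code.'''
        from numpy import ma, arange
        self.open()
        a = ma.masked_less(arange(100).reshape(10, 10), 5)
        self.put_geo('example', a, aa)
        ds, area = self.get_geo('example')
        return ds[...], area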
def table(self, table_name, mode='a', expected=None):
import tables #@UnresolvedImport
from databundles.orm import Column
        raise NotImplementedError()
try:
return self.file.root._f_getChild(table_name)
except tables.NoSuchNodeError:
tdef = self.bundle.schema.table(table_name)
descr = {}
for i, col in enumerate(tdef.columns):
if col.datatype == Column.DATATYPE_INTEGER64:
descr[str(col.name)] = tables.Int64Col(pos=i) #@UndefinedVariable
elif col.datatype == Column.DATATYPE_INTEGER:
descr[str(col.name)] = tables.Int32Col(pos=i) #@UndefinedVariable
elif col.datatype == Column.DATATYPE_REAL:
descr[str(col.name)] = tables.Float32Col(pos=i) #@UndefinedVariable
elif col.datatype == Column.DATATYPE_TEXT:
descr[str(col.name)] = tables.StringCol(pos=i, itemsize= col.width if col.width else 50) #@UndefinedVariable
else:
raise ValueError('Unknown datatype: '+col.datatype)
table = self._file.createTable(self.file.root, table_name, descr, expectedrows=expected)
return table
| treyhunner/databundles | databundles/hdf5.py | Python | bsd-3-clause | 3,289 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.satellite import Satellite
from shinken.property import PathProp, IntegerProp
# Our main APP class
class Poller(Satellite):
do_checks = True # I do checks
do_actions = False # but no actions
properties = Satellite.properties.copy()
properties.update({
'pidfile': PathProp(default='pollerd.pid'),
'port': IntegerProp(default='7771'),
'local_log': PathProp(default='pollerd.log'),
})
def __init__(self, config_file, is_daemon, do_replace, debug, debug_file):
super(Poller, self).__init__('poller', config_file, is_daemon, do_replace, debug, debug_file)
| wbsavage/shinken | shinken/daemons/pollerdaemon.py | Python | agpl-3.0 | 1,555 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from telemetry.internal.results import output_formatter
def ResultsAsDict(page_test_results, benchmark_metadata):
"""Takes PageTestResults to a dict serializable to JSON.
To serialize results as JSON we first convert them to a dict that can be
  serialized by the json module. It also requires a benchmark_metadata object
for metadata to be integrated into the results (currently the benchmark
name). This function will also output trace files if they exist.
Args:
page_test_results: a PageTestResults object
benchmark_metadata: a benchmark.BenchmarkMetadata object
"""
result_dict = {
'format_version': '0.2',
'next_version': '0.3',
# TODO(sullivan): benchmark_name should be removed when updating
# format_version to 0.3.
'benchmark_name': benchmark_metadata.name,
'benchmark_metadata': benchmark_metadata.AsDict(),
'summary_values': [v.AsDict() for v in
page_test_results.all_summary_values],
'per_page_values': [v.AsDict() for v in
page_test_results.all_page_specific_values],
'pages': {p.id: p.AsDict() for p in _GetAllPages(page_test_results)}
}
if page_test_results.serialized_trace_file_ids_to_paths:
result_dict['files'] = page_test_results.serialized_trace_file_ids_to_paths
return result_dict
def _GetAllPages(page_test_results):
pages = set(page_run.story for page_run in
page_test_results.all_page_runs)
return pages
class JsonOutputFormatter(output_formatter.OutputFormatter):
def __init__(self, output_stream, benchmark_metadata):
super(JsonOutputFormatter, self).__init__(output_stream)
self._benchmark_metadata = benchmark_metadata
@property
def benchmark_metadata(self):
return self._benchmark_metadata
def Format(self, page_test_results):
json.dump(
ResultsAsDict(page_test_results, self.benchmark_metadata),
self.output_stream, indent=2)
self.output_stream.write('\n')
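# Illustrative sketch (not part of the original module): writing a finished
# run's results to a JSON file.  The results and metadata objects are
# assumed to come from a completed benchmark run.
def _example_write_results(page_test_results, benchmark_metadata, path):
  with open(path, 'w') as f:
    JsonOutputFormatter(f, benchmark_metadata).Format(page_test_results)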
| catapult-project/catapult-csm | telemetry/telemetry/internal/results/json_output_formatter.py | Python | bsd-3-clause | 2,173 |
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
hiddenimports = ['lxml.etree']
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-lxml.objectify.py | Python | gpl-3.0 | 461 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ExportFilter',
fields=[
('code', models.CharField(max_length=255, serialize=False, primary_key=True)),
('label', models.CharField(max_length=255)),
('order', models.IntegerField()),
('description', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExportInstance',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('filter_params', models.CharField(max_length=255)),
('timestamp', models.DateTimeField()),
('errors', models.IntegerField(default=0)),
('warnings', models.IntegerField(default=0)),
('export_filter', models.ForeignKey(to='export.ExportFilter')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExportType',
fields=[
('code', models.CharField(max_length=255, serialize=False, primary_key=True)),
('path', models.CharField(max_length=255)),
('label', models.CharField(max_length=255)),
('description', models.TextField()),
('order', models.IntegerField()),
('model', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Status',
fields=[
('code', models.CharField(max_length=255, serialize=False, primary_key=True)),
('label', models.CharField(max_length=255)),
('description', models.TextField()),
],
options={
'verbose_name_plural': 'statuses',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='exportinstance',
name='export_type',
field=models.ForeignKey(to='export.ExportType'),
preserve_default=True,
),
migrations.AddField(
model_name='exportinstance',
name='status',
field=models.ForeignKey(to='export.Status'),
preserve_default=True,
),
migrations.AddField(
model_name='exportinstance',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| unt-libraries/catalog-api | django/sierra/export/migrations/0001_initial.py | Python | bsd-3-clause | 2,987 |
#!/usr/bin/env python
###############################################################
### NZBGET POST-PROCESSING SCRIPT ###
# Convert a Blu-Ray to MKV.
#
# This script converts a Blu-Ray disc to MKV file with MakeMKV.
# Blu-Ray ISOs and directories can be processed.
###############################################################
### OPTIONS ###
#Directory=${MainDir}/mkv
### NZBGET POST-PROCESSING SCRIPT ###
###############################################################
import logging
import os
import pathlib
import re
import subprocess
import sys
# NZBGet conveys a wealth of information to the post-processing script by using environment variables.
ENVAR_DOWNLOAD_DIRECTORY = "NZBPP_DIRECTORY" # Directory path of downloaded files
ENVAR_DOWNLOAD_STATUS = "NZBPP_TOTALSTATUS" # Status of downloaded files (e.g., success, failure)
ENVAR_MAKEMKV_PROFILE = "NZBPO_PROFILE" # Path of MakeMKV XML profile
ENVAR_MKV_DIRECTORY = "NZBPO_DIRECTORY" # Directory path of converted movies
ENVAR_MOVIE_TITLES = "NZBPP_TITLES"
MAKEMKV_BINARY = "makemkvcon"
MAKEMKV_PATTERN_TITLE_INFO = 'TINFO:(?P<number>\d+),\d+,\d+,'
MAKEMKV_PATTERN_TITLE_FILE = '{}"(?P<fname>.+\.mkv)"'.format(MAKEMKV_PATTERN_TITLE_INFO)
MAKEMKV_PATTERN_TITLE_DETAILS = '{}"(?P<name>.+) - (?P<chapters>\d+) chapter\(s\) , ' \
'(?P<size>\d+\.?\d*) GB"'.format(MAKEMKV_PATTERN_TITLE_INFO)
MSG_END_CONVERSION = "Successfully converted {} to MKV."
NZBGET_LOG_FORMAT = "[%(levelname)s] %(message)s"
POSTPROCESS_EXIT_CODE_ERROR = 94
POSTPROCESS_EXIT_CODE_SUCCESS = 93 # Returned code when post-process is successful
REQUIRED_OPTIONS = (ENVAR_MAKEMKV_PROFILE, ENVAR_MKV_DIRECTORY)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def is_configured():
missing_opt = False
for option in REQUIRED_OPTIONS:
if option not in os.environ:
logger.error("The following configuration option must be defined: {}.".format(option))
missing_opt = True
if missing_opt:
return False
return True
def find_makemkv_binary():
try:
bin_path = subprocess.check_output(
['which', MAKEMKV_BINARY], stderr=subprocess.DEVNULL, universal_newlines=True)
except subprocess.CalledProcessError:
logger.error("MakeMKV binary not found.")
return None
return pathlib.PurePath(bin_path.rstrip())
def find_blu_ray_sources(path, multi=1):
sources_type = None
sources = list(path.rglob('BDMV/index.bdmv')) or None
if sources:
sources_type = "file"
for i in range(len(sources)):
sources[i] = sources[i].parents[1]
else:
iso_images = path.rglob('*.iso')
sources = sorted(iso_images, key=lambda iso: iso.stat().st_size, reverse=True) or None
if sources:
sources_type = "iso"
if sources:
sources_number = len(sources)
if multi == 1:
if sources_number > 1:
logger.warning("More than one blu-ray source was found.")
sources = sources[0]
elif multi > 1:
if sources_number != multi:
logger.warning("{0} blu-ray sources were found ({1} asked).".format(sources_number, multi))
sources = sources[:multi]
return sources_type, sources
def identify_movie_titles(source, multi=1):
makemkv = find_makemkv_binary()
title_fname = re.compile(MAKEMKV_PATTERN_TITLE_FILE)
title_details = re.compile(MAKEMKV_PATTERN_TITLE_DETAILS)
titles = list()
with subprocess.Popen(
[makemkv, '-r', 'info', '{type}:{path}'.format(**source)],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:
line = p.stdout.readline()
while line:
line = line.rstrip()
m = title_fname.match(line)
if m is not None:
fname = m.group('fname')
line = p.stdout.readline()
while line:
line = line.rstrip()
m = title_details.match(line)
if m is not None:
number = int(m.group('number'))
chapters = int(m.group('chapters'))
size = float(m.group('size'))
titles.append({'number': number, 'fname': fname, 'chapters': chapters, 'size': size})
break
line = p.stdout.readline()
line = p.stdout.readline()
if not titles:
return None
titles = sorted(titles, key=lambda title: title['chapters'], reverse=True)
if multi == 1:
if len(titles) > 1:
if titles[0]['chapters'] == titles[1]['chapters']:
logger.warning("Two movie titles with the same number of chapters were found.")
return None
return titles[0]
elif multi > 1:
titles_number = len(titles)
if multi > titles_number:
logger.warning("Only {0} titles are available ({1} asked).".format(titles_number, multi))
return titles[:multi]
return titles
def convert_to_mkv(movie, source, title, destination, profile):
makemkv = find_makemkv_binary()
p = subprocess.Popen(
        [makemkv, '--profile={}'.format(profile), 'mkv', '{type}:{path}'.format(**source), str(title['number']), destination],
stderr=subprocess.STDOUT, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True)
line = p.stdout.readline()
while line:
line = line.rstrip()
logger.debug(line)
line = p.stdout.readline()
p.wait()
mkv_path = destination / title['fname']
if p.returncode != 0:
logger.error("An error was encountered during the conversion. Please check logs.")
try:
mkv_path.unlink()
except OSError:
pass
return None
mkv_new_path = mkv_path.with_name('{}.mkv'.format(movie))
try:
mkv_path.rename(mkv_new_path)
except OSError:
if not mkv_path.is_file():
logger.error("An error was encountered during the conversion. Please check logs.")
else:
logger.warning("Unable to rename {} to {}".format(mkv_path, mkv_new_path))
return None
return mkv_new_path
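# Illustrative sketch (not part of the original script): chaining the helpers
# above for a disc with a single main title.  The directory paths and the
# movie name are hypothetical.
def _example_convert_single_title(download_dir, output_dir, profile):
    source_type, source = find_blu_ray_sources(pathlib.Path(download_dir))
    if source is None:
        return None
    makemkv_source = {'type': source_type, 'path': source}
    title = identify_movie_titles(makemkv_source)
    if title is None:
        return None
    return convert_to_mkv('Example Movie', makemkv_source, title,
                          pathlib.Path(output_dir), profile)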
if __name__ == '__main__':
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
    console.setFormatter(logging.Formatter(NZBGET_LOG_FORMAT))
logger.addHandler(console)
if is_configured() is False:
sys.exit(POSTPROCESS_EXIT_CODE_ERROR)
| alexandre-figura/nzbget-pp-script_bluray-to-mkv | script/bluray_to_mkv.py | Python | gpl-3.0 | 6,687 |