text (string, lengths 4–1.02M) | meta (dict)
---|---
from flask import request, render_template
from flask_paginate import Pagination, get_page_parameter
from .. import db
from . import gallery
@gallery.route('/<name>')
def gallery(name):
page = request.args.get(get_page_parameter(), type=int, default=1)
sql = "SELECT date,url FROM `gallery` WHERE name=%s ORDER BY `date` DESC limit %s, %s"
sql2 = "SELECT COUNT(*) FROM `gallery` WHERE name=%s "
fetch = (page-1) * 10
result = db.engine.execute(sql, (name, fetch, 10)).fetchall()  # LIMIT offset, row_count: 10 rows per page (matches per_page below)
count = db.engine.execute(sql2, name).fetchall()[0]['COUNT(*)']
all_pic_list = []
width_list = []
for x in result:
pic_list = dict()
pic_list['date'] = x[0]
url_list = x[1].split()
length = len(url_list)
width = 0
if length == 1:
width = 12
elif 1 < length < 4:
width = 12 / length
elif length == 4:
width = 6
elif 4 < length < 9:
width = 12 / int(length / 2)
elif length == 9:
width = 4
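# Illustrative mapping onto a 12-column grid (presumably Bootstrap, given
# bs_version=4 below): 1 url -> width 12, 3 urls -> width 4, 6 urls -> 12 / (6 // 2) = 4,
# 9 urls -> width 4, i.e. a 3x3 layout.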
width_list.append(int(width))
new_url_list = []
for url in url_list:
sub = url.split('/')[3]
small = url.replace(sub, 'mw690')
large = url.replace(sub, 'large')
new_url = {
'small': small,
'large': large
}
new_url_list.append(new_url)
pic_list['url'] = new_url_list
all_pic_list.append(pic_list)
pagination = Pagination(page=page, total=count, bs_version=4, per_page=10, alignment='center')
icon = name+'.ico'
return render_template('gallery/gallery.html', all_pic_list=all_pic_list, width_list=width_list,
pagination=pagination, title=name, filename=icon, page=page)
| {
"content_hash": "c98de9f36670c3232cddde71e0cc2328",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 100,
"avg_line_length": 31.20689655172414,
"alnum_prop": 0.5464088397790056,
"repo_name": "JR--Chen/flasky",
"id": "e6b286399771daf4396b63cd767fd4e3f1325fd3",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/gallery/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "332831"
},
{
"name": "HTML",
"bytes": "40033"
},
{
"name": "JavaScript",
"bytes": "139994"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "113702"
}
],
"symlink_target": ""
} |
from urllib.parse import urlencode, urlparse, parse_qsl
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from haystack.query import SearchQuerySet
from haystack.views import FacetedSearchView
from .forms import RulingSearchForm
from .models import Ruling
default_sqs = (SearchQuerySet()
.facet('court', order='term')
.facet('jurisdiction', order='term')
.facet('granted', order='term')
)
class RulingSearchView(FacetedSearchView):
results_per_page = 15
def extra_context(self):
extra = super(RulingSearchView, self).extra_context()
d = dict(parse_qsl(urlparse(self.request.get_full_path()).query))
d.pop('page', None)
extra['getvars'] = '&' + urlencode([
(k.encode('utf-8'), v.encode('latin1')) for k, v in d.items()])
return extra
search = RulingSearchView(form_class=RulingSearchForm,
searchqueryset=default_sqs)
def show_ruling(request, slug):
obj = get_object_or_404(Ruling, slug=slug)
return render(request, 'rulings/show.html', {
'object': obj,
'FILE_URL_PREFIX': settings.FILE_URL_PREFIX
})
| {
"content_hash": "820b5447e2de9bc05549a80cfe8f188c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 30.25,
"alnum_prop": 0.6495867768595042,
"repo_name": "netzwerkrecherche/auskunftsrecht",
"id": "63eb4adedcf5a391aa4e2004090b677bb29113a6",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rulings/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45"
},
{
"name": "HTML",
"bytes": "14107"
},
{
"name": "Python",
"bytes": "16127"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicBjStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.nic.bj/status_available.txt"
host = "whois.nic.bj"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'available')
def test_available(self):
eq_(self.record.available, True)
def test_domain(self):
eq_(self.record.domain, None)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_admin_contacts(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.admin_contacts)
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_registrar(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrar)
def test_registrant_contacts(self):
eq_(self.record.registrant_contacts.__class__.__name__, 'list')
eq_(self.record.registrant_contacts, [])
def test_technical_contacts(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.technical_contacts)
def test_updated_on(self):
eq_(self.record.updated_on, None)
def test_domain_id(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
def test_disclaimer(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer)
| {
"content_hash": "083c173333fafa0187c3991b9a50fb93",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 95,
"avg_line_length": 34.38181818181818,
"alnum_prop": 0.6805922792173453,
"repo_name": "huyphan/pyyawhois",
"id": "dd36b25b275dd7e4c2df44bc128334d8cafce3ac",
"size": "2151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_nic_bj_status_available.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
from docker_registry_client import BaseClient
import pkg_resources
def test_base_client(registry):
cli = BaseClient('http://localhost:5000', api_version=2)
assert cli.catalog() == {'repositories': []}
def test_base_client_edit_manifest(docker_client, registry):
cli = BaseClient('http://localhost:5000', api_version=2)
build = docker_client.build(
pkg_resources.resource_filename(__name__, 'fixtures/base'),
'localhost:5000/x-drc-example:x-drc-test', stream=True,
)
for line in build:
print(line)
push = docker_client.push(
'localhost:5000/x-drc-example', 'x-drc-test', stream=True,
insecure_registry=True,
)
for line in push:
print(line)
m = cli.get_manifest('x-drc-example', 'x-drc-test')
assert m._content['name'] == 'x-drc-example'
assert m._content['tag'] == 'x-drc-test'
cli.put_manifest('x-drc-example', 'x-drc-test-put', m)
pull = docker_client.pull(
'localhost:5000/x-drc-example', 'x-drc-test-put', stream=True,
insecure_registry=True, decode=True,
)
pull = list(pull)
tag = 'localhost:5000/x-drc-example:x-drc-test-put'
expected_statuses = {
'Status: Downloaded newer image for ' + tag,
'Status: Image is up to date for ' + tag,
}
errors = [evt for evt in pull if 'error' in evt]
assert errors == []
assert {evt.get('status') for evt in pull} & expected_statuses
| {
"content_hash": "262e6164da4e88ddd1d0202606205839",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 70,
"avg_line_length": 29.693877551020407,
"alnum_prop": 0.6274914089347079,
"repo_name": "yodle/docker-registry-client",
"id": "6ac2bdfc503e590ca1d1017b694b6d4c2e6fbf21",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_base_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41648"
}
],
"symlink_target": ""
} |
import time
import datetime
import subprocess
import multiprocessing
import argparse
import TestConfig
import Configs
import ZfsApi
import Pid
import Common
import MonitorThread
import ReceiveThread
import Results
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true",
help="The script will periodically print stats about TXGs and "
" receive speed")
parser.add_argument('-t', '--threads', type=int, default=4,
choices=xrange(1,32),
help="The number of concurrent receives to perform")
args = parser.parse_args()
# Use TestConfig to ensure this computer is set up properly
TestConfig.check_all()
# This test case will use the test send file, check that it will work
TestConfig.check_testfile()
Pid.create_pid_file()
# Establish where this test will be writing its output
current_min = time.strftime("%Y%m%d%H%M%S")
zfs_receive_path = Configs.test_filesystem_path + '/runs/' + current_min
start_txg = ZfsApi.get_current_txg(Configs.main_pool)
results_collector = Results.ResultsCollector(zfs_receive_path)
results_collector.gather_start_results()
if args.verbose:
monitor_thread = MonitorThread.MonitorThread(zfs_receive_path)
monitor_thread.start()
# Create the base FS; each thread will receive into a sub-filesystem of it
ZfsApi.create_filesystem(zfs_receive_path)
start_time = time.time()
def receive_file(zfs_filesystem):
ZfsApi.zfs_recv(Configs.test_file_full_path, zfs_filesystem)
try:
zfs_filesystem_list = []
for count in xrange(args.threads):
zfs_filesystem_list.append(zfs_receive_path + '/' + str(count))
workerPool = multiprocessing.Pool(processes=args.threads)
workerPool.map(receive_file, zfs_filesystem_list)
workerPool.close()
workerPool.join()
except KeyboardInterrupt:
pass
end_time = time.time()
results_collector.gather_end_results()
end_txg = ZfsApi.get_current_txg(Configs.main_pool)
time_elapsed = end_time - start_time
print("that took " + str(datetime.timedelta(seconds=time_elapsed)))
elapsed_txgs = end_txg - start_txg
txgs_per_second = elapsed_txgs / time_elapsed
print("TXGs/second: " + str(txgs_per_second))
property_dictionary = ZfsApi.get_filesystem_properties(zfs_receive_path, ['used'])
used_in_bytes = property_dictionary["used"]
used_in_mebibytes = Common.bytes_to_mebibyte(used_in_bytes)
print("received " + str(used_in_bytes))
bytes_per_second = used_in_mebibytes / time_elapsed
print("Speed: " + str(bytes_per_second) + " MiB/s")
# Clean up the PID file to allow other runs
Pid.destroy_pid_file()
| {
"content_hash": "5ea8fb14eef407d2c2fa09f904658eb3",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 82,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.7394502516453736,
"repo_name": "datto/zfs-tests",
"id": "3861d31844cd9b755a241438fe61eb8506e0ba73",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MultiReceive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19146"
}
],
"symlink_target": ""
} |
from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# init group creation
wd.find_element_by_name("new").click()
# fill group form
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
self.open_groups_page()
self.group_cache = None
def modify_first_group(self, group):
wd = self.app.wd
self.open_groups_page()
# select first group
wd.find_element_by_name("selected[]").click()
# edit first group
wd.find_element_by_name("edit").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys(group.name)
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys(group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys(group.footer)
# submit edits
wd.find_element_by_name("update").click()
self.open_groups_page()
self.group_cache = None
def delete_first_group(self):
wd = self.app.wd
self.open_groups_page()
# select first group
wd.find_element_by_name("selected[]").click()
# submit deletion
wd.find_element_by_name("delete").click()
self.open_groups_page()
self.group_cache = None
def count(self):
wd = self.app.wd
self.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_groups_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector('span.group'):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
# return [Group(name=e.text, id=e.id) for e in wd.find_elements_by_css_selector('span.group') ]
| {
"content_hash": "bf5dc8693f41d52e2f26178113cec9e5",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 103,
"avg_line_length": 39.382716049382715,
"alnum_prop": 0.5971786833855799,
"repo_name": "oostapenko84/python_training",
"id": "396255f605251004a31aa193d4368b72212b934a",
"size": "3190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29868"
}
],
"symlink_target": ""
} |
import unittest
from popper.parser import WorkflowParser
from popper.cli import log
class TestWorkflow(unittest.TestCase):
def setUp(self):
log.setLevel("CRITICAL")
def tearDown(self):
log.setLevel("NOTSET")
def test_empty_file(self):
with open("/tmp/test.yml", "w"):
pass
self.assertRaises(SystemExit, WorkflowParser.parse, **{"file": "/tmp/test.yml"})
def test_new_workflow(self):
wf_data = {}
self.assertRaises(SystemExit, WorkflowParser.parse, **{"wf_data": wf_data})
wf_data = {"unexpected": []}
self.assertRaises(SystemExit, WorkflowParser.parse, **{"wf_data": wf_data})
wf_data = {
"steps": [
{
"uses": "foo",
"id": "step",
"env": {"EN": "EE"},
"secrets": ["S"],
"dir": "/path/to/",
"options": {"name": "spam"},
},
{"uses": "bar", "runs": ["a", "b"], "args": ["c"], "skip_pull": True},
],
"options": {"env": {"FOO": "bar"}, "secrets": ["Z"],},
}
wf = WorkflowParser.parse(wf_data=wf_data)
step = wf.steps[0]
self.assertEqual("step", step.id)
self.assertEqual("foo", step.uses)
self.assertEqual(("Z", "S"), step.secrets)
self.assertEqual({"EN": "EE", "FOO": "bar"}, step.env)
self.assertEqual("/path/to/", step.dir)
self.assertEqual("spam", step.options.name)
self.assertTrue(not step.runs)
self.assertTrue(not step.args)
self.assertFalse(step.skip_pull)
step = wf.steps[1]
self.assertEqual("bar", step.uses)
self.assertEqual(("a", "b"), step.runs)
self.assertEqual(("c",), step.args)
self.assertTrue(step.skip_pull)
self.assertTrue(not step.dir)
self.assertEqual({"FOO": "bar"}, step.env)
self.assertEqual(("Z",), step.secrets)
self.assertEqual({"FOO": "bar"}, wf.options.env)
self.assertEqual(("Z",), wf.options.secrets)
def test_filter_all_but_given_step(self):
wf_data = {
"steps": [
{"uses": "foo", "id": "one"},
{"uses": "bar", "id": "two"},
{"uses": "baz", "id": "three"},
]
}
wf = WorkflowParser.parse(wf_data=wf_data, step="two")
self.assertEqual(1, len(wf.steps))
self.assertEqual("two", wf.steps[0].id)
self.assertEqual("bar", wf.steps[0].uses)
# non-existing name
self.assertRaises(
SystemExit, WorkflowParser.parse, **{"wf_data": wf_data, "step": "four"}
)
# without id
wf_data = {"steps": [{"uses": "foo"}, {"uses": "bar"}, {"uses": "baz"},]}
wf = WorkflowParser.parse(wf_data=wf_data, step="2")
self.assertEqual(1, len(wf.steps))
self.assertEqual("2", wf.steps[0].id)
def test_skip_steps(self):
wf_data = {
"steps": [
{"uses": "foo", "id": "one"},
{"uses": "bar", "id": "two"},
{"uses": "baz", "id": "three"},
]
}
# skip one step
wf = WorkflowParser.parse(wf_data=wf_data, skipped_steps=["two"])
self.assertEqual(2, len(wf.steps))
self.assertEqual("one", wf.steps[0].id)
self.assertEqual("three", wf.steps[1].id)
# more than one
wf = WorkflowParser.parse(wf_data=wf_data, skipped_steps=["one", "three"])
self.assertEqual(1, len(wf.steps))
self.assertEqual("two", wf.steps[0].id)
# non-existing name
self.assertRaises(
SystemExit,
WorkflowParser.parse,
**{"wf_data": wf_data, "skipped_steps": ["four"]}
)
# skip one step
wf = WorkflowParser.parse(wf_data=wf_data, skipped_steps=["two"])
self.assertEqual(2, len(wf.steps))
self.assertEqual("one", wf.steps[0].id)
self.assertEqual("three", wf.steps[1].id)
# without id
wf_data = {"steps": [{"uses": "foo"}, {"uses": "bar"}, {"uses": "baz"},]}
wf = WorkflowParser.parse(wf_data=wf_data, skipped_steps=["1", "3"])
self.assertEqual(1, len(wf.steps))
self.assertEqual("2", wf.steps[0].id)
def test_add_missing_ids(self):
wf_data = {"steps": [{"uses": "foo"}, {"uses": "bar"}]}
# skip one step
wf = WorkflowParser.parse(wf_data=wf_data)
self.assertEqual("1", wf.steps[0].id)
self.assertEqual("2", wf.steps[1].id)
def test_substitutions(self):
# test wrong format for substitution key
wf_data = {"steps": [{"uses": "whatever"}]}
self.assertRaises(
SystemExit,
WorkflowParser.parse,
**{"wf_data": wf_data, "substitutions": ["SUB1=WRONG"]}
)
# expect error when not all given subs are used
wf_data = {
"steps": [
{
"uses": "some_$_SUB1",
"id": "some other $_SUB2",
"env": {"FOO": "env_$_SUB3"},
"secrets": ["secret_$_SUB4"],
}
]
}
substitutions = [
"_SUB1=ONE",
"_SUB2=TWO",
"_SUB3=THREE",
"_SUB4=4",
"_SUB5=UNUSED",
]
self.assertRaises(
SystemExit,
WorkflowParser.parse,
**{"wf_data": wf_data, "substitutions": substitutions}
)
# allow loose substitutions
wf = WorkflowParser.parse(
wf_data=wf_data, substitutions=substitutions, allow_loose=True
)
step = wf.steps[0]
self.assertEqual("some_ONE", step.uses)
self.assertEqual("some other TWO", step.id)
self.assertEqual("env_THREE", step.env["FOO"])
self.assertEqual(("secret_4",), step.secrets)
| {
"content_hash": "7bde82f4e1329172df153cb75e36d06d",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 88,
"avg_line_length": 34.43103448275862,
"alnum_prop": 0.499749624436655,
"repo_name": "systemslab/popper",
"id": "afe1c028e99399ffd2fb834a00ddc76216d6c24c",
"size": "5991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "1473"
},
{
"name": "Python",
"bytes": "105732"
},
{
"name": "Shell",
"bytes": "42622"
}
],
"symlink_target": ""
} |
"""
genres.api
---
This module implements the Genres API.
"""
import logging
from . import finder, db
_db = None
def find(text):
"""
Tries to determine genres for the text.
"""
finder_obj = _create_finder()
return finder_obj.find(text)
def _get_database():
"""
Check if database has been created, otherwise construct it.
"""
global _db
if not _db:
db_obj = db.Db()
_db = db_obj
return _db
def _create_finder():
"""
Create finder object based on db.
"""
db_obj = _get_database()
return finder.Finder(db_obj)
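# Minimal usage sketch (illustrative; assumes the package is importable as
# `genres` per the repo layout):
#
#     from genres import api
#     print(api.find("distorted guitars and heavy drums"))
#
# The db.Db instance is built lazily on the first call to find() and cached in
# the module-level _db for subsequent calls.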
| {
"content_hash": "a8b21fda90b2874c4e6c41060ebf9e3d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 63,
"avg_line_length": 14.285714285714286,
"alnum_prop": 0.58,
"repo_name": "marteinn/genres",
"id": "ff97a1796973dc24ae2e0a3903069449e4c5c6b0",
"size": "625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genres/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "66"
},
{
"name": "Python",
"bytes": "11794"
}
],
"symlink_target": ""
} |
'''Unit tests for io.FileNode'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import os
import StringIO
import unittest
from grit.node import misc
from grit.node import io
from grit.node import empty
from grit import grd_reader
from grit import util
class FileNodeUnittest(unittest.TestCase):
def testGetPath(self):
root = misc.GritNode()
root.StartParsing(u'grit', None)
root.HandleAttribute(u'latest_public_release', u'0')
root.HandleAttribute(u'current_release', u'1')
root.HandleAttribute(u'base_dir', ur'..\resource')
translations = empty.TranslationsNode()
translations.StartParsing(u'translations', root)
root.AddChild(translations)
file_node = io.FileNode()
file_node.StartParsing(u'file', translations)
file_node.HandleAttribute(u'path', ur'flugel\kugel.pdf')
translations.AddChild(file_node)
root.EndParsing()
self.failUnless(file_node.GetFilePath() ==
util.normpath(
os.path.join(ur'../resource', ur'flugel/kugel.pdf')))
def VerifyCliquesContainEnglishAndFrenchAndNothingElse(self, cliques):
for clique in cliques:
self.failUnlessEquals(len(clique[0].clique), 2)
self.failUnless('en' in clique[0].clique)
self.failUnless('fr' in clique[0].clique)
def testLoadTranslations(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<file path="generated_resources_fr.xtb" lang="fr" />
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(grd.GetCliques())
def testIffyness(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
<translations>
<if expr="lang == 'fr'">
<file path="generated_resources_fr.xtb" lang="fr" />
</if>
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>
</release>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
grd.SetOutputContext('fr', {})
grd.RunGatherers(recursive=True)
def testConditionalLoadTranslations(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir=".">
<translations>
<if expr="True">
<file path="generated_resources_fr.xtb" lang="fr" />
</if>
<if expr="False">
<file path="no_such_file.xtb" lang="de" />
</if>
</translations>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
self.VerifyCliquesContainEnglishAndFrenchAndNothingElse(grd.GetCliques())
def testConditionalOutput(self):
xml = '''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3"
base_dir=".">
<outputs>
<output filename="resource.h" type="rc_header" />
<output filename="en/generated_resources.rc" type="rc_all"
lang="en" />
<if expr="pp_if('NOT_TRUE')">
<output filename="de/generated_resources.rc" type="rc_all"
lang="de" />
</if>
</outputs>
<release seq="3">
<messages>
<message name="ID_HELLO">Hello!</message>
</messages>
</release>
</grit>'''
grd = grd_reader.Parse(StringIO.StringIO(xml),
util.PathFromRoot('grit/test/data'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recursive=True)
outputs = grd.GetChildrenOfType(io.OutputNode)
self.failUnless(outputs[0].SatisfiesOutputCondition())
self.failUnless(outputs[0].GetType() == 'rc_header')
self.failUnless(outputs[1].SatisfiesOutputCondition())
self.failUnless(outputs[1].GetType() == 'rc_all')
self.failUnless(not outputs[2].SatisfiesOutputCondition())
self.failUnless(outputs[2].GetType() ==
'output_condition_not_satisfied_rc_all')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ae66a9a351f66a03ff51a422e299c1f3",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 97,
"avg_line_length": 37.13103448275862,
"alnum_prop": 0.6028974739970282,
"repo_name": "JoKaWare/WTL-DUI",
"id": "6a7069b140d8a7bbeec78b68706410e9bb37d6ff",
"size": "5573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/grit/grit/node/io_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "15757"
},
{
"name": "C",
"bytes": "6005744"
},
{
"name": "C++",
"bytes": "20436057"
},
{
"name": "Java",
"bytes": "24009"
},
{
"name": "Objective-C",
"bytes": "105435"
},
{
"name": "Objective-C++",
"bytes": "463565"
},
{
"name": "Python",
"bytes": "349752"
},
{
"name": "Shell",
"bytes": "2409"
},
{
"name": "nesC",
"bytes": "8497"
}
],
"symlink_target": ""
} |
'''
Test the output of a newly trained classifier.
'''
import numpy as np
import pandas as pd
import os, sys, glob, argparse
import cv2
import yaml
import pickle
sys.path.append('..')
from models.yolo_models import get_yolo_model, get_yolo_model_feats
from utils.decoder import decode
from utils.utils import md5check
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2, x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2, x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1[0], box1[2]], [box2[0], box2[2]])
intersect_h = _interval_overlap([box1[1], box1[3]], [box2[1], box2[3]])
intersect = intersect_w * intersect_h
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
union = w1 * h1 + w2 * h2 - intersect
return float(intersect) / union
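# Sanity check (illustrative, not part of the original file): two 10x10 boxes
# offset by 5 pixels in both axes overlap in a 5x5 region, so
#   bbox_iou([0, 0, 10, 10], [5, 5, 15, 15]) == 25.0 / (100 + 100 - 25)  # ~0.143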
def main(args):
#Load data
data_dir = args.ddir[0] + '/' #in case we forgot '/'
print('Opening file ' + args.config[0])
with open(args.config[0], 'r') as configfile:
config = yaml.safe_load(configfile)
image_dir = data_dir + config['preped_images_dir']
train_dir = data_dir
weights_dir = data_dir + config['weights_dir']
groundtruths_dir = data_dir + config['groundtruths_dir']
predictions_dir = data_dir + config['predictions_dir']
#Training type dependent
tracking_setup = config["tracking_setup"]
trained_weights = weights_dir + config[tracking_setup]['weights']
#based on get_yolo_model defaults and previous makTrain.py files
num_class = config[tracking_setup]['num_class']
obj_thresh = config[tracking_setup]['obj_thresh']
nms_thresh = config[tracking_setup]['nms_thresh']
annotations_dir = data_dir + config['annotations_dir']
list_of_train_files = annotations_dir + config['checked_annotations_fname']
annotations_file = annotations_dir + config['checked_annotations_fname']
print("With annotations file")
print(annotations_file)
with open(annotations_file, 'r') as fp:
all_imgs = yaml.load(fp)
if args.annotated:
print('Opening the already predicted files in file ' + args.annotated[0])
with open(args.annotated[0], 'r') as fp:
pred_imgs = yaml.load(fp)
if args.visual:
cv2.namedWindow('tracker', cv2.WINDOW_GUI_EXPANDED)
cv2.moveWindow('tracker', 20,20)
max_l = config['MAX_L'] #maximal object size in pixels
min_l = config['MIN_L']
im_size_h = config['IMAGE_H'] #size of training images for yolo
im_size_w = config['IMAGE_W'] #size of training images for yolo
##################################################
print("Loading YOLO models")
print("We will use the following model for testing: ")
print(trained_weights)
yolov3 = get_yolo_model(im_size_w, im_size_h, num_class, trainable=False)
yolov3.load_weights(
trained_weights, by_name=True) #TODO is by_name necessary here?
print("YOLO models loaded, my dear.")
########################################
#read in all images from checked annotations (GROUND TRUTH)
for i in range(len(all_imgs)):
basename = os.path.basename(all_imgs[i]['filename'])
#remove extension from basename:
name_seed_split = basename.split('.')[:-1]
name_seed = '.'.join(name_seed_split)
fname_gt = groundtruths_dir + name_seed + ".txt"
fname_pred = predictions_dir + name_seed + ".txt"
img_data = {'object': []}
img_data['filename'] = basename
img_data['width'] = all_imgs[i]['width']
img_data['height'] = all_imgs[i]['height']
#Reading ground truth
boxes_gt = []
for obj in all_imgs[i]['object']:
boxes_gt.append(
[obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']])
# sys.stdout.write('GT objects:')
# sys.stdout.write(str(len(boxes_gt)))
# sys.stdout.flush()
#do box processing
img = cv2.imread(image_dir + basename)
# print("File, {}".format(image_dir + basename))
mmFname = basename.split('_f')[-1].split('_')[0]
mmFrame = basename.split('_f')[-1].split('_')[1].split('f')[0]
mmGT = str(len(boxes_gt))
frame = img.copy()
with open(fname_gt, 'w') as file_gt: #left top righ bottom
for b in boxes_gt:
obj = {}
if ((b[2] - b[0]) * (b[3] - b[1])) < 10:
continue
obj['name'] = 'aoi'
obj['xmin'] = int(b[0])
obj['ymin'] = int(b[1])
obj['xmax'] = int(b[2])
obj['ymax'] = int(b[3])
img_data['object'] += [obj]
file_gt.write(obj['name'] + " ")
file_gt.write(str(obj['xmin']) + " ")
file_gt.write(str(obj['ymin']) + " ")
file_gt.write(str(obj['xmax']) + " ")
file_gt.write(str(obj['ymax']))
file_gt.write('\n')
if args.visual:
cv2.rectangle(
frame, (int(obj['xmin']) - 2, int(obj['ymin']) - 2),
(int(obj['xmax']) + 2, int(obj['ymax']) + 2), (200, 0, 0), 1)
if args.annotated:
boxes_pred = []
for obj in pred_imgs[i]['object']:
boxes_pred.append(
[obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']])
with open(fname_pred, 'w') as file_pred: #left top righ bottom
for b in boxes_pred:
obj = {}
if ((b[2] - b[0]) * (b[3] - b[1])) < 10:
continue
obj['name'] = 'aoi'
obj['xmin'] = int(b[0])
obj['ymin'] = int(b[1])
obj['xmax'] = int(b[2])
obj['ymax'] = int(b[3])
img_data['object'] += [obj]
file_pred.write(obj['name'] + " ")
file_pred.write('100' + " ") # we don't store probability of detection in annotations
file_pred.write(str(obj['xmin']) + " ")
file_pred.write(str(obj['ymin']) + " ")
file_pred.write(str(obj['xmax']) + " ")
file_pred.write(str(obj['ymax']))
file_pred.write('\n')
if args.visual:
cv2.rectangle(
frame, (int(obj['xmin']) - 2, int(obj['ymin']) - 2),
(int(obj['xmax']) + 2, int(obj['ymax']) + 2), (200, 0, 0), 1)
#calculate scores for this image
mmTP = 0
for bgt in boxes_gt:
for bpred in boxes_pred:
if bbox_iou(bgt,bpred) > 0.5:
mmTP = mmTP + 1 #find one matching prediction
break
mmFP = 0
has_match = False
for bpred in boxes_pred:
for bgt in boxes_gt:
if bbox_iou(bgt,bpred) > 0.5:
has_match = True
break # found a match for predicion
if has_match == True:
has_match = False
else:
mmFP = mmFP + 1
#display scores for this image
print(mmFname + ', ' + mmFrame + ', ' + str(mmGT) + ', ' + str(mmTP) + ', ' + str(mmFP))
else:
# preprocess the image
image_h, image_w, _ = img.shape
new_image = img[:, :, ::-1] / 255.
new_image = np.expand_dims(new_image, 0)
# run the prediction
sys.stdout.write('Yolo predicting...')
sys.stdout.flush()
yolos = yolov3.predict(new_image)
sys.stdout.write('decoding...')
sys.stdout.flush()
boxes_predict = decode(yolos, obj_thresh, nms_thresh)
sys.stdout.write('done!#of boxes_predict:')
sys.stdout.write(str(len(boxes_predict)))
sys.stdout.write('\n')
sys.stdout.flush()
with open(fname_pred, 'w') as file_pred: #left top righ bottom
for b in boxes_predict:
xmin = int(b[0])
xmax = int(b[2])
ymin = int(b[1])
ymax = int(b[3])
confidence = float(b[4])
objpred = {}
objpred['name'] = 'aoi'
if xmin < 0: continue
if ymin < 0: continue
if xmax > im_size_w: continue
if ymax > im_size_h: continue
if (xmax - xmin) < min_l: continue
if (xmax - xmin) > max_l: continue
if (ymax - ymin) < min_l: continue
if (ymax - ymin) > max_l: continue
objpred['xmin'] = xmin
objpred['ymin'] = ymin
objpred['xmax'] = xmax
objpred['ymax'] = ymax
objpred['confidence'] = confidence
file_pred.write(objpred['name'] + " ")
file_pred.write(str(objpred['confidence']) + " ")
file_pred.write(str(objpred['xmin']) + " ")
file_pred.write(str(objpred['ymin']) + " ")
file_pred.write(str(objpred['xmax']) + " ")
file_pred.write(str(objpred['ymax']))
file_pred.write('\n')
if args.visual:
cv2.rectangle(
frame, (int(objpred['xmin']) - 2, int(objpred['ymin']) - 2),
(int(objpred['xmax']) + 2, int(objpred['ymax']) + 2), (0, 0, 198), 1)
str_conf = "{:.1f}".format(objpred['confidence'])
cv2.putText(frame, str_conf, (int(objpred['xmax']),int(objpred['ymax'])), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (200,200,250), 1);
if args.visual:
cv2.imshow('tracker', frame)
key = cv2.waitKey(1) #& 0xFF
#precision = tp / (tp + fp)
# for box_gt in boxes_gt:
# for box_predict in boxes_predict:
# iou_val = bbox_iou(box_predict,box_gt)
# print(iou_val)
#count predictions which reach a threshold of let's say 0.5
# if we change the detection threshold I think we'll get a ROC curve - that'd be cute.
print('Finished! :o)')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Prepare a list of detections for the original input files using the final classifier. Those files are saved in the groundtruths and predictions directories, which can be interpreted by https://github.com/rafaelpadilla/Object-Detection-Metrics',
epilog=
'Any issues and clarifications: github.com/ctorney/uavtracker/issues')
parser.add_argument(
'--config', '-c', required=True, nargs=1, help='Your yml config file')
parser.add_argument(
'--ddir',
'-d',
required=True,
nargs=1,
help='Root of your data directory')
parser.add_argument('--visual', '-v', default=False, action='store_true',
help='Display tracking progress')
parser.add_argument('--annotated', '-a', required=False, nargs=1,
help='Provide file with annotated results if you have already run prediction')
args = parser.parse_args()
main(args)
| {
"content_hash": "d742c3f662ba8fd317f5e419faba04e1",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 244,
"avg_line_length": 38.81578947368421,
"alnum_prop": 0.5001694915254238,
"repo_name": "ctorney/ungTracker",
"id": "9eff29c4457b91d7b1a1b21180484df6e77fedcf",
"size": "11800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train/postTrainTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80398"
}
],
"symlink_target": ""
} |
'''
Created on Oct 2, 2012
@author: Rex White
'''
from optparse import OptionParser
from redhat_support_lib.api import API
import ConfigParser
import unittest
import os
import logging
__author__ = 'Rex White <[email protected]>'
class productTest(unittest.TestCase):
def setUp(self):
self.config = ConfigParser.ConfigParser()
self.config.read('strata.cfg')
self.user = self.config.get('strata', 'external_username')
if (self.user.startswith('$')):
self.user = os.environ.get(self.user[1:])
logging.log(logging.DEBUG, "Username: %s" % self.user)
self.passwd = self.config.get('strata', 'external_password')
if (self.passwd.startswith('$')):
self.passwd = os.environ.get(self.passwd[1:])
logging.log(5, "password: %s" % self.passwd)
self.url = self.config.get('strata', 'url')
if (self.url.startswith('$')):
self.url = os.environ.get(self.url[1:])
print "url: ", self.url
self.api = API(username=self.user,
password=self.passwd,
url=self.url,
no_verify_ssl=True)
def tearDown(self):
self.api.disconnect();
# This is just a place-holder and needs to be replaced with a more meaningful test.
def testGetproducts(self):
print "---testGetproducts---"
prodAry = self.api.products.list()
print "products:"
for prod in prodAry:
print "\n %s:" % prod.get_name()
verAry = prod.get_versions();
for ver in verAry:
print " %s" % ver
pass
if __name__ == "__main__":
# Do something clever with the command line nuggets
use = "Usage: %prog [options] [test_case test_case ...]"
parser = OptionParser(usage=use)
parser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False, help="Disable verbose debug output")
parser.add_option("-l", "--list", dest="list", action="store_true", default=False, help="List all test cases")
options, args = parser.parse_args()
# enable logging, as needed
if options.quiet == False:
logging.basicConfig(level=logging.DEBUG)
# get testcase list
cases = unittest.defaultTestLoader.getTestCaseNames(productTest)
# handle 'list' option
if options.list:
for case in cases:
print case
quit()
# run all tests if none specified
if args is None or len(args) == 0:
args = cases
testSuite = unittest.TestSuite(map(productTest, args))
unittest.TextTestRunner().run(testSuite)
else:
# set debug option to True when running as a module
class options:
debug = True
| {
"content_hash": "97321cc9404fd7bf7efeb9a8bda6f5e4",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 125,
"avg_line_length": 30.054347826086957,
"alnum_prop": 0.6007233273056057,
"repo_name": "redhataccess/redhat-support-lib-python",
"id": "4d7d00be648a6935a26693e7ebbc185ab5b06f50",
"size": "2765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/redhat_support_lib/productTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2194"
},
{
"name": "Python",
"bytes": "1083764"
}
],
"symlink_target": ""
} |
import os
import logging
import sys
sys.path.insert(1, os.path.join(os.path.dirname(__file__), u'..'))
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from tornado.options import define, options
from tornado.httpclient import AsyncHTTPClient
from tormon import core
from tormon.core import utils
root = lambda *x: os.path.join(os.path.dirname(__file__), u'../', *x)
APP_ROOT = root()
STATIC_ROOT = root(u'static')
TEMPLATE_ROOT = root(u'templates')
BLOCKING_THRESHOLD = 0.2
try:
AsyncHTTPClient.configure(u"tornado.curl_httpclient.CurlAsyncHTTPClient")
except ImportError:
pass
define(u'app', default=u'', type=str, help=u'Pusher application to run')
define(u'monitor', default=u'', type=str, help=u'Monitor to run')
define(u"writer", default=u'memory', help=u"Name of writer that's persisting urls")
define(
u"reader",
default=u"config",
help=u"Name reader for getting urls (by default, they're taken from config file)."
)
define(u'handler', default=u'log', help=u"Name of handler in case resource is failing")
define(u'config', type=str, default=None, help=u'Path to yaml file')
define(u"host", default=u"localhost")
define(u"port", default=8081, type=int)
define(u"debug", default=False, type=bool)
define(u"concurrency", default=0, type=int)
def setup_application(monitor_instance):
ApplicationClass = utils.load_app(options[u'app'])
return ApplicationClass(
monitor_instance=monitor_instance,
debug=options[u'debug'],
static_path=STATIC_ROOT,
template_path=TEMPLATE_ROOT
)
def setup_monitor():
MonitorClass = utils.load_monitor(options[u'monitor'])
return MonitorClass(
reader=options[u'reader'],
writer=options[u'writer'],
handler=options[u'handler'],
concurrency=options[u'concurrency']
)
def validate_options():
if not options[u'config']:
logging.error(u"Config is missing")
sys.exit(1)
def main():
options.parse_command_line()
validate_options()
core.settings.import_config(options[u'config'])
monitor = setup_monitor()
application = setup_application(monitor_instance=monitor)
server = HTTPServer(application)
logging.info(u"Starting APP-MONITOR on {0}:{1}.".format(
options[u'host'], options[u'port']
))
utils.register_shutdown_handlers(server, monitor)
if options[u'debug']:
logging.info(u"Setting blocking threshold to: {}".format(
BLOCKING_THRESHOLD
))
IOLoop.instance().set_blocking_log_threshold(BLOCKING_THRESHOLD)
server.listen(options[u'port'], options[u'host'])
monitor.start()
IOLoop.instance().start()
if __name__ == u'__main__':
main()
| {
"content_hash": "28cf48b0d30523035a8ace5ae7c60904",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 87,
"avg_line_length": 29.516129032258064,
"alnum_prop": 0.6888888888888889,
"repo_name": "jnosal/tormon",
"id": "f29d3db3accb0b78d1a404b67209ea5e72641610",
"size": "2745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tormon/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22078"
}
],
"symlink_target": ""
} |
import os, pprint, sys, math, numpy
from optparse import OptionParser
from netCDF4 import Dataset
def dir_entries(dir_path='', subdir=False, *args):
'''Return a list of file names found in directory 'dir_path'
If 'subdir' is True, recursively access subdirectories under 'dir_path'.
Additional arguments, if any, are file extensions to match filenames. Matched
file names are added to the list.
If there are no additional arguments, all files found in the directory are
added to the list.
Example usage: fileList = dir_entries(r'H:\TEMP', False, 'txt', 'py')
Only files with 'txt' and 'py' extensions will be added to the list.
Example usage: fileList = dir_entries(r'H:\TEMP', True)
All files and all the files in subdirectories under H:\TEMP will be added
to the list.
'''
dir_path = os.getcwd() + os.sep + dir_path
fileList = []
for file in os.listdir(dir_path):
dirfile = os.path.join(dir_path, file)
if os.path.isfile(dirfile):
if not args:
fileList.append(dirfile)
else:
if os.path.splitext(dirfile)[1][1:] in args:
fileList.append(dirfile)
# recursively access file names in subdirectories
elif os.path.isdir(dirfile) and subdir:
fileList.extend(dir_entries(dirfile, subdir, *args))
return fileList
def get_keys_and_descriptions_and_shapes_and_max_values(infile, filter_search):
return [
(key, infile.variables[key].description.strip(), infile.variables[key].shape, numpy.max(infile.variables[key]))
for key in infile.variables.keys()
if hasattr(infile.variables[key], 'description') and (
filter_search == '' or filter_search in key.lower() or filter_search in infile.variables[key].description.lower()
) and (
numpy.max(infile.variables[key]) != 0
)
]
def get_layer_index(infiles, variable_key):
variable = infiles[0].variables[variable_key]
if len(variable.shape) == 4:
return input('Variable has %i vertical layers. Please specify layer index to show:' %(variable.shape[1]))
return None
def get_longitude_and_latitude(infiles):
longitude = numpy.degrees(infiles[0].variables['GLON'][0])
latitude = numpy.degrees(infiles[0].variables['GLAT'][0])
return longitude, latitude
parser = OptionParser()
parser.add_option("-a", "--all-files", action="store_true", dest="all_files")
(options, args) = parser.parse_args()
potential_filenames = [entry.rsplit('/', 1)[1] for entry in dir_entries()]
wrfout_filenames = [entry for entry in potential_filenames if entry.find('wrfout_') == 0]
if len(wrfout_filenames) == 0:
print 'no wrfout files in this directory - aborting'
sys.exit(1)
infiles = None
infile_names = None
if not options.all_files:
print('List of the available wrfout-files in this directory. Choose one index to visualize.')
print('\n'.join(["%i:\t%s" %(index, entry) for index, entry in enumerate(wrfout_filenames)]))
file_index = input('Please enter one index:')
wrfout_file_name = wrfout_filenames[file_index]
infiles = [Dataset(wrfout_file_name)]
infile_names = [wrfout_file_name]
else:
infiles = []
for wrfout_file_name in wrfout_filenames:
try:
infiles.append(Dataset(wrfout_file_name))
except Exception as e:
print "could not import file %s: %s" %(wrfout_file_name, str(e))
infile_names = wrfout_filenames
filter_search = raw_input('Please enter a search term within variable descriptions or keys to filter by:')
filter_search = filter_search.lower().strip()
keys_and_descriptions_and_shapes_and_max_values = get_keys_and_descriptions_and_shapes_and_max_values(infiles[0], filter_search)
if len(keys_and_descriptions_and_shapes_and_max_values) == 0:
print 'no variables found with that term in the description or key'
sys.exit(1)
print('List of the available variables in this file. Choose one index to visualize.')
print ('\n'.join([
"%i:\t%s%s%s (shape: %s, max value: %s)" %(
index,
key_and_description_and_shape[0],
''.join([' ' for index in range(12-len(key_and_description_and_shape[0]))]),
key_and_description_and_shape[1],
key_and_description_and_shape[2],
numpy.max(infiles[0].variables[key_and_description_and_shape[0]])
)
for index, key_and_description_and_shape in enumerate(keys_and_descriptions_and_shapes_and_max_values)
]))
variable_index = input('Please enter one index:')
print('visualizing %s for %s' %(str(keys_and_descriptions_and_shapes_and_max_values[variable_index]), infile_names[0]))
layer_index = get_layer_index(infiles, keys_and_descriptions_and_shapes_and_max_values[variable_index][0])
longitude, latitude = get_longitude_and_latitude(infiles)
min_longitude = numpy.min(longitude)
max_longitude = numpy.max(longitude)
min_latitude = numpy.min(latitude)
max_latitude = numpy.max(latitude)
print(
'latitude from: %s, to: %s; longitude from: %s, to: %s' %(
str(min_latitude),
str(max_latitude),
str(min_longitude),
str(max_longitude)
)
)
import matplotlib.pyplot as pyplot
from mpl_toolkits.basemap import Basemap
plot_map = Basemap(
projection='lcc',
lat_1=(max_latitude+min_latitude)/2,
lat_0=(max_latitude+min_latitude)/2,
lon_0=(max_longitude+min_longitude)/2,
llcrnrlon=min_longitude,
llcrnrlat=min_latitude,
urcrnrlon=max_longitude,
urcrnrlat=max_latitude,
resolution='l'
)
plot_map.drawcountries()
plot_map.drawcoastlines()
plot_map.drawlsmask()
plot_map.drawrivers()
for index, infile in enumerate(infiles):
variable = infile.variables[keys_and_descriptions_and_shapes_and_max_values[variable_index][0]]
layer = None
if len(variable.shape) == 2:
layer = variable[:, :]
elif len(variable.shape) == 3:
layer = variable[0, :, :]
elif len(variable.shape) == 4:
layer = variable[0, layer_index, :, :]
else:
print 'unrecognized shape, only variables with two, three or four dimensions supported'
sys.exit(1)
min_value = numpy.min(layer)
max_value = numpy.max(layer)
color_mesh = plot_map.pcolormesh(
longitude,
latitude,
layer,
vmin=min_value,
vmax=max_value,
cmap=pyplot.get_cmap('spectral'),
latlon=True
)
if index == 0:
color_bar = pyplot.colorbar(color_mesh)
color_bar.set_label(variable.units)
plot = None
if layer_index != None:
pyplot.title("%s\nat vertical layer %i\n%s" %(variable.description, layer_index, infile_names[index]))
else:
pyplot.title(variable.description)
if layer_index != None:
pyplot.savefig("%s_layer%i_from_%s.png" %(
keys_and_descriptions_and_shapes_and_max_values[variable_index][0],
layer_index,
infile_names[index]
))
else:
pyplot.savefig("%s_from_%s.png" %(
keys_and_descriptions_and_shapes_and_max_values[variable_index][0],
infile_names[index]
))
infile.close()
| {
"content_hash": "cdd81ed75f9f82edd2cbf2a1cf3795ae",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 128,
"avg_line_length": 37.71264367816092,
"alnum_prop": 0.7206644315757391,
"repo_name": "muellermichel/pyWRF-NMM",
"id": "626c32a7c16f2f8d91f090093d3927a406cda465",
"size": "6562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualize_wrfout.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import json
import os
import subprocess
import sys
env_var = 'GITLAB_SECRET_FILE_QUAY_CREDENTIALS'
if env_var not in os.environ:
print('Error: could not find environment variable ' + env_var)
sys.exit(1)
print('Starting quay.io login process...')
with open(os.environ[env_var], 'r') as f:
keys = json.load(f)
process = subprocess.Popen('docker login quay.io -u "{user}" --password-stdin'.format(user=keys['user']),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True)
print('Logging into quay.io...')
stdout, stderr = process.communicate(input=keys['password'].encode('utf-8'))
if 'Login Succeeded' in str(stdout):
print('Login Succeeded.')
else:
print('Error while attempting to log into quay.io:\n' + str(stderr))
sys.exit(1)
| {
"content_hash": "898f30296f0a7454b423fd979d20458a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 105,
"avg_line_length": 33.6,
"alnum_prop": 0.6642857142857143,
"repo_name": "BD2KGenomics/slugflow",
"id": "1faa83e22a846e200159135de9e59792ce8485c1",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup_gitlab_docker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4074"
},
{
"name": "Python",
"bytes": "618803"
},
{
"name": "Shell",
"bytes": "19115"
}
],
"symlink_target": ""
} |
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# This script gets run on the first day of the following month, and that month's URL is
# what goes here. E.g. If this directory is the directory for February, this script gets
# run on March 1, and this URL is the URL for the March challenge page.
nextMonthURL = "https://www.reddit.com/r/pornfree/comments/ex6nis/stay_clean_february_this_thread_updated_daily/"
# If this directory is the directory for November, this script gets run on December 1,
# and currentMonthIndex gets the index of November, i.e. 11.
currentMonthIndex = datetime.date.today().month - 1
if currentMonthIndex == 0:
currentMonthIndex = 12
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
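# Worked example of the wrap-around above: if this runs on January 1st,
# datetime.date.today().month - 1 == 0, so currentMonthIndex is reset to 12
# (December) and nextMonthIndex == 12 % 12 + 1 == 1 (January).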
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateToUse():
answer = ""
answer += "The Stay Clean CURRENT_MONTH_NAME challenge is now over. Join us for **[the NEXT_MONTH_NAME challenge](NEXT_MONTH_URL)**.\n"
answer += "\n"
answer += "**NUMBER_STILL_IN** out of INITIAL_NUMBER participants made it all the way through the challenge. That's **PERCENT_STILL_IN%**.\n"
answer += "\n"
answer += "Congratulations to these participants, all of whom were victorious:\n\n"
answer += templateForParticipants()
return answer
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('NEXT_MONTH_URL', nextMonthURL, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
| {
"content_hash": "d418df0debb0d91e48162bb44d5d09e0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 199,
"avg_line_length": 45.07692307692308,
"alnum_prop": 0.6808873720136519,
"repo_name": "foobarbazblarg/stayclean",
"id": "074b2a3fc7063bffde7d92858a4e87db60225c3b",
"size": "3056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2020-january/display-final-after-month-is-over.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
} |
import numpy as np
import re
import itertools
from collections import Counter
"""
Original taken from https://github.com/dennybritz/cnn-text-classification-tf
"""
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
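# Illustrative example (added for clarity, not in the original source):
#   clean_str("It's great!")  ->  "it 's great !"
# contractions are split off and punctuation is padded with spaces before lowercasing.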
def load_data_and_labels():
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
#positive_examples = list(open("../data/rt-polarity.pos",encoding='utf-8', errors='ignore').readlines())
positive_examples = list(open("../data/rt-polarity.pos").readlines())
positive_examples = [s.strip() for s in positive_examples]
#negative_examples = list(open("../data/rt-polarity.neg",encoding='utf-8', errors='ignore').readlines())
negative_examples = list(open("../data/rt-polarity.neg").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, padding_word="<PAD/>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
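# Illustrative example: pad_sentences([["a"], ["b", "c"]]) returns
# [["a", "<PAD/>"], ["b", "c"]] -- every sentence is right-padded to the
# length of the longest one.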
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
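# Illustrative example: build_vocab([["a", "b", "a"]]) returns
# vocabulary_inv == ["a", "b"] (most frequent first) and vocabulary == {"a": 0, "b": 1}.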
def build_input_data(sentences, labels, vocabulary):
"""
Maps sentences and labels to vectors based on a vocabulary.
"""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def load_data():
"""
Loads and preprocesses data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv]
def batch_iter(data, batch_size, num_epochs):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
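# Typical usage (a hedged sketch, not taken from the original repo):
#   batches = batch_iter(list(zip(x_train, y_train)), batch_size=64, num_epochs=10)
#   for batch in batches:
#       x_batch, y_batch = zip(*batch)
# Each epoch reshuffles the data and yields slices of at most batch_size items.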
| {
"content_hash": "7bdad5a7b82e5e144af322c7af89663a",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 108,
"avg_line_length": 37.42016806722689,
"alnum_prop": 0.6406916685380643,
"repo_name": "rouseguy/europython2016_dl-nlp",
"id": "9dffa0838475d1981b61209ea1b077852d7e5669",
"size": "4453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/data_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "162028"
},
{
"name": "Python",
"bytes": "8319"
}
],
"symlink_target": ""
} |
import logging
import random
import string
import collections
import re
from pyzabbix import ZabbixAPI, ZabbixAPIException
class ZabbixConn(object):
"""
Zabbix connector class
Defines methods for managing Zabbix users and groups
"""
def __init__(self, config, ldap_conn):
self.ldap_conn = ldap_conn
self.server = config.zbx_server
self.username = config.zbx_username
self.password = config.zbx_password
self.auth = config.zbx_auth
self.dryrun = config.zbx_dryrun
self.nocheckcertificate = config.zbx_nocheckcertificate
self.ldap_groups = config.ldap_groups
self.ldap_media = config.ldap_media
self.media_opt = config.media_opt
self.media_description = config.media_description
        self.user_opt = config.user_opt
        # sync_users() also relies on self.deleteorphans; assuming the config object
        # exposes it (e.g. config.zbx_deleteorphans), default to False otherwise.
        self.deleteorphans = getattr(config, 'zbx_deleteorphans', False)
if self.nocheckcertificate:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
if config.ldap_wildcard_search:
self.ldap_groups = ldap_conn.get_groups_with_wildcard()
# Use logger to log information
self.logger = logging.getLogger()
if config.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Log to stdout
ch = logging.StreamHandler()
if config.verbose:
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger.addHandler(ch) # Use logger to log information
# Log from pyzabbix
log = logging.getLogger('pyzabbix')
log.addHandler(ch)
if config.verbose:
log.setLevel(logging.DEBUG)
def connect(self):
"""
Establishes a connection to the Zabbix server
Raises:
SystemExit
"""
if self.auth == "webform":
self.conn = ZabbixAPI(self.server)
elif self.auth == "http":
self.conn = ZabbixAPI(self.server, use_authenticate=False)
self.conn.session.auth = (self.username, self.password)
else:
            raise SystemExit('api auth method not implemented: %s' % self.auth)
if self.nocheckcertificate:
self.conn.session.verify = False
try:
self.conn.login(self.username, self.password)
except ZabbixAPIException as e:
raise SystemExit('Cannot login to Zabbix server: %s' % e)
self.logger.info("Connected to Zabbix API Version %s" % self.conn.api_version())
def get_users(self):
"""
Retrieves the existing Zabbix users
Returns:
A list of the existing Zabbix users
"""
result = self.conn.user.get(output='extend')
users = [user['alias'] for user in result]
return users
def get_mediatype_id(self, description):
"""
Retrieves the mediatypeid by description
Args:
description (str): Zabbix media type description
Returns:
The mediatypeid for specified media type description
"""
result = self.conn.mediatype.get(filter={'description': description})
if result:
mediatypeid = result[0]['mediatypeid']
else:
mediatypeid = None
return mediatypeid
def get_user_id(self, user):
"""
Retrieves the userid of a specified user
Args:
user (str): The Zabbix username to lookup
Returns:
The userid of the specified user
"""
result = self.conn.user.get(output='extend')
userid = [u['userid'] for u in result if u['alias'] == user].pop()
return userid
def get_groups(self):
"""
Retrieves the existing Zabbix groups
Returns:
A dict of the existing Zabbix groups and their group ids
"""
result = self.conn.usergroup.get(status=0, output='extend')
groups = [{'name': group['name'], 'usrgrpid': group['usrgrpid']} for group in result]
return groups
def get_group_members(self, groupid):
"""
Retrieves group members for a Zabbix group
Args:
groupid (int): The group id
Returns:
A list of the Zabbix users for the specified group id
"""
result = self.conn.user.get(output='extend', usrgrpids=groupid)
users = [user['alias'] for user in result]
return users
def create_group(self, group):
"""
Creates a new Zabbix group
Args:
group (str): The Zabbix group name to create
Returns:
The groupid of the newly created group
"""
result = self.conn.usergroup.create(name=group)
groupid = result['usrgrpids'].pop()
return groupid
def create_user(self, user, groupid, user_opt):
"""
Creates a new Zabbix user
Args:
user (dict): A dict containing the user details
groupid (int): The groupid for the new user
user_opt (dict): User options
"""
random_passwd = ''.join(random.sample(string.ascii_letters + string.digits, 32))
user_defaults = {'autologin': 0, 'type': 1, 'usrgrps': [{'usrgrpid': str(groupid)}], 'passwd': random_passwd}
user_defaults.update(user_opt)
user.update(user_defaults)
result = self.conn.user.create(user)
return result
def delete_user(self, user):
"""
Deletes Zabbix user
Args:
user (string): Zabbix username
"""
userid = self.get_user_id(user)
result = self.conn.user.delete(userid)
return result
def update_user(self, user, groupid):
"""
Adds an existing Zabbix user to a group
Args:
user (dict): A dict containing the user details
groupid (int): The groupid to add the user to
"""
userid = self.get_user_id(user)
result = self.conn.usergroup.massadd(usrgrpids=[str(groupid)], userids=[str(userid)])
return result
def update_media(self, user, description, sendto, media_opt):
"""
Adds media to an existing Zabbix user
Args:
user (dict): A dict containing the user details
description (str): A string containing Zabbix media description
sendto (str): A string containing address, phone number, etc...
media_opt (dict): Media options
"""
userid = self.get_user_id(user)
mediatypeid = self.get_mediatype_id(description)
if mediatypeid:
media_defaults = {
'mediatypeid': mediatypeid,
'sendto': sendto,
'active': '0',
'severity': '63',
'period': '1-7,00:00-24:00'
}
media_defaults.update(media_opt)
self.delete_media_by_description(user, description)
result = self.conn.user.addmedia(users=[{"userid": str(userid)}], medias=media_defaults)
else:
result = None
return result
def delete_media_by_description(self, user, description):
"""
Remove all media from user (with specific mediatype)
Args:
user (dict): A dict containing the user details
description (str): A string containing Zabbix media description
"""
userid = self.get_user_id(user)
mediatypeid = self.get_mediatype_id(description)
if mediatypeid:
user_full = self.conn.user.get(output="extend", userids=userid, selectMedias=["mediatypeid", "mediaid"])
media_ids = [int(u['mediaid']) for u in user_full[0]['medias'] if u['mediatypeid'] == mediatypeid]
if media_ids:
self.logger.info('Remove other exist media from user %s (type=%s)' % (user, description))
for id in media_ids:
self.conn.user.deletemedia(id)
def create_missing_groups(self):
"""
Creates any missing LDAP groups in Zabbix
"""
missing_groups = set(self.ldap_groups) - set([g['name'] for g in self.get_groups()])
for eachGroup in missing_groups:
self.logger.info('Creating Zabbix group %s' % eachGroup)
if not self.dryrun:
grpid = self.create_group(eachGroup)
self.logger.info('Group %s created with groupid %s' % (eachGroup, grpid))
def convert_severity(self, severity):
converted_severity = severity.strip()
        if re.match(r"\d+", converted_severity):
return converted_severity
        # build from a list of pairs so the severity order (and the resulting bitmask) is deterministic
        sev_entries = collections.OrderedDict([
            ("Disaster", "0"),
            ("High", "0"),
            ("Average", "0"),
            ("Warning", "0"),
            ("Information", "0"),
            ("Not Classified", "0"),
        ])
for sev in converted_severity.split(","):
sev = sev.strip()
if sev not in sev_entries:
raise Exception("wrong argument: %s" % sev)
sev_entries[sev] = "1"
str_bitmask = ""
for sev, digit in sev_entries.items():
str_bitmask += digit
converted_severity = str(int(str_bitmask, 2))
self.logger.info('Converted severity "%s" to "%s"' % (severity, converted_severity))
        return converted_severity
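    # Illustrative example: "High,Average" sets the High and Average bits in the
    # Disaster..Not Classified order above, giving the bitmask "011000" = 24,
    # which is the numeric severity value Zabbix expects for user media entries.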
def sync_users(self):
"""
Syncs Zabbix with LDAP users
"""
self.ldap_conn.connect()
zabbix_all_users = self.get_users()
for eachGroup in self.ldap_groups:
ldap_users = self.ldap_conn.get_group_members(eachGroup)
# Do nothing if LDAP group contains no users and "--delete-orphans" is not specified
if not ldap_users and not self.deleteorphans:
continue
zabbix_grpid = [g['usrgrpid'] for g in self.get_groups() if g['name'] == eachGroup].pop()
zabbix_group_users = self.get_group_members(zabbix_grpid)
missing_users = set(list(ldap_users.keys())) - set(zabbix_group_users)
# Add missing users
for eachUser in missing_users:
# Create new user if it does not exists already
if eachUser not in zabbix_all_users:
self.logger.info('Creating user "%s", member of Zabbix group "%s"' % (eachUser, eachGroup))
user = {'alias': eachUser}
                    # the LDAP attributes may be missing; check before decoding
                    given_name = self.ldap_conn.get_user_givenName(ldap_users[eachUser])
                    surname = self.ldap_conn.get_user_sn(ldap_users[eachUser])
                    user['name'] = given_name.decode('utf8') if given_name else ''
                    user['surname'] = surname.decode('utf8') if surname else ''
self.create_user(user, zabbix_grpid, self.user_opt)
zabbix_all_users.append(eachUser)
else:
# Update existing user to be member of the group
self.logger.info('Updating user "%s", adding to group "%s"' % (eachUser, eachGroup))
self.update_user(eachUser, zabbix_grpid)
# Handle any extra users in the groups
extra_users = set(zabbix_group_users) - set(list(ldap_users.keys()))
if extra_users:
self.logger.info('Users in group %s which are not found in LDAP group:' % eachGroup)
for eachUser in extra_users:
if self.deleteorphans:
self.logger.info('Deleting user: "%s"' % eachUser)
if not self.dryrun:
self.delete_user(eachUser)
else:
self.logger.info(' * %s' % eachUser)
# update users media
onlycreate = False
media_opt_filtered = []
for elem in self.media_opt:
if elem[0] == "onlycreate" and elem[1].lower() == "true":
onlycreate = True
if elem[0] == "severity":
media_opt_filtered.append(
(elem[0], self.convert_severity(elem[1]))
)
else:
media_opt_filtered.append(elem)
if onlycreate:
self.logger.info("Add media only on newly created users for group >>>%s<<<" % eachGroup)
zabbix_group_users = missing_users
else:
self.logger.info("Update media on all users for group >>>%s<<<" % eachGroup)
zabbix_group_users = self.get_group_members(zabbix_grpid)
for eachUser in set(zabbix_group_users):
self.logger.info('>>> Updating/create user media for "%s", update "%s"' % (eachUser, self.media_description))
                media = self.ldap_conn.get_user_media(ldap_users[eachUser], self.ldap_media)
                sendto = media.decode("utf8") if media else None
if sendto and not self.dryrun:
self.update_media(eachUser, self.media_description, sendto, media_opt_filtered)
self.ldap_conn.disconnect()
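# Illustrative usage sketch (not part of the original module); "config" and
# "ldap_conn" stand for the already-initialised configuration and LDAP connector:
#   zabbix_conn = ZabbixConn(config, ldap_conn)
#   zabbix_conn.connect()
#   zabbix_conn.create_missing_groups()
#   zabbix_conn.sync_users()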
| {
"content_hash": "814239e82bb9a714756ef3de2cb7087c",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 125,
"avg_line_length": 32.04066985645933,
"alnum_prop": 0.5585007093257672,
"repo_name": "dnaeon/zabbix-ldap-sync",
"id": "7dfc3c3e58b299da2923ec1a427fef234c3229b1",
"size": "13393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/zabbixconn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33210"
}
],
"symlink_target": ""
} |
import json, cgi, os, sys, hashlib, time
from urllib.parse import *
from pymongo import *
from urllib import *
# from app.report.report import *
from datetime import datetime, timedelta
from libs.perm.perm import *
from libs.table.table import create_empty_row_
from libs.contents.contents import get_doc, get_mt
from core.core import *
def add_basket_post():
add_basket(get_post('ware_id'), int(get_post('quantity')))
return {"result": "ok", "quantity":basket_count(), "basket": basket_show()}
def add_basket(ware, quantity):
"""получает id товара и количество берет подробности о нем и заносит в сесии"""
s = session()
doc = get_doc(ware)
basket_check()
if not ware in s['basket']:
s['basket'][ware] = {'title': ct(doc['doc']['title']), 'price': doc['doc']['price'],
"amount": 0, 'quantity': 0, 'descr': doc['doc']['descr'],
"_id":doc['_id']
}
s['basket'][ware]['quantity'] += quantity
# die(doc['doc']['count_opt'])
if 'count_opt' in doc['doc'] and doc['doc']['count_opt'] and int(quantity) >= int(ct(doc['doc']['count_opt'])):
amount = float(quantity * doc['doc']['price_opt'])
s['basket'][ware]['amount'] = amount
s.save()
# die( s['basket'][ware]['amount'] )
else:
amount = float(quantity * doc['doc']['price'])
s['basket'][ware]['amount'] += amount
s.save()
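# Illustrative sketch (not part of the original code) of the session layout that
# add_basket() builds and basket_show()/basket_count()/basket_amount() read:
#   session['basket'] = {
#       '<ware_id>': {'title': ..., 'price': ..., 'descr': ..., '_id': '<ware_id>',
#                     'quantity': <int>, 'amount': <float>},
#   }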
def list_basket(request):
quantity = basket_count()
basket = basket_show()
amount = 0
# basket = {'1':'1'}
for i in basket:
# amount += float(basket[i]['quantity']) * float(basket[i]['price'])
amount += float(basket[i]['amount'])
# return templ('app.shop:list_basket', quantity = quantity, basket = basket, amount = amount )
return templ('libs.shop:list_basket', request, dict(quantity = quantity, basket = basket, amount = amount) )
def basket_context(request):
basket = get_const_value("is_basket")
u = urlparse(request.url)
basket_url = u.scheme + '://' + u.netloc + '/basket'
meta_doc = get_mt('des:client_order'); basket_map=None
if meta_doc:
meta_table = check_map_perm('des:order', meta_doc['field_map'])
basket_map = rec_data_t(meta_table)
return {'basket_url':basket_url, 'basket_map':basket_map, }
def clean_basket_post():
basket_clean(get_post('ware_id'))
return json.dumps({"result": "ok", "quantity":basket_count(), "basket": basket_show()})
def show_basket_post():
return json.dumps({"result": "ok", "quantity":basket_count(), "basket": basket_show()})
def make_order_post():
callback(get_post('phone'), get_settings('domain'), get_settings('basket', ''))
add_order(json.loads(get_post('data')))
return {"result":"ok"}
def add_order(request, data):
db = request.db
proc_id = 'des:order'; table_id = 'ware'
sub_data = basket_show()
doc_id = create_empty_row_(proc_id, data)
doc = get_doc(doc_id)
for i in sub_data:
new_id = doc['seq_id']
doc["seq_id"] = new_id+1
new_id = str(new_id)
doc['tables'][table_id][new_id] = sub_data[i]
db.doc.save(doc)
return {"result":"ok"}
def add_order_web_post():
""" web заказы -> на создание -> init_web_order(new_row)
web заказы -> на создание подтаблицы -> update_sum( owner, new_row)
web заказы -> на обновление подтаблицы -> update_sum( owner, new_row)
web заказы товары -> на создание -> update_price_column({}, new_row, doc['owner'])
price_changed( doc['owner'], {}, new_row, False)
web заказы товары -> на обновление -> update_price_column(old_row, new_row, doc['owner'])
price_changed(doc['owner'], old_row, new_row, False)
"""
phone = get_post('phone')
basket = get_post('basket', '')
callback(phone, get_settings('domain'), basket)
s = session()
basket_check()
if len(s['basket']):
owner = get_post('owner')
owner = create_row('des:web_order', None, defaults={'phone':phone})
amount = 0
for _id in s['basket']:
ware = s['basket'][_id]
doc_id = create_row('des:web_order_ware', owner, defaults={"title":ware['_id'], "quantity":ware['quantity'],
"price":ware['price']})
amount += ware['quantity'] * float(ware['price'])
if not doc_id: return '{"result":"fail", "error":"%s"}' %cgi.escape('updated', True)
update_row_( 'des:web_order', owner, {'amount':amount}, '_', no_synh=True)
wares_clean()
return {"result":"ok"}
def get_shop_filter(request):
db = request.db
aaa = []
for res in db.doc.find({"doc_type":"des:producer"}):
aaa.append({"id":res['_id'], "title":ct( res['doc']["title"]) })
return {'produced':aaa}
def basket_clean(ware):
basket_check()
s = session()
if ware in s['basket']:
del s['basket'][ware]
s.save()
def wares_clean():
basket_check()
s = session()
del s['basket']
s.save()
return {"result":"ok"}
def basket_show():
basket_check()
s = session()
return s['basket']
def basket_count():
"""щитает кол-во товаров в корзине"""
basket_check()
s = session(); summ = 0
for i in s['basket']:
summ += s['basket'][i]['quantity']
return summ
def basket_amount():
basket_check()
s = session(); summ = 0
for i in s['basket']:
summ += s['basket'][i]['quantity']*s['basket'][i]['price']
return summ
def basket_check():
s = session()
if not 'basket' in s:
s['basket'] = {}
s.save()
# =====================================================================================================================================
# ====================================== ADVANCED FILTER ===========================================================================
# =====================================================================================================================================
def ware_filter(filter):
    # filters the wares themselves by the given list of attributes
if not isinstance(filter, list): filter = [filter]
categ = {}
for i in filter:
cat = i[:32]
attr = i[33:]
if not cat in categ: categ[cat] = []
categ[cat].append(attr)
cond = dict([('attr.'+i, {'$in': v}) for i, v in categ.items()])
    # current variant
# aaa = {'attr':{'diagonal':'17', 'korpus': 'metall'}}
# cond = {'attr.diagonal: {$in: [15, 17]}}
# cond = {'docs: {$in: [15, 17]}}
    # current variant for aggregation
#db.test.aggregate({$unwind: "$likes"})
# {'docs':[{'id':1, 'cat': 'diagonal', 'attr':'17'}, {id:2, 'cat':'korpus', 'attr': 'metall'}] }
return cond
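# Illustrative sketch (not part of the original code): each filter string is
# "<32-char category id>" + a separator character + "<attribute value>", so e.g.
#   ['<diag_cat_id>_15', '<diag_cat_id>_17']
# is grouped per category and turned into a MongoDB condition like
#   {'attr.<diag_cat_id>': {'$in': ['15', '17']}}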
def get_ware_cls(request, cls):
""" получаем список для фильтра который справа показывается """
# получаем список категорий которые принадлежат например смартфон на выходе диагональ и тд.
# $cat =
# select c.* from ware_cat as c inner join on c.id = cc.owner ware_class_cat as cc where cc.owner = $cls
# {'doc_type':'ware_class_cat', 'owner':cls}{'doc_type':'ware_cat', '_id':{'$in':cat}}
# select a.* from ware_attr as a where owner in $cat
db = request.db; categ = []; list_cat = []
    # collect the required data: the filters that belong to the class
    for res in db.doc.find({'doc_type':'des:ware_class_cat', 'owner':cls}):
        list_cat.append(res['doc']['cat'])
    # collect the attribute filters
    for res in db.doc.find({'doc_type':'des:ware_cat', '_id':{'$in':list_cat}}):
        cat = {'id':res['_id'], 'title':ct(res['doc']['title']), 'attr':[]}
        categ.append(cat)
        # walk the collected filters and gather their attributes
for rs in db.doc.find({'doc_type':'des:ware_attr', 'owner': cat['id']}):
attr = {'id':rs['_id'], 'title':ct(rs['doc']['title'])}
cat['attr'].append(attr)
return categ
def list_ware(request, cls):
""" вызывается для показа списка товаров """
#ware_class_cat-справочник где хранятся категории которые относятся к классу ( класс-смартфон у него категория диагональ экрана )
# cats = [res['_id'] for res in db.doc.find({'doc_type':'ware_class_cat'})]
cond = {'doc_type':'des:ware', 'doc.class': cls, 'doc.pub':'true'}
if request.method == 'POST':
cond.update(ware_filter(get_post('cat', []))) # cond = {'attr.diagonal: {$in: [15, 17]}}
from libs.sites.sites import get_pagination, get_full_docs
pages, req = get_pagination(cond)
sort = ('doc.date', -1)
if sort: req.sort(*sort)
dv = get_full_docs(req)
    filter = get_ware_cls(request, cls)
return templ('libs.shop:list_ware', request, dict(cls = cls, docs = dv, proc_id='des:ware', pages = pages, filter=filter) )
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
def list_class_post(cls):
pass
def list_ware_post(cls):
pass
def ware_page(request, doc_id):
u = urlparse(request.url)
url = u.scheme + '://' + u.hostname + u.path
data_tree = []
from libs.sites.sites import get_pagination, get_full_doc, get_full_docs
db = request.db
doc = get_full_doc(doc_id, img_ctr=4)
req_attr = db.doc.find({'doc_type':'des:ware_attr', 'owner':doc['_id']})
ware_attr = get_full_docs( db.doc.find({'doc_type':'des:ware_attr', 'owner':doc['_id']}) )
proc_id = doc['proc_id']
title = ct(doc['doc']['title']) if 'title' in doc['doc'] else ''
cls = doc['doc']['class']
req = db.doc.find( {'doc_type':'des:ware', '_id':{'$ne':doc['_id']}, 'doc.class':cls} ).limit(6)
similar = get_full_docs( req )
url1 = url
seo = db.doc.find_one({'doc.alias':'ware_page_seo'}, {'doc.description':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1})
# if seo:
# seo = seo
# else: seo = ''
return templ('ware_page', request, dict(doc = doc, url = url1, doc_id=doc_id, proc_id=proc_id, similar = similar, seo=seo,
tree = data_tree, page_title=title, ware_attr=ware_attr)) #news_map=news_map, captcha=raw, hash=hash,
def count_ware_(request, cls):
db = request.db
ctr = db.doc.find({'doc_type':'des:ware', 'doc.class':cls}).count()
childs = db.doc.find_one({'_id':cls})
if not 'child' in childs: return ctr
for res in childs['child']:
        ctr += count_ware(request, res)
return ctr
def count_ware(request, cls):
db = request.db
ctr = db.doc.find({'doc_type': 'des:ware', 'doc.class': cls}).count()
childs = db.doc.find_one({'_id': cls})
    ctr += sum(count_ware(request, res) for res in childs.get('child', []))
return ctr
def get_navigate_(request, doc_id):
db = request.db; path = []
parent = db.doc.find_one({'child':{'$in':[doc_id]}}, {'parent':1, 'doc.alias':1})
if not parent: return []
else:
path.append(parent['doc']['alias'])
        path = path + get_navigate_(request, parent['_id'])
return path
def get_navigate(request, doc_id):
db = request.db; path = []
parent = db.doc.find_one({'_id': doc_id}, {'parent':1, 'doc.alias':1, 'doc.title':1})
if not parent: return []
else:
path.append((parent['doc']['alias'], ct(parent['doc']['title'])))
        path = path + get_navigate(request, parent['parent'])
return path
def get_filters(request, cls):
db = request.db
docs=[]
cursor = db.doc.aggregate([
# { '$match' : { 'doc_type' : "des:ware_attr", 'doc.class': { '$exists': True } } },
{ '$match' : { 'doc_type' : "des:ware_attr", 'doc.class': cls } },
{ '$project' : { 'title' : "$doc.title.ru", 'value':"$doc.attr_val.ru", 'class':"$doc.class", '_id':0 } },
{ '$group' : {'_id': {'class' :"$class", 'title': "$title"} , 'filters': { '$addToSet': "$value" } } },
{ '$group' : {'_id' :"$_id.class", 'title':{ '$addToSet': { 'title': "$_id.title", 'filters': "$filters" } } } }
])
for res in cursor:
docs.append(res)
return docs
def list_class(request, cls):
""" показывает список вложеных категорий и товаров для категорий
"""
from libs.sites.sites import get_pagination, get_full_docs, get_curr_img, get_full_doc
from libs.files.files import get_nf
db = request.db; clss = []
parent_id = db.doc.find_one({'doc_type':'des:ware_class', 'doc.alias':cls})
for doc in db.doc.find({'doc_type':'des:ware_class', 'parent':parent_id['_id']}).sort('doc.date', -1):
proc_id = doc['doc_type']
d_img = doc['default_img'] if 'default_img' in doc and doc['default_img'] else None
attachment = get_nf(proc_id, doc['_id'], 1)
data = doc['doc']
try:
            count = count_ware(request, doc['_id'])
except: count='1'
full_doc = {"_id":doc['_id'], "id": doc['_id'],
'count':count,
"doc": data,
"att": attachment, "img":get_curr_img(doc, attachment), 'default_img':d_img, 'proc_id':proc_id}
clss.append(full_doc)
pages= ''
docs = get_full_docs(db.doc.find({'doc_type':'des:ware', 'doc.class':parent_id['_id']}).sort('doc.date', -1))
# docs = get_full_docs(req).sort('doc.date', -1)
    filter = get_filters(request, parent_id['_id'])
# filter = get_ware_cls(cls)
parent_doc = get_full_doc(parent_id['_id'])
# seo = db.doc.find_one({'doc.alias':'class_seo'}, {'doc.title':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1 })
seo = db.doc.find_one({'_id':parent_id['_id']}, {'doc.description':1, 'doc.tags':1, 'doc.footer':1 })
# seo = seo if 'doc' in seo else ''
return templ('list_class', request, dict(cls_docs = clss, cls=cls, docs = docs, proc_id='des:ware', pages = pages,
                          path=get_navigate(request, parent_id['_id']), parent_doc=parent_doc, filter=filter, seo=seo) )
def set_filters(request, cls, filters):
db = request.db
url = filters[1:]
url = url.split(';')
docs=[]; cond=[]; ds = {}; attr = []; data = []
for res in url:
res = res.replace('%20', ' ')
aaa = res.split('=');
key = aaa[0]; val = aaa[1]
if key in ds:
if type(ds[key]) == list: ds[key].append(val)
else: ds[key] = [ds[key], val]
else: ds.update({key:val})
for res in ds:
attr.append(res)
for res in ds.items():
if type(res[1]) == list: pr = {'doc.title.ru':res[0], 'doc.attr_val.ru':{'$in':res[1]}}
else: pr = {'doc.title.ru':res[0], 'doc.attr_val.ru':res[1]}
docs.append(pr)
cursor = db.doc.aggregate([
{ '$match' : { 'doc_type' : "des:ware_attr", 'doc.class':cls, '$or': docs} },
{ '$group' : { '_id': "$owner", "attr": { '$push': "$doc.title.ru" } } },
{ '$match' : { "attr": { '$all': attr } } },
{ '$project': {"_id":1 } }
])
for res in cursor:
cond.append(res)
if not len(cond): return None
from libs.sites.sites import get_full_docs
docs = get_full_docs(db.doc.find({ '$or':cond }).sort('doc.date', -1))
return docs
def list_filters(request, cls, filters):
""" если чтото выбрали для фильтров
"""
from libs.sites.sites import get_pagination, get_full_docs, get_curr_img, get_full_doc
from libs.files.files import get_nf
db = request.db; clss = []
parent_id = db.doc.find_one({'doc_type':'des:ware_class', 'doc.alias':cls})
for doc in db.doc.find({'doc_type':'des:ware_class', 'parent':parent_id['_id']}).sort('doc.date', -1):
proc_id = doc['doc_type']
attachment = get_nf(proc_id, doc['_id'], 1)
data = doc['doc']
try:
            count = count_ware(request, doc['_id'])
except: count='1'
full_doc = {"_id":doc['_id'], "id": doc['_id'],
'count':count,
"doc": data,
"att": attachment, "img":get_curr_img(doc, attachment), 'proc_id':proc_id}
clss.append(full_doc)
pages= ''
    docs = set_filters(request, parent_id['_id'], filters)
    filter = get_filters(request, parent_id['_id'])
seo = db.doc.find_one({'doc.alias':'class_seo'}, {'doc.description':1, 'doc.tags':1, 'doc.body':1, 'doc.footer':1, 'doc.add_title':1 })
seo = seo if 'doc' in seo else ''
return templ('list_class', request, {'result':'ok', 'cls_docs':clss, 'cls':cls, 'docs':docs, 'proc_id':'des:ware', 'pages':pages,
                          'path':get_navigate(request, parent_id['_id']), 'parent_doc':get_full_doc(parent_id['_id']), 'filter':filter, 'seo':seo})
def get_list_filter(request, cls):
""" формируемая структура [{'id_class':'123', "filter_name":"name", attr:{'id_class':'123', 'title':'title'}]
"""
db = request.db; filters = []
for res in db.doc.find({ 'doc_type':'des:ware_filter', '$or':[{'doc.ware_class':cls}, {} ]}):
filters.append({'id_class':res['doc']['ware_class'], 'title':ct(res['doc']['title'])})
# users = [doc._id for doc in db.doc.find({"doc_type":'des:ware_filter', 'group': {'$all': ['administrator']}})]
users = [doc._id for doc in db.doc.find({"doc_type":'des:ware_filter', 'group': {'$all': ['administrator']}})]
articles = db.doc.find({"doc_type":'blogs', 'user': {'$in': users}})
return filters
def first_cls(request):
""" выводит корневые категории, в основном для главной страницы """
from libs.sites.sites import get_full_docs, get_curr_img
from libs.files.files import get_nf
db = request.db; docs = []
for doc in db.doc.find({'doc_type':'des:ware_class', 'parent':'_'}).sort('doc.date', -1):
proc_id = doc['doc_type']
attachment = get_nf(proc_id, doc['_id'], 1)
data = doc['doc']
try:
            count = count_ware(request, doc['_id'])
except: count = '1'
full_doc = {"_id":doc['_id'], "id": doc['_id'],
'count':count,
"doc": data,
"att": attachment, "img":get_curr_img(doc, attachment), 'proc_id':proc_id}
docs.append(full_doc)
return docs
def list_ware_cls(request, full=False):
"""
получение колва докуентов
Для каждого класса находим сколько в нем документов
Назначаем их кол-во всем его родителям приплюсовыванием
:param выводить с дополнительной информацией типа картинок или просто названия, с доп. информацией выводится олько для главной
"""
db = request.db
docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}, {'doc.title.ru':1, 'doc.alias':1, 'parent':1, 'child':1 }).sort('doc.date', -1) ]
# docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}).sort('doc.date', -1) ]
if full:
docs = [res for res in db.doc.find({'doc_type':'des:ware_class'}).sort('doc.date', -1) ]
from libs.sites.sites import get_full_docs
docs = get_full_docs(docs)
return form_tree_( docs )
# return docs
# def form_tree_(docs):
# tree = {doc['_id']: doc for doc in docs}
# for doc in docs:
# if "child" in doc and doc['child'] != '_':
# doc['child'] = [tree[id] for id in doc['child']]
# docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent']=='_']}
# return docss
def form_tree_(docs):
""" формирует из документов дерево
"""
tree = {doc['_id']: doc for doc in docs}
for doc in docs:
doc['child'] = []
for doc in docs:
parent = doc.get("parent", None)
if parent and parent != '_':
tree[parent]['child'].append(doc)
docss = {"_id": "_", "child": [doc for doc in docs if "parent" not in doc or doc['parent'] == '_']}
return docss
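# Illustrative sketch (not part of the original code): for documents like
#   [{'_id': 'a', 'parent': '_'}, {'_id': 'b', 'parent': 'a'}]
# form_tree_() returns a virtual root:
#   {'_id': '_', 'child': [{'_id': 'a', ..., 'child': [{'_id': 'b', ..., 'child': []}]}]}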
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
def list_orders(request):
from libs.sites.sites import get_full_docs
db = request.db
# web_order = db.doc.find({'doc_type':'web_order'})
# web_order_ware = db.doc.find({'doc_type':'web_order_ware'})
web_order = get_full_docs(db.doc.find({'doc_type':'des:web_order'}).limit(60).sort('doc.date', -1))
web_order_ware = get_full_docs(db.doc.find({'doc_type':'des:web_order_ware'}).limit(60).sort('doc.date', -1))
ware = get_full_docs(db.doc.find({'doc_type':'des:ware'}).limit(60).sort('doc.date', -1))
return templ('libs.shop:list_orders', request, dict(web_order = web_order, web_order_ware = web_order_ware, ware=ware))
def callback_post():
phone = get_post('phone')
basket = get_post('basket', '')
dom = get_settings('domain')
return callback(phone, dom, basket)
def callback(phone, dom, basket):
""" отправка sms с почты на телефон
"""
# phone = get_post('phone')
# dom = get_settings('domain')
# mail = '[email protected]'
# mail = '[email protected]'
# mail = '[email protected]'
# mail = get_const_value('callback_mail')
mail = get_settings('callback_mail')
create_row('des:phone', '_', defaults={'phone':phone})
text = u""" {0} """.format( phone )
if basket == 'true':
route_mail(mail, u'Cайт корзина ', text)
else:
route_mail(mail, u'Запрос на сайте ', text)
# text = u""" {0} -> {1}""".format( dom, phone )
# route_mail(mail, u'Запрос на сайте '+dom, text)
return {"result":"ok"}
| {
"content_hash": "ab0f662f6408995661cc61d9bf865e45",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 148,
"avg_line_length": 37.82616822429907,
"alnum_prop": 0.5797796116025102,
"repo_name": "alikzao/tao1",
"id": "fedcf004ae68d7285627c28f4b2a947d542f9040",
"size": "21343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tao1/libs/shop/shop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ABAP",
"bytes": "1037"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "506"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "390"
},
{
"name": "C#",
"bytes": "151"
},
{
"name": "C++",
"bytes": "808"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1015375"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cucumber",
"bytes": "699"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "FORTRAN",
"bytes": "713"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HLSL",
"bytes": "7907"
},
{
"name": "HTML",
"bytes": "6309233"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "1550"
},
{
"name": "JavaScript",
"bytes": "15329934"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "981"
},
{
"name": "Makefile",
"bytes": "8078"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Matlab",
"bytes": "203"
},
{
"name": "NSIS",
"bytes": "486"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "351"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "678"
},
{
"name": "PowerShell",
"bytes": "418"
},
{
"name": "Protocol Buffer",
"bytes": "274"
},
{
"name": "Python",
"bytes": "350622"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Ruby",
"bytes": "6868"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "6971"
},
{
"name": "Smarty",
"bytes": "192818"
},
{
"name": "Swift",
"bytes": "476"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "1345"
},
{
"name": "TypeScript",
"bytes": "1672"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Visual Basic",
"bytes": "916"
},
{
"name": "XQuery",
"bytes": "114"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response
from django.template import RequestContext
from humans.models import HumanGroup, Snippet
def humans_txt(request, template='humans.txt'):
snippet = Snippet.objects.random_snippet()
groups = HumanGroup.objects.exclude(members__isnull=True)
return render_to_response(
template_name=template,
dictionary={
'snippet': snippet,
'groups': groups,
},
mimetype='text/plain; charset=utf-8',
context_instance=RequestContext(request),
)
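# Illustrative wiring sketch (not part of the original file); assumes an
# old-style Django urls.py where django.conf.urls.url is available:
#   from django.conf.urls import url
#   from humans.views import humans_txt
#   urlpatterns = [url(r'^humans\.txt$', humans_txt, name='humans_txt')]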
| {
"content_hash": "f6e36d08ae26329395e3ee5dc1211a3b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.6768402154398564,
"repo_name": "valeriansaliou/django-humans",
"id": "67376bf9ffd96708fefee57d3b506f3febf44699",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "humans/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12709"
}
],
"symlink_target": ""
} |
import argparse
from ghost import Ghost, Session
parser = argparse.ArgumentParser(description="Login to CRRC application.")
parser.add_argument("--baseUrl", required=True, help="Enter base URL for CRRC application. (eg. http://localhost:3000/)" )
parser.add_argument("--user", required=False, default="admin", help="Enter username to use to login to CRRC application." )
parser.add_argument("--password", required=False, default="admin", help="Enter password to use to login to CRRC application." )
args = parser.parse_args()
print( "Logging into to CRRC at: {} as user: {}".format( args.baseUrl, args.user ) )
ghost = Ghost()
with ghost.start() as session:
page, _resources = session.open( args.baseUrl )
assert page.http_status == 200 and 'Username' in page.content
session.set_field_value( "#username", args.user )
session.set_field_value( "#password", args.password )
session.evaluate( "document.getElementById('submit').click();", expect_loading=True )
session.wait_for_page_loaded()
assert session.exists( "#welcome" )
# result, _resources = session.evaluate(
# "document.getElementById('welcome').innerHTML;" )
# print( result )
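# Illustrative invocation (not part of the original script), using the defaults
# from the argparse definitions above:
#   python login.py --baseUrl http://localhost:3000/ --user admin --password admin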
| {
"content_hash": "b83202836ea452ce39b7ab12627829e7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 127,
"avg_line_length": 50.34782608695652,
"alnum_prop": 0.7236614853195165,
"repo_name": "sptrakesh/crrc",
"id": "1b937bda6da5804d525dba87ba9eb14de071d921",
"size": "1181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ui/login.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "915"
},
{
"name": "C++",
"bytes": "287362"
},
{
"name": "CMake",
"bytes": "1469"
},
{
"name": "Dockerfile",
"bytes": "295"
},
{
"name": "HTML",
"bytes": "77822"
},
{
"name": "JavaScript",
"bytes": "14882"
},
{
"name": "Python",
"bytes": "1181"
},
{
"name": "Shell",
"bytes": "6241"
}
],
"symlink_target": ""
} |
import sys
from functools import partial
from itertools import imap,starmap,repeat,izip,chain, ifilter
from operator import itemgetter, attrgetter, lt
from collections import Counter
from pbcore.io import BasH5Reader
from misc import identityFunc
if not len(sys.argv) >= 2:
sys.exit("zmwstats.py in.bas.h5 [in2.bas.h5]\n")
readers = imap(BasH5Reader, sys.argv[1:])
get_prod = lambda o : getattr(o, "zmwMetric")("Productivity")
get_rt = lambda o : getattr(o, "zmwMetric")("ReadType")
pcg = itemgetter(0,1,2)
rtg = itemgetter(0,1,2,3,4,5,6,7)
print "\t".join(["movie_name","sequencing_zmws","all_sequencing_zmws",
"prod_empty", "prod_productive", "prod_other",
"Empty", "FullHqRead0", "FullHqRead1", "PartialHqRead0",
"PartialHqRead1", "PartialHqRead2", "Multiload", "Indeterminate", "Total_Bases",
"Bases_>10k"])
for cell in readers:
movieName = cell.movieName
good_zmws_cnt = len(cell.sequencingZmws)
all_seq_zmws_cnt = len(cell.allSequencingZmws)
zmwgetters = imap(itemgetter,cell.allSequencingZmws)
allSeqZmws = list(starmap(apply, izip(zmwgetters, repeat([cell]))))
#all subreads
subreads = ifilter(bool,imap(attrgetter("subreads"), allSeqZmws))
subread_lens = map(lambda r: r.readEnd - r.readStart, chain.from_iterable(subreads))
total_bases = sum(subread_lens)
bases_g10k = sum(ifilter(partial(lt,10000), subread_lens))
raw_prods = imap(get_prod, allSeqZmws)
prod_counts = Counter(raw_prods)
prod_summary = pcg(prod_counts)
read_type = imap(get_rt, allSeqZmws)
read_type_counts = Counter(read_type)
read_type_summary = rtg(read_type_counts)
outdata = [movieName, good_zmws_cnt ,all_seq_zmws_cnt]
outdata += list(prod_summary)
outdata += list(read_type_summary)
outdata += [total_bases, bases_g10k]
print "\t".join(map(str, outdata))
cell.close()
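# Illustrative invocation (not part of the original script); writes one
# tab-separated stats row per movie to stdout:
#   python zmwstats.py movie1.bas.h5 movie2.bas.h5 > zmwstats.tsv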
| {
"content_hash": "85fb12c38ca453b54b6915543573a131",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 97,
"avg_line_length": 29.454545454545453,
"alnum_prop": 0.6712962962962963,
"repo_name": "jgurtowski/ectools",
"id": "e8a17805ec55f43c05f6e3609f9124f3a920d52a",
"size": "1967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zmwstats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "96326"
},
{
"name": "Shell",
"bytes": "5297"
}
],
"symlink_target": ""
} |
import time
import re
import threading
import dbus
import dbus.service
import dbus.mainloop.glib
import ConfigParser
from imaplib import *
class MailWatcher(dbus.service.Object):
def __init__(self, name, session):
dbus.service.Object.__init__(self, name, session)
@dbus.service.signal('com.flocke.MailWatcher', signature='a{ss}')
def Changed(self, data):
pass
class MailCheck(threading.Thread):
def __init__(self, name, url, user, passwd, mailbox, ssl):
threading.Thread.__init__(self)
self.name = name
self.user = user
self.passwd = passwd
self.url = url
self.mailbox = mailbox
self.ssl = ssl
self.result = "N/A"
def run(self):
if self.ssl == "True":
server = IMAP4_SSL(self.url)
else:
server = IMAP4(self.url)
server.login(self.user, self.passwd)
        self.result = re.search(r"UNSEEN (\d+)", server.status(self.mailbox, "(UNSEEN)")[1][0]).group(1)
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
session_bus = dbus.SessionBus()
name = dbus.service.BusName('com.flocke.MailWatcher', session_bus)
watcher = MailWatcher(session_bus, '/com/flocke/MailWatcher')
parser = ConfigParser.SafeConfigParser()
#### Edit to point to your configuration file
parser.read('/path/to/mailwatcher.ini')
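#### Example mailwatcher.ini (illustrative; one section per IMAP account,
#### using exactly the keys read below):
# [Personal]
# server = imap.example.com
# user = username
# pass = secret
# mailbox = INBOX
# ssl = True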
while True:
data = {}
threads_running = []
for section in parser.sections():
account = {}
for name, value in parser.items(section):
account[name] = value
thread_current = MailCheck(section, account['server'], account['user'], account['pass'], account['mailbox'], account['ssl'])
threads_running.append(thread_current)
thread_current.start()
for th in threads_running:
th.join()
data[th.name] = th.result
watcher.Changed(data)
#### Edit if you want a different intervall (in seconds)
time.sleep(300)
| {
"content_hash": "010af984cbea66cf753c4a9469a75503",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 126,
"avg_line_length": 27.5625,
"alnum_prop": 0.7063492063492064,
"repo_name": "flocke/scripts",
"id": "31441638f8baeb9739be6d84df1b1aa52780f6db",
"size": "4037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailwatcher-dbus/mailwatcher.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "25326"
},
{
"name": "Python",
"bytes": "4037"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
daemon = {
'deftDB_host' : 'ATLAS_DEFT',
'jediDB_host' : 'ATLAS_PANDA',
'deftDB_INTR' : 'INTR',
'deftDB_ADCR' : 'ADCR',
'jediDB_ADCR' : 'ADCR',
'deftDB_reader' : 'atlas_deft_r',
'deftDB_writer' : 'atlas_deft_w',
# tables
# DEFT
't_prodmanager_request' : 't_prodmanager_request',
't_prodmanager_request_status' : 't_prodmanager_request_status',
't_production_step' : 't_production_step',
't_production_task' : 't_production_task',
't_production_task_p' : 't_production_task_listpart',
't_production_dataset' : 't_production_dataset',
't_production_container' : 't_production_container',
't_projects' : 't_projects',
# DEFT-JEDI
't_task' : 't_task',
't_input_dataset' : 't_input_dataset',
't_jedi_datasets' : 'jedi_datasets',
# defaults
'user_task_step_id' : 201, # default step for users tasks
'user_task_request_id' : 300, # default request for users tasks
}
| {
"content_hash": "63ce00779e174a565d5463abea862fc1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 40.53846153846154,
"alnum_prop": 0.5569259962049335,
"repo_name": "PanDAWMS/panda-bigmon-atlas",
"id": "c300d970126ce04c0d431a40d9e3912b9b93985f",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/main-py3",
"path": "atlas/postproduction/deft/deft_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "258278"
},
{
"name": "HTML",
"bytes": "783151"
},
{
"name": "JavaScript",
"bytes": "2301751"
},
{
"name": "Python",
"bytes": "1497882"
},
{
"name": "TypeScript",
"bytes": "142331"
}
],
"symlink_target": ""
} |
"""This program wraps an arbitrary command and prints "1" if the command ran
successfully."""
import subprocess
import sys
if not subprocess.call(sys.argv[1:]):
print 1
else:
print 0
| {
"content_hash": "6abb2a972003d48d6e3755552428975f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 18.9,
"alnum_prop": 0.7354497354497355,
"repo_name": "patrickm/chromium.src",
"id": "6f0daec3a751bdf4f043274f1196193929b5dd4b",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw",
"path": "build/check_return_value.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40737238"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "207930633"
},
{
"name": "CSS",
"bytes": "939170"
},
{
"name": "Java",
"bytes": "5844934"
},
{
"name": "JavaScript",
"bytes": "17837835"
},
{
"name": "Mercury",
"bytes": "10533"
},
{
"name": "Objective-C",
"bytes": "886228"
},
{
"name": "Objective-C++",
"bytes": "6667789"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10857933"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1326032"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
from nltk.corpus import wordnet as wn
synsets = wn.synsets('wing')
print ([str(syns.part_holonyms() ) for syns in synsets])
| {
"content_hash": "ed95a2d2a05f07550f18aceba7aa7b65",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 56,
"avg_line_length": 41.333333333333336,
"alnum_prop": 0.7338709677419355,
"repo_name": "Shokr/nltk_tutorial",
"id": "99f4f41e600fa29fe0c2e3d1e62da93056de81ad",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Part of Speech Tagging/Wordnet definitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6152"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class Project(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: int=None, parent: int=None, name: str=None, customer: int=None, startdate: datetime=None, enddate: datetime=None, type: str='Kundenprojekt', kind: str='Beratung'):
"""
Project - a model defined in Swagger
:param id: The id of this Project.
:type id: int
:param parent: The parent of this Project.
:type parent: int
:param name: The name of this Project.
:type name: str
:param customer: The customer of this Project.
:type customer: int
:param startdate: The startdate of this Project.
:type startdate: datetime
:param enddate: The enddate of this Project.
:type enddate: datetime
:param type: The type of this Project.
:type type: str
:param kind: The kind of this Project.
:type kind: str
"""
self.swagger_types = {
'id': int,
'parent': int,
'name': str,
'customer': int,
'startdate': datetime,
'enddate': datetime,
'type': str,
'kind': str
}
self.attribute_map = {
'id': 'id',
'parent': 'parent',
'name': 'name',
'customer': 'customer',
'startdate': 'startdate',
'enddate': 'enddate',
'type': 'type',
'kind': 'kind'
}
self._id = id
self._parent = parent
self._name = name
self._customer = customer
self._startdate = startdate
self._enddate = enddate
self._type = type
self._kind = kind
@classmethod
def from_dict(cls, dikt) -> 'Project':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Project of this Project.
:rtype: Project
"""
return deserialize_model(dikt, cls)
@property
def id(self) -> int:
"""
Gets the id of this Project.
internal id of project, gets created by system
:return: The id of this Project.
:rtype: int
"""
return self._id
@id.setter
def id(self, id: int):
"""
Sets the id of this Project.
internal id of project, gets created by system
:param id: The id of this Project.
:type id: int
"""
self._id = id
@property
def parent(self) -> int:
"""
Gets the parent of this Project.
id of parent project so projects can be organized
:return: The parent of this Project.
:rtype: int
"""
return self._parent
@parent.setter
def parent(self, parent: int):
"""
Sets the parent of this Project.
id of parent project so projects can be organized
:param parent: The parent of this Project.
:type parent: int
"""
self._parent = parent
@property
def name(self) -> str:
"""
Gets the name of this Project.
:return: The name of this Project.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""
Sets the name of this Project.
:param name: The name of this Project.
:type name: str
"""
self._name = name
@property
def customer(self) -> int:
"""
Gets the customer of this Project.
id of customer this project is done for
:return: The customer of this Project.
:rtype: int
"""
return self._customer
@customer.setter
def customer(self, customer: int):
"""
Sets the customer of this Project.
id of customer this project is done for
:param customer: The customer of this Project.
:type customer: int
"""
self._customer = customer
@property
def startdate(self) -> datetime:
"""
Gets the startdate of this Project.
start date of project
:return: The startdate of this Project.
:rtype: datetime
"""
return self._startdate
@startdate.setter
def startdate(self, startdate: datetime):
"""
Sets the startdate of this Project.
start date of project
:param startdate: The startdate of this Project.
:type startdate: datetime
"""
self._startdate = startdate
@property
def enddate(self) -> datetime:
"""
Gets the enddate of this Project.
end date of project
:return: The enddate of this Project.
:rtype: datetime
"""
return self._enddate
@enddate.setter
def enddate(self, enddate: datetime):
"""
Sets the enddate of this Project.
end date of project
:param enddate: The enddate of this Project.
:type enddate: datetime
"""
self._enddate = enddate
@property
def type(self) -> str:
"""
Gets the type of this Project.
type of this project
:return: The type of this Project.
:rtype: str
"""
return self._type
@type.setter
def type(self, type: str):
"""
Sets the type of this Project.
type of this project
:param type: The type of this Project.
:type type: str
"""
allowed_values = ["Kundenprojekt", "internes Projekt", "untergeordnet", "offen"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def kind(self) -> str:
"""
Gets the kind of this Project.
kind of project
:return: The kind of this Project.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind: str):
"""
Sets the kind of this Project.
kind of project
:param kind: The kind of this Project.
:type kind: str
"""
allowed_values = ["Beratung", "SBF", "RA / RRD", "SBF / RA / RRD", "Sonstiges"]
if kind not in allowed_values:
raise ValueError(
"Invalid value for `kind` ({0}), must be one of {1}"
.format(kind, allowed_values)
)
self._kind = kind
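# Illustrative usage sketch (not part of the generated model):
#   project = Project(id=1, name="Example", type="Kundenprojekt", kind="Beratung")
#   same = Project.from_dict({'id': 1, 'name': "Example"})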
| {
"content_hash": "99588e1c4f2663d4df9f5eaf14c09bff",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 190,
"avg_line_length": 25.307407407407407,
"alnum_prop": 0.5356358846773013,
"repo_name": "ttraulsen/project-manager",
"id": "c5f6d5b715f9f2f6c081b55ef6a154e2c8d652df",
"size": "6850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/python-flask-server/swagger_server/models/project.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "28451"
},
{
"name": "Python",
"bytes": "45584"
},
{
"name": "Shell",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "49454"
}
],
"symlink_target": ""
} |
import properties
class Parser(object):
def __init__(self, parse_type, options):
self.parse_type = parse_type
self.options = options
self.css_class = options.TITLE_CLASS
def parse(self, string):
return Title(string.strip(), self.css_class)
class Title(object):
def __init__(self, title, css_class):
"""
Constructor should be only accessed by parse method.
"""
self.css_class = css_class
self.title = title
def to_string(self):
return self.title
def display_ast(self, indents):
indenting = indents * " "
return indenting + "Title:" + self.to_string() + "\n"
def render(self):
return properties.generate_html(self.css_class, self.title)
| {
"content_hash": "6b0036f3e5ed69274a890c29a5d14448",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 25.321428571428573,
"alnum_prop": 0.6544428772919605,
"repo_name": "Kashomon/pyrite",
"id": "f1837842bb4c991f4f13a23d5219baa5c1f2467e",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrite/data_structures/title_prop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "588"
},
{
"name": "Python",
"bytes": "34525"
},
{
"name": "Shell",
"bytes": "209"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.condenser_equipment_and_heat_exchangers import HeatExchangerFluidToFluid
log = logging.getLogger(__name__)
class TestHeatExchangerFluidToFluid(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_heatexchangerfluidtofluid(self):
pyidf.validation_level = ValidationLevel.error
obj = HeatExchangerFluidToFluid()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# node
var_loop_demand_side_inlet_node_name = "node|Loop Demand Side Inlet Node Name"
obj.loop_demand_side_inlet_node_name = var_loop_demand_side_inlet_node_name
# node
var_loop_demand_side_outlet_node_name = "node|Loop Demand Side Outlet Node Name"
obj.loop_demand_side_outlet_node_name = var_loop_demand_side_outlet_node_name
# real
var_loop_demand_side_design_flow_rate = 0.0001
obj.loop_demand_side_design_flow_rate = var_loop_demand_side_design_flow_rate
# node
var_loop_supply_side_inlet_node_name = "node|Loop Supply Side Inlet Node Name"
obj.loop_supply_side_inlet_node_name = var_loop_supply_side_inlet_node_name
# node
var_loop_supply_side_outlet_node_name = "node|Loop Supply Side Outlet Node Name"
obj.loop_supply_side_outlet_node_name = var_loop_supply_side_outlet_node_name
# real
var_loop_supply_side_design_flow_rate = 0.0001
obj.loop_supply_side_design_flow_rate = var_loop_supply_side_design_flow_rate
# alpha
var_heat_exchange_model_type = "CrossFlowBothUnMixed"
obj.heat_exchange_model_type = var_heat_exchange_model_type
# real
var_heat_exchanger_ufactor_times_area_value = 0.0001
obj.heat_exchanger_ufactor_times_area_value = var_heat_exchanger_ufactor_times_area_value
# alpha
var_control_type = "UncontrolledOn"
obj.control_type = var_control_type
# node
var_heat_exchanger_setpoint_node_name = "node|Heat Exchanger Setpoint Node Name"
obj.heat_exchanger_setpoint_node_name = var_heat_exchanger_setpoint_node_name
# real
var_minimum_temperature_difference_to_activate_heat_exchanger = 25.0
obj.minimum_temperature_difference_to_activate_heat_exchanger = var_minimum_temperature_difference_to_activate_heat_exchanger
# alpha
var_heat_transfer_metering_end_use_type = "FreeCooling"
obj.heat_transfer_metering_end_use_type = var_heat_transfer_metering_end_use_type
# node
var_component_override_loop_supply_side_inlet_node_name = "node|Component Override Loop Supply Side Inlet Node Name"
obj.component_override_loop_supply_side_inlet_node_name = var_component_override_loop_supply_side_inlet_node_name
# node
var_component_override_loop_demand_side_inlet_node_name = "node|Component Override Loop Demand Side Inlet Node Name"
obj.component_override_loop_demand_side_inlet_node_name = var_component_override_loop_demand_side_inlet_node_name
# alpha
var_component_override_cooling_control_temperature_mode = "WetBulbTemperature"
obj.component_override_cooling_control_temperature_mode = var_component_override_cooling_control_temperature_mode
# real
var_sizing_factor = 0.0001
obj.sizing_factor = var_sizing_factor
# real
var_operation_minimum_temperature_limit = 19.19
obj.operation_minimum_temperature_limit = var_operation_minimum_temperature_limit
# real
var_operation_maximum_temperature_limit = 20.2
obj.operation_maximum_temperature_limit = var_operation_maximum_temperature_limit
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].name, var_name)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].loop_demand_side_inlet_node_name, var_loop_demand_side_inlet_node_name)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].loop_demand_side_outlet_node_name, var_loop_demand_side_outlet_node_name)
self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].loop_demand_side_design_flow_rate, var_loop_demand_side_design_flow_rate)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].loop_supply_side_inlet_node_name, var_loop_supply_side_inlet_node_name)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].loop_supply_side_outlet_node_name, var_loop_supply_side_outlet_node_name)
self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].loop_supply_side_design_flow_rate, var_loop_supply_side_design_flow_rate)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].heat_exchange_model_type, var_heat_exchange_model_type)
self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].heat_exchanger_ufactor_times_area_value, var_heat_exchanger_ufactor_times_area_value)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].control_type, var_control_type)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].heat_exchanger_setpoint_node_name, var_heat_exchanger_setpoint_node_name)
self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].minimum_temperature_difference_to_activate_heat_exchanger, var_minimum_temperature_difference_to_activate_heat_exchanger)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].heat_transfer_metering_end_use_type, var_heat_transfer_metering_end_use_type)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].component_override_loop_supply_side_inlet_node_name, var_component_override_loop_supply_side_inlet_node_name)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].component_override_loop_demand_side_inlet_node_name, var_component_override_loop_demand_side_inlet_node_name)
self.assertEqual(idf2.heatexchangerfluidtofluids[0].component_override_cooling_control_temperature_mode, var_component_override_cooling_control_temperature_mode)
self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].sizing_factor, var_sizing_factor)
self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].operation_minimum_temperature_limit, var_operation_minimum_temperature_limit)
        self.assertAlmostEqual(idf2.heatexchangerfluidtofluids[0].operation_maximum_temperature_limit, var_operation_maximum_temperature_limit)
| {
"content_hash": "1bbe65c0e4637efe0bcc6dfae9ce667d",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 187,
"avg_line_length": 61.58771929824562,
"alnum_prop": 0.7300954280017091,
"repo_name": "rbuffat/pyidf",
"id": "bef3e33c5519b6b7a87beb990fc6e009fef33132",
"size": "7021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_heatexchangerfluidtofluid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
} |
from xml.dom import minidom
from os import walk
import ConfigParser
import csv
from string import split
from string import strip
from time import gmtime, localtime, asctime
from os.path import join, dirname, exists, abspath
#log errors stuff
from traceback import format_exception
from sys import exc_info
#this is the log file - made global so all the subroutines can see it
global mylog
#small app to suck InfoPath data files into Excel csv file
def get_fields():
global mylog
mylog.writelines(".. parsing config \n")
fields =[]
cp = ConfigParser.ConfigParser()
cp.read("config.ini")
fields = split(cp.get("InfoPath","fields"),",")
path = cp.get("InfoPath","path")
return fields, path
def read_file(fields, path, writer):
global mylog
#read and write out the files
for root, dirs, files in walk(path):
for filename in files:
if ".xml" in filename:
abspath = join(root, filename)
try:
mylog.write("\n" + abspath + "\n")
f = open(abspath,'r')
dom = minidom.parseString(f.read())
row = []
for field in fields:
try:
data = dom.getElementsByTagName(strip(field))[0].childNodes[0].data
                            data = data.encode('utf-8')  # put your code set here
except:
mylog.write("...error on " + field + "\n")
mylog.write(''.join(format_exception(*exc_info())))
data = " "
row.append(data)
writer.writerow(row)
f.close()
except:
txt = ''.join(format_exception(*exc_info()))
mylog.write(txt + "\n")
def create_log ():
global mylog
logname = "reportlog.txt"
time_now = asctime(localtime())
mylog = open(logname, 'w')
mylog.writelines(time_now + ".. starting \n")
return
if __name__=="__main__":
#create the log file
create_log()
#get the settings from config.ini in same dir
fields, path = get_fields()
#open csv and write out header fields
writer = csv.writer(open("report.csv", "wb"))
writer.writerow(fields)
#read files and output Excel csv
read_file(fields, path, writer)
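# Illustrative config.ini (section and key names taken from get_fields above;
# the values are placeholders, not from the original recipe):
#   [InfoPath]
#   fields = employee,department,date
#   path = C:\forms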
| {
"content_hash": "ec5a67f8733bda54bb607badf0f528f0",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 95,
"avg_line_length": 27.2183908045977,
"alnum_prop": 0.5629222972972973,
"repo_name": "ActiveState/code",
"id": "a754d2adf4be6a689346bffdd3dd91ee83328ab9",
"size": "2368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/442520_Covert_Microsoft_Office_InfoPath_2003_files_CSV_/recipe-442520.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='montab',
description='Like Alt-Tab, but for monitors',
long_description=readme(),
license='MIT',
url='https://github.com/rabinv/montab',
author='Rabin Vincent',
author_email='[email protected]',
version='0.1.0',
entry_points={
'console_scripts': 'montab = montab.montab:main'
},
packages=['montab'],
classifiers=[
'Environment :: X11 Applications :: GTK',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Desktop Environment :: Window Managers',
],
)
| {
"content_hash": "8e4f6f2e250d5cde7f8304412ad6cc48",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 27.785714285714285,
"alnum_prop": 0.5989717223650386,
"repo_name": "rabinv/montab",
"id": "1b7effa86043afb793163195c2e70e3398e90524",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10821"
}
],
"symlink_target": ""
} |
from electrum_mona.i18n import _
fullname = 'Ledger Wallet'
description = 'Provides support for Ledger hardware wallet'
requires = [('btchip', 'github.com/ledgerhq/btchip-python')]
registers_keystore = ('hardware', 'ledger', _("Ledger wallet"))
available_for = ['qt', 'cmdline']
| {
"content_hash": "c2f6c38d33c69967bd673108e8cf0477",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 63,
"avg_line_length": 40,
"alnum_prop": 0.7214285714285714,
"repo_name": "wakiyamap/electrum-mona",
"id": "bdf337edebc83796b2f3a958e4091c333f2ea8f4",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_mona/plugins/ledger/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13043"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2162"
},
{
"name": "NSIS",
"bytes": "7779"
},
{
"name": "Python",
"bytes": "4381566"
},
{
"name": "Ruby",
"bytes": "16375"
},
{
"name": "Shell",
"bytes": "100799"
},
{
"name": "kvlang",
"bytes": "67448"
}
],
"symlink_target": ""
} |
__all__ = [
'brogressbar',
'rowgressbar',
]
import sys
import time
def format_interval(t):
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '%d:%02d:%02d' % (h, m, s)
else:
return '%02d:%02d' % (m, s)
def format_meter(n, total, elapsed):
# n - number of finished iterations
# total - total number of iterations, or None
# elapsed - number of seconds passed since start
    if total is not None and n > total:
total = None
elapsed_str = format_interval(elapsed)
rate = '%5.2f' % (n / elapsed) if elapsed else '?'
if total:
frac = float(n) / total
N_BARS = 10
bar_length = int(frac*N_BARS)
bar = '#'*bar_length + '-'*(N_BARS-bar_length)
percentage = '%3d%%' % (frac * 100)
left_str = format_interval(elapsed / n * (total-n)) if n else '?'
return '|%s| %d/%d %s [elapsed: %s left: %s, %s iters/sec]' % (
bar, n, total, percentage, elapsed_str, left_str, rate)
else:
return '%d [elapsed: %s, %s iters/sec]' % (n, elapsed_str, rate)
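# Illustrative output (not part of the original module): for a run that has
# finished 25 of 100 iterations after 30 seconds, format_meter(25, 100, 30)
# returns
#   '|##--------| 25/100  25% [elapsed: 00:30 left: 01:30,  0.83 iters/sec]'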
def format_bytes_meter(rows_read, bytes_read, total_bytes, elapsed):
# elapsed - number of seconds passed since start
    if total_bytes is not None and bytes_read > total_bytes:
total_bytes = None
elapsed_str = format_interval(elapsed)
elapsed = float(elapsed)
bytes_read = float(bytes_read)
rows_per_sec = (rows_read / elapsed) if elapsed else 0.0
bytes_per_sec = (bytes_read / elapsed) if elapsed else 0.0
mb_read = (bytes_read / 1024.0 / 1024.0)
mb_per_sec = (bytes_per_sec / 1024.0 / 1024.0)
if total_bytes:
total_bytes = float(total_bytes)
mb_total = total_bytes / 1024.0 / 1024.0
frac = bytes_read / total_bytes
N_BARS = 20
bar_length = int(frac*N_BARS)
bar = '#'*bar_length + '-'*(N_BARS-bar_length)
percentage_str = '%0.2f%%' % (frac * 100)
bytes_left = total_bytes - bytes_read
seconds_left = int(bytes_left / bytes_per_sec) if bytes_per_sec else 0
left_str = format_interval(seconds_left)
#import ipdb
#ipdb.set_trace()
fmt = (
'|%s| %s '
'[%d rows @ %0.2f rows/s, '
'%0.2fMB @ %0.2f MB/s, '
'elapsed: %s, left: %s]'
)
args = (
bar,
percentage_str,
rows_read,
rows_per_sec,
mb_read,
mb_per_sec,
elapsed_str,
left_str,
)
return fmt % args
else:
fmt = (
'%0.3fMB %d '
'[elapsed: %s, %0.2f rows/s, %0.2f MB/s]'
)
args = (
mb_read,
rows_read,
elapsed_str,
rows_per_sec,
mb_per_sec,
)
return fmt % args
def format_rows_meter(rows_read, total_rows, elapsed):
# elapsed - number of seconds passed since start
    if total_rows is not None and rows_read > total_rows:
total_rows = None
elapsed_str = format_interval(elapsed)
elapsed = float(elapsed)
rows_read = float(rows_read)
rows_per_sec = (rows_read / elapsed) if elapsed else 0.0
if total_rows:
total_rows = float(total_rows)
frac = rows_read / total_rows
N_BARS = 20
bar_length = int(frac*N_BARS)
bar = '#'*bar_length + '-'*(N_BARS-bar_length)
percentage_str = '%0.2f%%' % (frac * 100)
rows_left = total_rows - rows_read
seconds_left = int(rows_left / rows_per_sec) if rows_per_sec else 0
left_str = format_interval(seconds_left)
fmt = (
'|%s| %s '
'[%d rows @ %0.2f rows/s, '
'elapsed: %s, left: %s]'
)
args = (
bar,
percentage_str,
rows_read,
rows_per_sec,
elapsed_str,
left_str,
)
return fmt % args
else:
fmt = '%d [elapsed: %s, %0.2f rows/s]'
args = (int(rows_read), elapsed_str, rows_per_sec)
return fmt % args
class StatusPrinter(object):
def __init__(self, file):
self.file = file
self.last_printed_len = 0
def print_status(self, s):
self.file.write('\r'+s+' '*max(self.last_printed_len-len(s), 0))
self.file.flush()
self.last_printed_len = len(s)
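# Minimal usage sketch (illustrative, not in the original module): repeated
# calls overwrite the same console line via the leading '\r', which is what
# produces the in-place progress display.
#   sp = StatusPrinter(sys.stderr)
#   sp.print_status('10%')
#   sp.print_status('20%')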
def isiterable(i):
return hasattr(i, '__iter__') or hasattr(i, 'next')
def brogressbar(iterable, desc='', total_bytes=None, leave=False,
file=sys.stderr, mininterval=0.5, miniters=1):
try:
dummy = iterable.bytes_read
except AttributeError:
from .util import progressbar as _progressbar
for obj in _progressbar(iterable):
yield obj
        return  # end the generator; raising StopIteration here is an error under PEP 479 (Python 3.7+)
if total_bytes is None:
try:
total_bytes = len(iterable)
except TypeError:
total_bytes = None
prefix = desc+': ' if desc else ''
sp = StatusPrinter(file)
sp.print_status(prefix + format_bytes_meter(0, 0, total_bytes, 0))
start_t = last_print_t = time.time()
last_print_n = 0
n = 0
rows_read = 0
bytes_read = 0
for obj in iterable:
yield obj
# Now the object was created and processed, so we can print the meter.
n += 1
if isiterable(obj):
rows_read += len(obj)
else:
rows_read += 1
bytes_read = iterable.bytes_read
if n - last_print_n >= miniters:
# We check the counter first, to reduce the overhead of time.time()
cur_t = time.time()
if cur_t - last_print_t >= mininterval:
meter = format_bytes_meter(
rows_read,
bytes_read,
total_bytes,
cur_t - start_t,
)
sp.print_status(prefix + meter)
last_print_n = n
last_print_t = cur_t
if not leave:
sp.print_status('')
sys.stdout.write('\r')
else:
if last_print_n < n:
cur_t = time.time()
meter = format_bytes_meter(
rows_read,
bytes_read,
total_bytes,
cur_t - start_t,
)
sp.print_status(prefix + meter)
file.write('\ndone\n')
def rowgressbar(iterable, desc='', total_rows=None, leave=False,
file=sys.stderr, mininterval=0.5, miniters=1):
if total_rows is None:
try:
total_rows = len(iterable)
except TypeError:
total_rows = None
prefix = desc+': ' if desc else ''
sp = StatusPrinter(file)
sp.print_status(prefix + format_rows_meter(0, total_rows, 0))
start_t = last_print_t = time.time()
last_print_n = 0
n = 0
rows_read = 0
for obj in iterable:
yield obj
# Now the object was created and processed, so we can print the meter.
n += 1
if isiterable(obj):
rows_read += len(obj)
else:
rows_read += 1
        if total_rows is not None and rows_read > total_rows:
total_rows = rows_read
if n - last_print_n >= miniters:
# We check the counter first, to reduce the overhead of time.time()
cur_t = time.time()
if cur_t - last_print_t >= mininterval:
meter = format_rows_meter(
rows_read,
total_rows,
cur_t - start_t,
)
sp.print_status(prefix + meter)
last_print_n = n
last_print_t = cur_t
if not leave:
sp.print_status('')
sys.stdout.write('\r')
else:
cur_t = time.time()
meter = format_rows_meter(
rows_read,
total_rows,
cur_t - start_t,
)
sp.print_status(prefix + meter)
file.write('\ndone\n')
# vim:set ts=8 sw=4 sts=4 tw=78 et:
| {
"content_hash": "bfd66b1c654d4614605cc4c7c8068c13",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 79,
"avg_line_length": 27.376712328767123,
"alnum_prop": 0.5068801601200901,
"repo_name": "tpn/tpn",
"id": "4812eb46f76577d83bb2a1c17949dd9b3f79e7f0",
"size": "8039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/tpn/progressbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "49"
},
{
"name": "Jupyter Notebook",
"bytes": "141992"
},
{
"name": "Python",
"bytes": "329484"
}
],
"symlink_target": ""
} |
question_list = [
# (mark, count, [directories])
(1, 3, 'eop/chapter3/linear_search_io_'),
]
practice_mode = True
standalone = False
logged = False
log_dir = ''
| {
"content_hash": "7747ae999ac69956ac61c33685255ec2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 42,
"avg_line_length": 18.22222222222222,
"alnum_prop": 0.6524390243902439,
"repo_name": "stryder199/RyarkAssignments",
"id": "fa72393b0a6362ae9eba55d0204a7c0655bb2acd",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assignment2/quizzes/eop/chapter3/linear_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1754"
},
{
"name": "CSS",
"bytes": "126846"
},
{
"name": "JavaScript",
"bytes": "615432"
},
{
"name": "PHP",
"bytes": "14898"
},
{
"name": "Python",
"bytes": "3757772"
},
{
"name": "R",
"bytes": "413"
},
{
"name": "Shell",
"bytes": "38797"
},
{
"name": "VimL",
"bytes": "215"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
import csv
def format_csv(data, file_name):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="' + file_name + '.csv"'
writer = csv.writer(response)
writer.writerow(["Location","Name","Room","Date","1"])
for x in data:
writer.writerow([x.location,x.name, x.room, ("{:%Y-%m-%d %H:%M}".format(x.pub_date))])
return response
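# Hypothetical usage sketch (names are assumptions, not from this project): a
# view could stream a queryset straight to the browser as CSV, e.g.
#   def export_assets(request):
#       return format_csv(Asset.objects.all(), "assets")
# where Asset is assumed to expose location, name, room and pub_date fields.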
| {
"content_hash": "a1a4539dbe71612684114bc293169d56",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 94,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6517857142857143,
"repo_name": "DukeOfNewYork/Django-Asset-Database",
"id": "df1f9910e4878aa42ced8e4b59771dd5e0cebf5f",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AssetDatabase/view_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24853"
},
{
"name": "HTML",
"bytes": "15132"
},
{
"name": "JavaScript",
"bytes": "7032"
},
{
"name": "Python",
"bytes": "22130"
}
],
"symlink_target": ""
} |
import httplib2
import json
import os
import hashlib
import io
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from apiclient import discovery
from apiclient.http import MediaIoBaseDownload
# Print something to the console and log it to the log file
def log_and_print(log_file, log_entry, newline=True):
if newline:
log_file.write(log_entry + "\n")
print(log_entry)
else:
log_file.write(log_entry)
print(log_entry, end="", flush=True)
# Handles authentication
def auth():
gauth = GoogleAuth()
gauth.LoadCredentialsFile("letitrain-creds-gdrive.txt")
if gauth.credentials is None:
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
gauth.SaveCredentialsFile("letitrain-creds-gdrive.txt")
httpauth = gauth.Get_Http_Object()
service = discovery.build('drive', 'v3', http=httpauth)
return gauth, httpauth, service
# Retrieves the information about every file
# Can either do deleted or regular files
def list_files(gauth, deleted, log_file):
drive = GoogleDrive(gauth)
if deleted:
log_and_print(log_file, "Retrieving list of deleted files... ", False)
file_list = drive.ListFile({'q': 'trashed=true'}).GetList()
log_and_print(log_file, "Done!")
else:
log_and_print(log_file, "Retrieving list of regular files... ", False)
file_list = drive.ListFile({'q': 'trashed=false'}).GetList()
log_and_print(log_file, "Done!")
return file_list
# makes the hashmap that determines file type to download when file is a
# Google-apps file
def make_hash_map():
file1 = open('google_file_types.txt', 'r')
file_types = dict()
for line in file1:
attribute_list = line.strip().split(',')
file_types[attribute_list[0]] = [attribute_list[1], attribute_list[2]]
return file_types
# Retrieves version information in JSON format of previous versions
# given a file ID
def download_revisions(httpauth, service, fileID, title, path, counter, log_file):
if not os.path.exists(path + "/" + title):
os.makedirs(path + "/" + title)
url = "https://www.googleapis.com/drive/v3/files/" + fileID + "/revisions"
resp, content = httpauth.request(url, 'GET')
revisions = json.loads(content.decode('utf-8'))
revision_info = []
rev_num = 1
for revision in revisions["revisions"]:
revision_info.append([str(rev_num), revision["id"], revision["modifiedTime"]])
file_path = path + "/" + title + "/" + title + ".rev" + str(rev_num)
orig_title = str(title)
# to prevent duplicate file names being saved
if os.path.exists(file_path):
file_path, title = get_new_file_name(file_path)
log_and_print(log_file, counter + " File named '" + orig_title + "' already exists. Saving as '" + title + "' instead.")
log_and_print(log_file, counter + " Downloading '" + title + ".rev" + str(rev_num) + "'...")
request = service.revisions().get_media(fileId=fileID, revisionId=revision["id"])
fh = io.FileIO(file_path, mode='wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
# Print status of download (mainly for larger files)
print("%d%%\r" % int(status.progress() * 100), end="", flush=True)
fh.close()
log_and_print(log_file, counter + " Hashing '" + title + ".rev" + str(rev_num) + "'...")
with open(path + "/_hashes.txt", "a") as hashes_file:
hashes_file.write(title + ".rev" + str(rev_num) + "\n")
hashes_file.write("--MD5: " + hash_file(file_path, "md5") + "\n")
hashes_file.write("--SHA1: " + hash_file(file_path, "sha1") + "\n")
hashes_file.write("--SHA256: " + hash_file(file_path, "sha256") + "\n")
rev_num += 1
log_and_print(log_file, counter + " Writing revision info for '" + title + "'...")
with open(path + "/" + title + "/" + title + "_revisions.txt", "w") as saved_file:
for item in revision_info:
saved_file.write("Revision Number: " + item[0] + "\n")
saved_file.write("--Revision ID: " + item[1] + "\n")
saved_file.write("--Revision Last Modifed: " + item[2] + "\n")
# Check if there are revisions for a given fileID
def check_revisions(gauth, fileID):
httpauth = gauth
url = "https://www.googleapis.com/drive/v3/files/" + fileID + "/revisions"
resp, content = httpauth.request(url, 'GET')
revisions = json.loads(content.decode('utf-8'))
try:
if len(revisions["revisions"]) > 1:
return True
except:
return False
# sanitizes name to remove invalid characters
def sanitize_name(name, include_period=True):
name = name.replace('/', '_')
name = name.replace(':', '_')
name = name.replace('*', '_')
name = name.replace('?', '_')
name = name.replace('\\', '_')
name = name.replace('|', '_')
name = name.replace('<', '_')
name = name.replace('"', '_')
    new_name = name.replace('>', '_')
    if include_period:
        # Apply the replacement to the string that is actually returned;
        # otherwise the include_period flag has no effect.
        new_name = new_name.replace('.', '_')
    return new_name
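# Illustrative examples (not in the original module):
#   sanitize_name('a/b:c.txt')                        -> 'a_b_c_txt'
#   sanitize_name('a/b:c.txt', include_period=False)  -> 'a_b_c.txt'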
# Download files from drive when given the fileID
def download_files(gauth, httpauth, service, file_list, path, log_file):
total = len(file_list)
progress = 1
drive = GoogleDrive(gauth)
gdrive_file_type = make_hash_map()
for down_file in file_list:
counter = "[" + str(progress).zfill(len(str(total))) + "/" + str(total) + "]"
if check_revisions(httpauth, down_file['id']):
if 'google-apps' in down_file['mimeType']:
if not export_to_file(down_file, gdrive_file_type, httpauth, service, path, counter, log_file):
file_list.remove(down_file)
else:
download_revisions(httpauth, service, down_file['id'], down_file['title'], path, counter, log_file)
else:
if 'google-apps' in down_file['mimeType']:
if not export_to_file(down_file, gdrive_file_type, httpauth, service, path, counter, log_file):
file_list.remove(down_file)
else:
title = sanitize_name(down_file['title'], False)
file_path = path + "/" + title
# to prevent duplicate file names being saved
if os.path.exists(file_path):
file_path, title = get_new_file_name(file_path)
log_and_print(log_file, counter + " File named '" + down_file['title'] + "' already exists. Saving as '" + title + "' instead.")
log_and_print(log_file, counter + " Downloading '" + title + "'...")
try:
request = service.files().get_media(fileId=down_file['id'])
fh = io.FileIO(file_path, mode='wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
# Print status of download (mainly for larger files)
print("%d%%\r" % int(status.progress() * 100), end="", flush=True)
fh.close()
log_and_print(log_file, counter + " Hashing '" + title + "'...")
with open(path + "/_hashes.txt", "a") as hashes_file:
hashes_file.write(title + "\n")
hashes_file.write("--MD5: " + hash_file(file_path, "md5") + "\n")
hashes_file.write("--SHA1: " + hash_file(file_path, "sha1") + "\n")
hashes_file.write("--SHA256: " + hash_file(file_path, "sha256") + "\n")
except:
fh.close()
try:
if os.path.exists(file_path):
os.remove(file_path)
except:
log_and_print(log_file, counter + " Failed to download '" + title + "'. The user most likely doesn't have permissions to export this file.")
log_and_print(log_file, counter + " Please manually remove '" + title + "' as it is an incomplete download.")
else:
log_and_print(log_file, counter + " Failed to download '" + title + "'. The user most likely doesn't have permissions to export this file.")
progress += 1
def export_to_file(down_file, gdrive_file_type, httpauth, service, path, counter, log_file):
value = gdrive_file_type[down_file['mimeType']]
if value[0] != 'None':
name = sanitize_name(down_file['title'])
file_path = path + "/_google/" + name + value[0]
# to prevent duplicate file names being saved
if os.path.exists(file_path):
file_path, name = get_new_file_name(file_path)
name = name.split(".")[0]
log_and_print(log_file, counter + " File named '" + down_file['title'] + "' already exists. Saving as '" + name + "' instead.")
log_and_print(log_file, counter + " Downloading '" + name + "' as '" + name + value[0] + "'...")
else:
log_and_print(log_file, counter + " Downloading '" + name + "' as '" + name + value[0] + "'...")
try:
request = service.files().export_media(fileId=down_file['id'], mimeType=value[1])
fh = io.FileIO(file_path, mode='wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
# Print status of download (mainly for larger files)
print("%d%%\r" % int(status.progress() * 100), end="", flush=True)
fh.close()
log_and_print(log_file, counter + " Hashing '" + name + value[0] + "'...")
with open(path + "/_google/_hashes.txt", "a") as hashes_file:
hashes_file.write(name + value[0] + "\n")
hashes_file.write("--MD5: " + hash_file(file_path, "md5") + "\n")
hashes_file.write("--SHA1: " + hash_file(file_path, "sha1") + "\n")
hashes_file.write("--SHA256: " + hash_file(file_path, "sha256") + "\n")
return True
except:
fh.close()
try:
if os.path.exists(file_path):
os.remove(file_path)
except:
log_and_print(log_file, counter + " Failed to download '" + name + "'. The user most likely doesn't have permissions to export this file.")
log_and_print(log_file, counter + " Please manually remove '" + name + "' as it is an incomplete download.")
else:
log_and_print(log_file, counter + " Failed to download '" + name + "'. The user most likely doesn't have permissions to export this file.")
else:
log_and_print(log_file, counter + " Skipping '" + down_file['title'] + "' because it is an unsupported MIME type.")
# if there is already a file being saved that has the name of the current file
# being created, this will return a new unique file name
def get_new_file_name(file_path):
file_count = 1
if "." in file_path:
file_beginning, extension = file_path.rsplit('.', 1)
while os.path.exists(file_beginning + str(file_count) + "." + extension):
file_count = file_count + 1
new_file_path = file_beginning + str(file_count) + "." + extension
file_name = file_beginning.split('/')
title = file_name[-1] + str(file_count) + "." + extension
else:
while os.path.exists(file_path + str(file_count)):
file_count = file_count + 1
new_file_path = file_path + str(file_count)
file_name = file_path.split('/')
title = file_name[-1] + str(file_count)
return new_file_path, title
# Hashes a file with a given algorithm and returns the hash value
def hash_file(filename, alg):
blsize = 65536
if alg == "md5":
hasher = hashlib.md5()
elif alg == "sha1":
hasher = hashlib.sha1()
elif alg == "sha256":
hasher = hashlib.sha256()
with open(filename, "rb") as hashfile:
buf = hashfile.read(blsize)
while len(buf) > 0:
hasher.update(buf)
buf = hashfile.read(blsize)
return hasher.hexdigest()
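# Illustrative usage (not in the original module): the file name is a
# placeholder, and the result is the 64-character hex digest for sha256.
#   digest = hash_file('report.pdf', 'sha256')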
# Create the directories that the tool will store the downloded files and generated reports
def create_dirs(timestamp):
if not os.path.exists("gdrive_dump_{}".format(timestamp)):
os.makedirs("gdrive_dump_{}".format(timestamp))
if not os.path.exists("gdrive_dump_{}/regular".format(timestamp)):
os.makedirs("gdrive_dump_{}/regular".format(timestamp))
if not os.path.exists("gdrive_dump_{}/deleted".format(timestamp)):
os.makedirs("gdrive_dump_{}/deleted".format(timestamp))
regular_dir = "gdrive_dump_{}/regular".format(timestamp)
deleted_dir = "gdrive_dump_{}/deleted".format(timestamp)
if not os.path.exists("{}/_google".format(regular_dir)):
os.makedirs("{}/_google".format(regular_dir))
if not os.path.exists("{}/_google".format(deleted_dir)):
os.makedirs("{}/_google".format(deleted_dir))
return regular_dir, deleted_dir
def google_drive(timestamp, log_file):
gauth, httpauth, service = auth()
log_and_print(log_file, "Sucessfully authenticated to Google Drive.")
log_and_print(log_file, "Creating directories... ", False)
regular_dir, deleted_dir = create_dirs(timestamp)
log_and_print(log_file, "Done!")
file_list = list_files(gauth, False, log_file)
log_and_print(log_file, "Downloading all regular files into '" + regular_dir + "'...")
download_files(gauth, httpauth, service, file_list, regular_dir, log_file)
log_and_print(log_file, "Done!")
deleted_file_list = list_files(gauth, True, log_file)
log_and_print(log_file, "Downloading all deleted files into '" + deleted_dir + "'...")
download_files(gauth, httpauth, service, deleted_file_list, deleted_dir, log_file)
log_and_print(log_file, "Done!")
return "gdrive_dump_" + timestamp, file_list, deleted_file_list
| {
"content_hash": "51462f0f8da62d81f32b709fecbee122",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 164,
"avg_line_length": 49.24742268041237,
"alnum_prop": 0.5812574139976275,
"repo_name": "com6056/LetItRain-475-2161_Good_Rodgers",
"id": "dbe7ed9dd6579111bb3ec68cc5f89933ebe5b60a",
"size": "14419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdrive.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44126"
}
],
"symlink_target": ""
} |
from jnpr.junos.exception import ConnectNotMasterError
from jnpr.junos.exception import RpcError
def facts_chassis(junos, facts):
"""
The following facts are assigned:
facts['2RE'] : designates if the device can support two RE, not that it
has them
facts['RE_hw_mi'] : designates if the device is
multi-instance-routing-engine
facts['model'] : product model
facts['serialnumber'] : serial number
NOTES:
(1) if in a 2RE system, this routine will only load the information
from the first chassis item.
(2) hostname, domain, and fqdn are retrieved from configuration data;
inherited configs are checked.
"""
# Set default values.
facts["2RE"] = False
facts["RE_hw_mi"] = False
facts["model"] = "UNKNOWN"
facts["serialnumber"] = "UNKNOWN"
rsp = junos.rpc.get_chassis_inventory()
if rsp.tag == "error":
raise RuntimeError()
if rsp.tag == "output":
# this means that there was an error; due to the
# fact that this connection is not on the master
# @@@ need to validate on VC-member
raise ConnectNotMasterError(junos)
if rsp.tag == "multi-routing-engine-results":
facts["2RE"] = True
facts["RE_hw_mi"] = True
else:
facts["2RE"] = False
facts["model"] = rsp.findtext(".//chassis[1]/description", "UNKNOWN")
facts["serialnumber"] = (
rsp.findtext(".//chassis[1]/serial-number")
or rsp.findtext('.//chassis-module[name="Backplane"]/serial-number')
or rsp.findtext('.//chassis-module[name="Midplane"]/serial-number', "UNKNOWN")
)
if facts["model"] == "UNKNOWN" or facts["serialnumber"] == "UNKNOWN":
raise RpcError()
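# Illustrative result (model and serial number values are made up): after a
# successful call, facts_chassis(junos, facts) leaves facts looking roughly like
#   {'2RE': False, 'RE_hw_mi': False, 'model': 'MX480', 'serialnumber': 'JN12AB34CD56'}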
| {
"content_hash": "90339400d9e8c1cb0c3627a6029d40ee",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 86,
"avg_line_length": 35.1764705882353,
"alnum_prop": 0.6114827201783724,
"repo_name": "Juniper/py-junos-eznc",
"id": "dfe06f78d81d3a27d6f1ea52ce19210aecb80ad9",
"size": "1794",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/jnpr/junos/ofacts/chassis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "856"
},
{
"name": "Pascal",
"bytes": "408"
},
{
"name": "Puppet",
"bytes": "2263"
},
{
"name": "Python",
"bytes": "1101958"
},
{
"name": "Ruby",
"bytes": "134"
},
{
"name": "Shell",
"bytes": "1516"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
S3DIRECT_UNIQUE_RENAME = getattr(settings, "S3DIRECT_UNIQUE_RENAME", None)
S3DIRECT_ROOT_DIR = getattr(settings, "S3DIRECT_ROOT_DIR", '')
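# Illustrative override (values are placeholders) in a project's settings.py:
#   S3DIRECT_UNIQUE_RENAME = True
#   S3DIRECT_ROOT_DIR = 'uploads'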
| {
"content_hash": "135d75d0d801750c1d25ccebe3a68cd0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 74,
"avg_line_length": 42.4,
"alnum_prop": 0.7688679245283019,
"repo_name": "hovel/django-s3direct",
"id": "35d459f10151ed777b16fae9a75417db655bc714",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s3direct/defaults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "232"
},
{
"name": "JavaScript",
"bytes": "13707"
},
{
"name": "Python",
"bytes": "11891"
}
],
"symlink_target": ""
} |
"""An example for using tf_estimator_evaluation."""
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackType
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import get_flattened_attack_metrics
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import SlicingSpec
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.tf_estimator_evaluation import MembershipInferenceTrainingHook
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.tf_estimator_evaluation import run_attack_on_tf_estimator_model
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.02, 'Learning rate for training')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('epochs', 100, 'Number of epochs')
flags.DEFINE_string('model_dir', None, 'Model directory.')
flags.DEFINE_bool(
'tensorboard_merge_classifiers', False, 'If true, plot '
'different classifiers with the same slicing_spec and metric '
'in the same figure.')
def small_cnn_fn(features, labels, mode):
"""Setup a small CNN for image classification."""
input_layer = tf.reshape(features['x'], [-1, 32, 32, 3])
for _ in range(3):
y = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')(input_layer)
y = tf.keras.layers.MaxPool2D()(y)
y = tf.keras.layers.Flatten()(y)
y = tf.keras.layers.Dense(64, activation='relu')(y)
logits = tf.keras.layers.Dense(10)(y)
if mode != tf_estimator.ModeKeys.PREDICT:
vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
# Configure the training op (for TRAIN mode).
if mode == tf_estimator.ModeKeys.TRAIN:
optimizer = tf.train.MomentumOptimizer(
learning_rate=FLAGS.learning_rate, momentum=0.9)
global_step = tf.compat.v1.train.get_global_step()
train_op = optimizer.minimize(loss=scalar_loss, global_step=global_step)
return tf_estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode).
elif mode == tf_estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy':
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(input=logits, axis=1))
}
return tf_estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
# Output the prediction probability (for PREDICT mode).
elif mode == tf_estimator.ModeKeys.PREDICT:
predictions = tf.nn.softmax(logits)
return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
def load_cifar10():
"""Loads CIFAR10 data."""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = np.array(x_train, dtype=np.float32) / 255
x_test = np.array(x_test, dtype=np.float32) / 255
y_train = np.array(y_train, dtype=np.int32).squeeze()
y_test = np.array(y_test, dtype=np.int32).squeeze()
return x_train, y_train, x_test, y_test
def main(unused_argv):
logging.set_verbosity(logging.ERROR)
# Load training and test data.
x_train, y_train, x_test, y_test = load_cifar10()
# Sample weights are set to `None` by default, but can be changed here.
sample_weight_train, sample_weight_test = None, None
# Instantiate the tf.Estimator.
classifier = tf_estimator.Estimator(
model_fn=small_cnn_fn, model_dir=FLAGS.model_dir)
# A function to construct input_fn given (data, label), to be used by the
# membership inference training hook.
def input_fn_constructor(x, y):
return tf_compat_v1_estimator.inputs.numpy_input_fn(
x={'x': x}, y=y, shuffle=False)
# Get hook for membership inference attack.
mia_hook = MembershipInferenceTrainingHook(
classifier, (x_train, y_train), (x_test, y_test),
input_fn_constructor,
slicing_spec=SlicingSpec(entire_dataset=True, by_class=True),
attack_types=[
AttackType.THRESHOLD_ATTACK, AttackType.K_NEAREST_NEIGHBORS
],
tensorboard_dir=FLAGS.model_dir,
tensorboard_merge_classifiers=FLAGS.tensorboard_merge_classifiers)
# Create tf.Estimator input functions for the training and test data.
train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
x={'x': x_train},
y=y_train,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.epochs,
shuffle=True)
eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
x={'x': x_test}, y=y_test, num_epochs=1, shuffle=False)
# Training loop.
steps_per_epoch = 50000 // FLAGS.batch_size
for epoch in range(1, FLAGS.epochs + 1):
# Train the model, with the membership inference hook.
classifier.train(
input_fn=train_input_fn, steps=steps_per_epoch, hooks=[mia_hook])
# Evaluate the model and print results
eval_results = classifier.evaluate(input_fn=eval_input_fn)
test_accuracy = eval_results['accuracy']
print('Test accuracy after %d epochs is: %.3f' % (epoch, test_accuracy))
print('End of training attack')
attack_results = run_attack_on_tf_estimator_model(
classifier, (x_train, y_train, sample_weight_train),
(x_test, y_test, sample_weight_test),
input_fn_constructor,
slicing_spec=SlicingSpec(entire_dataset=True, by_class=True),
attack_types=[
AttackType.THRESHOLD_ATTACK, AttackType.K_NEAREST_NEIGHBORS
])
att_types, att_slices, att_metrics, att_values = get_flattened_attack_metrics(
attack_results)
print('\n'.join([
' %s: %.4f' % (', '.join([s, t, m]), v)
for t, s, m, v in zip(att_types, att_slices, att_metrics, att_values)
]))
if __name__ == '__main__':
app.run(main)
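# Illustrative invocation (the model directory is a placeholder), using the
# flags defined at the top of this file:
#   python tf_estimator_evaluation_example.py --model_dir=/tmp/mia_example \
#       --epochs=10 --batch_size=250 --learning_rate=0.02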
| {
"content_hash": "25fecc997a1e822964b3f0ee9a6e042e",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 137,
"avg_line_length": 40.20666666666666,
"alnum_prop": 0.7041949925385508,
"repo_name": "tensorflow/privacy",
"id": "b3b4ffa9bd085effd3300bb33018a0d2096e6337",
"size": "6620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/tf_estimator_evaluation_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "767849"
},
{
"name": "Python",
"bytes": "1466141"
},
{
"name": "Shell",
"bytes": "21949"
},
{
"name": "Starlark",
"bytes": "35224"
}
],
"symlink_target": ""
} |
import sys
from io import StringIO
from lager_ml_common import _NUM_FEATURES, convert_lager_to_numbers, convert_numbers_to_lager, expand_gesture_num_to_target
if (len(sys.argv) < 3):
print("lager_expander [GESTURE_FILE] [TARGET_LENGTH]")
exit()
orig_gesture_file = open(sys.argv[1], "r")
expanded_filename = sys.argv[1][:-4] + "_expanded.dat"
expanded_file = open(expanded_filename, "w")
target_length = int(sys.argv[2])
for gesture in orig_gesture_file:
gesture_num = convert_lager_to_numbers(gesture)
expanded_num = expand_gesture_num_to_target(gesture_num, _NUM_FEATURES, ',')
expanded_file.write(convert_numbers_to_lager(expanded_num) + "\n")
expanded_file.close()
orig_gesture_file.close()
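# Illustrative run (the gesture file name is a placeholder); the expanded
# output is written next to the input as gestures_expanded.dat:
#   python lager_expander.py gestures.dat 100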
| {
"content_hash": "0edafe88a6d21a9803d73ea093b78c86",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 123,
"avg_line_length": 32.13636363636363,
"alnum_prop": 0.7284299858557284,
"repo_name": "andresodio/lager",
"id": "4ce42cb0eaa24337fd2fe9bcfc74c93d0650b092",
"size": "794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml/lager_expander.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "762"
},
{
"name": "C++",
"bytes": "123600"
},
{
"name": "CMake",
"bytes": "2969"
},
{
"name": "Makefile",
"bytes": "3966"
},
{
"name": "Python",
"bytes": "21565"
},
{
"name": "Shell",
"bytes": "911"
}
],
"symlink_target": ""
} |
"""
File: mp3numberify.py
Author: Greenblast
Github: https://github.com/Greenblast
Description: Numberifying mp3 files in a given path
"""
import os
import sys
from mutagen.mp3 import EasyMP3
ARGS_COUNT = 2
def organize(path):
for f in os.listdir(path):
if f.endswith("mp3"):
a = EasyMP3(os.path.join(path, f))
tracknum = str(a["tracknumber"][0].zfill(2))
os.rename(os.path.join(path, f), os.path.join(path, tracknum + "-" + f))
def print_usage():
"""Prints usage """
print("Usage %s filepath", sys.argv[0])
def main():
"""
Main function
Checks arguments and calls main logic
"""
    if len(sys.argv) == ARGS_COUNT:
organize(sys.argv[1])
else:
print_usage()
if __name__ == "__main__":
main()
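# Illustrative invocation (the path is a placeholder): prefixes every .mp3 in
# the folder with its zero-padded track number.
#   python mp3numberify.py ~/Music/SomeAlbum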
| {
"content_hash": "b35b18d039b0d251ffc3aa7159dcc56d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 21.675675675675677,
"alnum_prop": 0.5947630922693267,
"repo_name": "GreenBlast/dotfiles",
"id": "5797276874f26f8ba600bd64bd74cdc36e4ea0f8",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".scripts/mp3numberify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28800"
},
{
"name": "Shell",
"bytes": "66668"
},
{
"name": "Vim script",
"bytes": "97627"
}
],
"symlink_target": ""
} |
"""
Tests for the nested_choice_calcs.py file.
"""
import unittest
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
import numpy.testing as npt
import pylogit.nested_choice_calcs as nlc
import pylogit.nested_logit as nested_logit
# Use the following to always show the warnings
np.seterr(all='warn')
warnings.simplefilter("always")
class ComputationalSetUp(unittest.TestCase):
"""
Defines the common setUp method used for the different type of tests.
"""
def setUp(self):
# Create the betas to be used during the tests
self.fake_betas = np.array([0.3, -0.6, 0.2])
# Create the fake nest coefficients to be used during the tests
# Note that these are the 'natural' nest coefficients, i.e. the
# inverse of the scale parameters for each nest. They should be less
# than or equal to 1.
self.natural_nest_coefs = np.array([0.995, 0.5])
# Create an array of all model parameters
self.fake_all_params = np.concatenate((self.natural_nest_coefs,
self.fake_betas))
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two.
# The nest memberships of these alternatives are given below.
self.fake_rows_to_nests = csr_matrix(np.array([[1, 0],
[1, 0],
[0, 1],
[1, 0],
[0, 1]]))
# Create a sparse matrix that maps the rows of the design matrix to the
        # observations
self.fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
self.fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 0, 3.5]])
# Create fake versions of the needed arguments for the MNL constructor
self.fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": range(5),
"intercept": [1 for i in range(5)]})
# Record the various column names
self.alt_id_col = "alt_id"
self.obs_id_col = "obs_id"
self.choice_col = "choice"
# Store the choice array
self.choice_array = self.fake_df[self.choice_col].values
# Create a sparse matrix that maps the chosen rows of the design
        # matrix to the observations
self.fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0],
[0, 1]]))
# Create the index specification and name dictionaryfor the model
self.fake_specification = OrderedDict()
self.fake_specification["intercept"] = [1, 2]
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names = OrderedDict()
self.fake_names["intercept"] = ["ASC 1", "ASC 2"]
self.fake_names["x"] = ["x (generic coefficient)"]
# Create the nesting specification
self.fake_nest_spec = OrderedDict()
self.fake_nest_spec["Nest 1"] = [1, 2]
self.fake_nest_spec["Nest 2"] = [3]
# Create a nested logit object
args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
kwargs = {"names": self.fake_names,
"nest_spec": self.fake_nest_spec}
self.model_obj = nested_logit.NestedLogit(*args, **kwargs)
# Store a ridge parameter
self.ridge = 0.5
return None
def test_2d_error_in_calc_nested_probs(self):
"""
Ensure that a NotImplementedError is raised whenever calc_nested_probs
is called with a 2D array of nest coefficients or index coefficients.
"""
# Create a 2D array of index coefficients
index_2d = np.concatenate([self.fake_betas[:, None],
self.fake_betas[:, None]], axis=1)
# Create a 2D array of nest coefficients
nest_coef_2d = np.concatenate([self.natural_nest_coefs[:, None],
self.natural_nest_coefs[:, None]],
axis=1)
# Alias the function being tested
func = nlc.calc_nested_probs
# Get the arguments needed for the function. These are not the
# legitimate arguments, but we just need a set of arrays to get to the
# first argument check.
args = [np.arange(5) for x in range(5)]
# Note the error message that should be raised.
msg = "Support for 2D index_coefs or nest_coefs not yet implemented."
for pos, array_2d in enumerate([nest_coef_2d, index_2d]):
# Set the argument to the given 2D array.
args[pos] = array_2d
# Ensure that the appropriate error is raised.
self.assertRaisesRegexp(NotImplementedError,
msg,
func,
*args)
# Set the argument back to None.
args[pos] = None
return None
def test_return_type_error_in_calc_nested_probs(self):
"""
Ensure that a ValueError is raised if return_type is not one of a
handful of accepted values.
"""
# Alias the function being tested
func = nlc.calc_nested_probs
# Get the arguments needed for the function. These are not the
# legitimate arguments, but we just need a set of arrays to get to the
# second argument check.
args = [np.arange(5) for x in range(5)]
# Note the error message that should be raised.
msg = "return_type must be one of the following values: "
# Create the kwargs to be tested
bad_return_types = ["foo", 5, None]
# Perform the tests
kwargs = {"return_type": "long_probs"}
for return_string in bad_return_types:
kwargs["return_type"] = return_string
self.assertRaisesRegexp(ValueError,
msg,
func,
*args,
**kwargs)
return None
def test_return_type_mismatch_error_in_calc_nested_probs(self):
"""
Ensure that a ValueError is raised if return_type includes chosen_probs
but chosen_row_to_obs is None.
"""
# Alias the function being tested
func = nlc.calc_nested_probs
# Get the arguments needed for the function. These are not the
# legitimate arguments, but we just need a set of arrays to get to the
# third argument check.
args = [np.arange(5) for x in range(5)]
# Note the error message that should be raised.
msg = "chosen_row_to_obs is None AND return_type in"
# Create the kwargs to be tested
bad_return_types = ['chosen_probs', 'long_and_chosen_probs']
# Perform the tests
kwargs = {"return_type": "long_probs",
"chosen_row_to_obs": None}
for return_string in bad_return_types:
kwargs["return_type"] = return_string
self.assertRaisesRegexp(ValueError,
msg,
func,
*args,
**kwargs)
return None
def test_calc_probabilities(self):
"""
Ensure that the calc_probabilities function returns correct results
when executed.
"""
# Calculate the index values, i.e. the systematic utilities for each
# person.
index_array = self.model_obj.design.dot(self.fake_betas)
# Scale the index array by the nest coefficients
long_nests = self.fake_rows_to_nests.dot(self.natural_nest_coefs)
scaled_index_array = index_array / long_nests
# Exponentiate the scaled index array
exp_scaled_index = np.exp(scaled_index_array)
# Calculate the sum of the exponentiated scaled index values, per nest
nest_1_sum = np.array([exp_scaled_index[[0, 1]].sum(),
exp_scaled_index[3]])
nest_2_sum = np.array([exp_scaled_index[2], exp_scaled_index[4]])
# Raise the nest sums to the power of the nest coefficients
# There will be one element for each person.
powered_nest_1_sum = nest_1_sum**self.natural_nest_coefs[0]
powered_nest_2_sum = nest_2_sum**self.natural_nest_coefs[1]
# Create 'long-format' versions of the nest sums and the powered nest
# sums
long_nest_sums = np.array([nest_1_sum[0],
nest_1_sum[0],
nest_2_sum[0],
nest_1_sum[1],
nest_2_sum[1]])
long_powered_nest_sums = np.array([powered_nest_1_sum[0],
powered_nest_1_sum[0],
powered_nest_2_sum[0],
powered_nest_1_sum[1],
powered_nest_2_sum[1]])
# Calculate the powered-denominators
sum_powered_nests = powered_nest_1_sum + powered_nest_2_sum
long_powered_denoms = self.fake_rows_to_obs.dot(sum_powered_nests)
# print long_powered_denoms
# Calculate the probability
probs = ((exp_scaled_index / long_nest_sums) *
(long_powered_nest_sums / long_powered_denoms))
# Isolate the chosen probabilities
condition = self.fake_df[self.choice_col].values == 1
expected_chosen_probs = probs[np.where(condition)]
# Alias the function being tested
func = nlc.calc_nested_probs
# Gather the arguments needed for the function
args = [self.natural_nest_coefs,
self.fake_betas,
self.model_obj.design,
self.fake_rows_to_obs,
self.fake_rows_to_nests]
kwargs = {"return_type": "long_probs"}
# Get and test the function results
function_results = func(*args, **kwargs)
self.assertIsInstance(function_results, np.ndarray)
self.assertEqual(function_results.shape, probs.shape)
npt.assert_allclose(function_results, probs)
# Check the function results again, this time looking for chosen probs
kwargs["return_type"] = "chosen_probs"
kwargs["chosen_row_to_obs"] = self.fake_chosen_rows_to_obs
function_results_2 = func(*args, **kwargs)
self.assertIsInstance(function_results_2, np.ndarray)
self.assertEqual(function_results_2.shape, expected_chosen_probs.shape)
npt.assert_allclose(function_results_2, expected_chosen_probs)
# Check the function result when we return long_and_chosen_probs
kwargs["return_type"] = "long_and_chosen_probs"
function_results_3 = func(*args, **kwargs)
self.assertIsInstance(function_results_3, tuple)
self.assertTrue([all(isinstance(x, np.ndarray)
for x in function_results_3)])
self.assertEqual(function_results_3[0].shape,
expected_chosen_probs.shape)
self.assertEqual(function_results_3[1].shape, probs.shape)
npt.assert_allclose(function_results_3[0], expected_chosen_probs)
npt.assert_allclose(function_results_3[1], probs)
return None
def test_calc_log_likelihood(self):
"""
Ensure that calc_log_likelihood returns the expected results.
"""
# Gather the arguments needed for the calc_probabilities function
args = [self.natural_nest_coefs,
self.fake_betas,
self.model_obj.design,
self.fake_rows_to_obs,
self.fake_rows_to_nests]
kwargs = {"return_type": "chosen_probs",
"chosen_row_to_obs": self.fake_chosen_rows_to_obs}
chosen_prob_array = nlc.calc_nested_probs(*args, **kwargs)
# Calculate the expected log-likelihood
expected_log_likelihood = np.log(chosen_prob_array).sum()
penalized_log_likelihood = (expected_log_likelihood -
self.ridge *
((self.natural_nest_coefs - 1)**2).sum() -
self.ridge *
(self.fake_betas**2).sum())
# Alias the function being tested
func = nlc.calc_nested_log_likelihood
# Gather the arguments for the function being tested
likelihood_args = [self.natural_nest_coefs,
self.fake_betas,
self.model_obj.design,
self.fake_rows_to_obs,
self.fake_rows_to_nests,
self.choice_array]
likelihood_kwargs = {"ridge": self.ridge}
# Get and test the function results.
function_results = func(*likelihood_args)
self.assertAlmostEqual(expected_log_likelihood, function_results)
# Repeat the tests with a weighted log likelihood.
weights = 2 * np.ones(self.fake_design.shape[0])
likelihood_kwargs["weights"] = weights
likelihood_kwargs['ridge'] = None
function_results_2 = func(*likelihood_args, **likelihood_kwargs)
self.assertAlmostEqual(2 * expected_log_likelihood, function_results_2)
likelihood_kwargs["weights"] = None
likelihood_kwargs['ridge'] = self.ridge
# Repeat the test with the ridge penalty
function_results_3 = func(*likelihood_args, **likelihood_kwargs)
self.assertAlmostEqual(penalized_log_likelihood, function_results_3)
return None
def test_naturalize_nest_coefs(self):
"""
Ensure that we return expected results when using
naturalize_nest_coefs.
"""
# Create a set of reparametrized nest coefficients
orig_nest_coefs = np.array([-800, -5, -1, 0, 1, 5])
# Calculate what the results should be
expected_coefs = (1.0 + np.exp(-1 * orig_nest_coefs))**-1
expected_coefs[0] = nlc.min_comp_value
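        # Worked values (illustrative, not part of the original test): under this
        # logistic map, 0 -> 0.5 and 5 -> ~0.9933, while very negative inputs
        # underflow toward 0 and are floored at nlc.min_comp_value.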
# Get and test the results of the naturalize_nest_coefs function
function_results = nlc.naturalize_nest_coefs(orig_nest_coefs)
self.assertIsInstance(function_results, np.ndarray)
self.assertEqual(function_results.shape, expected_coefs.shape)
npt.assert_allclose(function_results, expected_coefs)
        return None
def test_calc_nested_gradient(self):
"""
Ensure that we return the correct gradient when passing correct
arguments to calc_nested_gradient(). For formulas used to
'hand'-calculate the gradient, see page 34 of "Estimation of
multinomial logit models in R : The mlogit Packages"
"""
# Get the logit of the natural nest coefficients
nest_coefs = np.log(self.natural_nest_coefs /
(1 - self.natural_nest_coefs))
#####
# Calculate what the gradient should be for the observations in the
# test case.
#####
# Create the index array for each alternative
index_array = self.fake_design.dot(self.fake_betas)
# Create an array of long, natural nest parameters
long_nests = self.fake_rows_to_nests.dot(self.natural_nest_coefs)
# Exponentiate the index array
exp_scaled_index = np.exp(index_array / long_nests)
# Calculate the sum of exp_scaled_index by obs by nest
# Note the resulting array will be num_obs by num_nests
exp_scaled_index_2d = exp_scaled_index[:, None]
interim_array = self.fake_rows_to_nests.multiply(exp_scaled_index_2d)
nest_sum = self.fake_rows_to_obs.T.dot(interim_array)
if issparse(nest_sum):
nest_sum = nest_sum.toarray()
elif isinstance(nest_sum, np.matrixlib.defmatrix.matrix):
nest_sum = np.asarray(nest_sum)
# Create a 1D array that notes the nest-sum for the given nest and
# observation that corresponds to a given row
long_nest_sums = self.fake_rows_to_obs.dot(nest_sum)
long_nest_sums = (self.fake_rows_to_nests
.multiply(long_nest_sums)
.sum(axis=1))
if issparse(long_nest_sums):
long_nest_sums = long_nest_sums.toarray()
elif isinstance(long_nest_sums, np.matrixlib.defmatrix.matrix):
long_nest_sums = np.asarray(long_nest_sums)
long_nest_sums = long_nest_sums.ravel()
# Get the probability of each individual choosing each available
# alternative, given the alternative's nest.
prob_alt_given_nest = exp_scaled_index / long_nest_sums
# Get the probability of each individual choosing a given nest
# Note that this array will be num_obs by num_nests
nest_probs_numerator = np.power(nest_sum,
self.natural_nest_coefs[None, :])
nest_probs_denominator = nest_probs_numerator.sum(axis=1)
nest_probs = nest_probs_numerator / nest_probs_denominator[:, None]
# Get the "average" value of the design matrix, in the chosen nests for
        # each observation. Note that observation 1 chose nest 1 and
# observation 2 chose nest 2.
prob_by_design = prob_alt_given_nest[:, None] * self.fake_design
x_bar_obs_1_nest_1 = prob_by_design[0:2, :].sum(axis=0)
x_bar_obs_1_nest_2 = prob_by_design[2, :]
x_bar_array = np.concatenate([x_bar_obs_1_nest_1[None, :],
x_bar_obs_1_nest_2[None, :]],
axis=0)
x_bar_obs_1 = nest_probs[0, :][None, :].dot(x_bar_array).ravel()
# x_bar_obs_1 = nest_probs[0, :][:, None] * x_bar_array
x_bar_obs_2_nest_1 = prob_by_design[3, :]
x_bar_obs_2_nest_2 = prob_by_design[4, :]
x_bar_array_2 = np.concatenate([x_bar_obs_2_nest_1[None, :],
x_bar_obs_2_nest_2[None, :]],
axis=0)
x_bar_obs_2 = nest_probs[1, :][None, :].dot(x_bar_array_2).ravel()
# x_bar_obs_2 = (nest_probs[1, :][:, None] * x_bar_array_2)
index_bar_obs_1_nest_1 = (prob_alt_given_nest * index_array)[:2].sum()
index_bar_obs_1_nest_2 = index_array[2]
index_bar_obs_2_nest_1 = index_array[3]
index_bar_obs_2_nest_2 = index_array[4]
# Note that the order of the gradient will be nest coef 1, nest coef 2,
# then the index coefficients.
obs_1_gradient = np.zeros(self.fake_all_params.shape[0])
obs_2_gradient = np.zeros(self.fake_all_params.shape[0])
# Calculate the gradient for observation 1
term_1 = index_array[1]
term_2 = (self.natural_nest_coefs[0]**2 *
(1 - nest_probs[0, 0]) *
np.log(nest_sum[0, 0]))
term_3 = ((1 - self.natural_nest_coefs[0] * (1 - nest_probs[0, 0])) *
index_bar_obs_1_nest_1)
obs_1_gradient[0] = (-1 * self.natural_nest_coefs[0]**-2 *
(term_1 - term_2 - term_3))
term_4 = nest_probs[0, 1] / self.natural_nest_coefs[1]
term_5 = index_bar_obs_1_nest_2
term_6 = self.natural_nest_coefs[1] * np.log(nest_sum[0, 1])
obs_1_gradient[1] = term_4 * (term_5 - term_6)
term_7 = 1.0 / self.natural_nest_coefs[0]
term_8 = self.fake_design[1]
term_9 = (1 - self.natural_nest_coefs[0]) * x_bar_obs_1_nest_1
term_10 = x_bar_obs_1
obs_1_gradient[2:] = term_7 * (term_8 - term_9) - term_10
# Calculate the gradient for observation 2
term_1 = index_array[4]
term_2 = (self.natural_nest_coefs[1]**2 *
(1 - nest_probs[1, 1]) *
np.log(nest_sum[1, 1]))
term_3 = ((1 - self.natural_nest_coefs[1] * (1 - nest_probs[1, 1])) *
index_bar_obs_2_nest_2)
# Note the calculates above are for the chosen nest which is nest 2
# for this observation
obs_2_gradient[1] = (-1 * self.natural_nest_coefs[1]**-2 *
(term_1 - term_2 - term_3))
term_4 = nest_probs[1, 0] / self.natural_nest_coefs[0]
term_5 = index_bar_obs_2_nest_1
term_6 = self.natural_nest_coefs[0] * np.log(nest_sum[1, 0])
obs_2_gradient[0] = term_4 * (term_5 - term_6)
term_7 = 1.0 / self.natural_nest_coefs[1]
term_8 = self.fake_design[4]
term_9 = (1 - self.natural_nest_coefs[1]) * x_bar_obs_2_nest_2
term_10 = x_bar_obs_2
obs_2_gradient[2:] = term_7 * (term_8 - term_9) - term_10
# Calculate the overall gradient
expected_gradient = obs_1_gradient + obs_2_gradient
# Don't forget to account for the jacobian
jacobian = self.natural_nest_coefs * (1.0 - self.natural_nest_coefs)
expected_gradient[:2] *= jacobian
# Get the arguments necessary for the nested gradient function
args = [nest_coefs,
self.fake_betas,
self.fake_design,
self.choice_array,
self.fake_rows_to_obs,
self.fake_rows_to_nests]
# Alias the function being tested
func = nlc.calc_nested_gradient
# Get the function results
func_results = func(*args)
# Test the returned results
self.assertIsInstance(func_results, np.ndarray)
self.assertEqual(len(func_results.shape), 1)
self.assertEqual(func_results.shape, expected_gradient.shape)
npt.assert_allclose(func_results, expected_gradient)
# Test the Gradient function with weights
new_weights = 2 * np.ones(self.fake_design.shape[0])
kwargs = {'weights': new_weights}
expected_gradient_weighted = 2 * expected_gradient
func_result_weighted = func(*args, **kwargs)
self.assertIsInstance(func_result_weighted, np.ndarray)
self.assertEqual(func_result_weighted.shape,
expected_gradient_weighted.shape)
npt.assert_allclose(func_result_weighted, expected_gradient_weighted)
# Ensure the function works when using a ridge penalty
# Note we have to create an adjusted array for penalization because we
# have reparameterized the nest coefficients
params_for_penalty = np.concatenate([(20 - nest_coefs),
self.fake_betas], axis=0)
ridge_penalty = 2 * self.ridge * params_for_penalty
penalized_gradient = expected_gradient - ridge_penalty
kwargs = {"ridge": self.ridge}
new_func_results = func(*args, **kwargs)
# Test the returned results
self.assertIsInstance(new_func_results, np.ndarray)
self.assertEqual(len(new_func_results.shape), 1)
self.assertEqual(new_func_results.shape, penalized_gradient.shape)
npt.assert_allclose(new_func_results, penalized_gradient)
return None
def test_prep_vectors_for_gradient(self):
"""
Ensure that the dictionary returned by this function contains the
desired arrays.
"""
# Calculate the arrays that should be returned for our test case.
# Create the index array for each alternative
index_array = self.model_obj.design.dot(self.fake_betas)
# Create an array of long, natural nest parameters
long_nests = self.fake_rows_to_nests.dot(self.natural_nest_coefs)
# Exponentiate the index array
exp_scaled_index = np.exp(index_array / long_nests)
# Calculate the sum of exp_scaled_index by obs by nest
# Note the resulting array will be num_obs by num_nests
exp_scaled_index_2d = exp_scaled_index[:, None]
interim_array = self.fake_rows_to_nests.multiply(exp_scaled_index_2d)
nest_sum = self.fake_rows_to_obs.T.dot(interim_array)
if issparse(nest_sum):
nest_sum = nest_sum.toarray()
elif isinstance(nest_sum, np.matrixlib.defmatrix.matrix):
nest_sum = np.asarray(nest_sum)
# Create a 1D array that notes the nest-sum for the given nest and
# observation that corresponds to a given row
long_nest_sums = self.fake_rows_to_obs.dot(nest_sum)
long_nest_sums = (self.fake_rows_to_nests
.multiply(long_nest_sums)
.sum(axis=1))
if issparse(long_nest_sums):
long_nest_sums = long_nest_sums.toarray()
elif isinstance(long_nest_sums, np.matrixlib.defmatrix.matrix):
long_nest_sums = np.asarray(long_nest_sums)
long_nest_sums = long_nest_sums.ravel()
# Get the probability of each individual choosing each available
# alternative, given the alternative's nest.
prob_alt_given_nest = exp_scaled_index / long_nest_sums
# Get the probability of each individual choosing a given nest
# Note that this array will be num_obs by num_nests
nest_probs_numerator = np.power(nest_sum,
self.natural_nest_coefs[None, :])
nest_probs_denominator = nest_probs_numerator.sum(axis=1)
nest_probs = nest_probs_numerator / nest_probs_denominator[:, None]
# Get the probability of each alternative being chosen
args = [self.natural_nest_coefs,
self.fake_betas,
self.model_obj.design,
self.fake_rows_to_obs,
self.fake_rows_to_nests]
kwargs = {"return_type": "long_probs"}
long_probs = nlc.calc_nested_probs(*args, **kwargs)
        # Create an expected dictionary containing the same keys and,
        # hopefully, the same values as the function results.
expected_dict = {}
expected_dict["long_nest_params"] = long_nests
expected_dict["scaled_y"] = self.choice_array / long_nests
long_chosen_nest = np.array([1, 1, 0, 0, 1])
expected_dict["long_chosen_nest"] = long_chosen_nest
obs_to_chosen_nests = np.array([[1, 0], [0, 1]])
expected_dict["obs_to_chosen_nests"] = obs_to_chosen_nests
expected_dict["prob_given_nest"] = prob_alt_given_nest
expected_dict["nest_choice_probs"] = nest_probs
expected_dict["ind_sums_per_nest"] = nest_sum
expected_dict["long_probs"] = long_probs
expected_dict["p_tilde_given_nest"] = (prob_alt_given_nest *
long_chosen_nest /
long_nests)
# Alias the function being tested
func = nlc.prep_vectors_for_gradient
# Gather the necessary function arguments
args = [self.natural_nest_coefs,
self.fake_betas,
self.model_obj.design,
self.choice_array,
self.fake_rows_to_obs,
self.fake_rows_to_nests]
function_results = func(*args)
# Perform the desired tests
for key in expected_dict:
self.assertTrue(key in function_results)
self.assertIsInstance(function_results[key], np.ndarray)
npt.assert_allclose(function_results[key], expected_dict[key])
return None
def test_calc_bhhh_hessian_approximation(self):
"""
Ensure that we return the correct BHHH matrix when passing correct
arguments to calc_bhhh_hessian_approximation(). For formulas used to
'hand'-calculate the gradient of each observation, see page 34 of
"Estimation of multinomial logit models in R : The mlogit Packages"
"""
# Get the logit of the natural nest coefficients
nest_coefs = np.log(self.natural_nest_coefs /
(1 - self.natural_nest_coefs))
#####
# Calculate what the gradient should be for the observations in the
# test case.
#####
# Create the index array for each alternative
index_array = self.fake_design.dot(self.fake_betas)
# Create an array of long, natural nest parameters
long_nests = self.fake_rows_to_nests.dot(self.natural_nest_coefs)
# Exponentiate the index array
exp_scaled_index = np.exp(index_array / long_nests)
# Calculate the sum of exp_scaled_index by obs by nest
# Note the resulting array will be num_obs by num_nests
exp_scaled_index_2d = exp_scaled_index[:, None]
interim_array = self.fake_rows_to_nests.multiply(exp_scaled_index_2d)
nest_sum = self.fake_rows_to_obs.T.dot(interim_array)
if issparse(nest_sum):
nest_sum = nest_sum.toarray()
elif isinstance(nest_sum, np.matrixlib.defmatrix.matrix):
nest_sum = np.asarray(nest_sum)
# Create a 1D array that notes the nest-sum for the given nest and
# observation that corresponds to a given row
long_nest_sums = self.fake_rows_to_obs.dot(nest_sum)
long_nest_sums = (self.fake_rows_to_nests
.multiply(long_nest_sums)
.sum(axis=1))
# Ensure long_nest_sums is a numpy array
if issparse(long_nest_sums):
long_nest_sums = long_nest_sums.toarray()
elif isinstance(long_nest_sums, np.matrixlib.defmatrix.matrix):
long_nest_sums = np.asarray(long_nest_sums)
# Ensure long_nest_sums is 1D
long_nest_sums = long_nest_sums.ravel()
# Get the probability of each individual choosing each available
# alternative, given the alternative's nest.
prob_alt_given_nest = exp_scaled_index / long_nest_sums
# Get the probability of each individual choosing a given nest
# Note that this array will be num_obs by num_nests
nest_probs_numerator = np.power(nest_sum,
self.natural_nest_coefs[None, :])
nest_probs_denominator = nest_probs_numerator.sum(axis=1)[:, None]
nest_probs = nest_probs_numerator / nest_probs_denominator
# Get the "average" value of the design matrix, in the chosen nests for
# each observation. Note that observation 1 chosen nest 1 and
# observation 2 chose nest 2.
prob_by_design = prob_alt_given_nest[:, None] * self.fake_design
x_bar_obs_1_nest_1 = prob_by_design[0:2, :].sum(axis=0)
x_bar_obs_1_nest_2 = prob_by_design[2, :]
x_bar_array = np.concatenate([x_bar_obs_1_nest_1[None, :],
x_bar_obs_1_nest_2[None, :]],
axis=0)
x_bar_obs_1 = nest_probs[0, :][None, :].dot(x_bar_array).ravel()
x_bar_obs_2_nest_1 = prob_by_design[3, :]
x_bar_obs_2_nest_2 = prob_by_design[4, :]
x_bar_array_2 = np.concatenate([x_bar_obs_2_nest_1[None, :],
x_bar_obs_2_nest_2[None, :]],
axis=0)
x_bar_obs_2 = nest_probs[1, :][None, :].dot(x_bar_array_2).ravel()
index_bar_obs_1_nest_1 = (prob_alt_given_nest * index_array)[:2].sum()
index_bar_obs_1_nest_2 = index_array[2]
index_bar_obs_2_nest_1 = index_array[3]
index_bar_obs_2_nest_2 = index_array[4]
# Note that the order of the gradient will be nest coef 1, nest coef 2,
# then the index coefficients.
obs_1_gradient = np.zeros(self.fake_all_params.shape[0])
obs_2_gradient = np.zeros(self.fake_all_params.shape[0])
# Calculate the gradient for observation 1
term_1 = index_array[1]
term_2 = (self.natural_nest_coefs[0]**2 *
(1 - nest_probs[0, 0]) *
np.log(nest_sum[0, 0]))
term_3 = ((1 - self.natural_nest_coefs[0] * (1 - nest_probs[0, 0])) *
index_bar_obs_1_nest_1)
obs_1_gradient[0] = (-1 * self.natural_nest_coefs[0]**-2 *
(term_1 - term_2 - term_3))
term_4 = nest_probs[0, 1] / self.natural_nest_coefs[1]
term_5 = index_bar_obs_1_nest_2
term_6 = self.natural_nest_coefs[1] * np.log(nest_sum[0, 1])
obs_1_gradient[1] = term_4 * (term_5 - term_6)
term_7 = 1.0 / self.natural_nest_coefs[0]
term_8 = self.fake_design[1]
term_9 = (1 - self.natural_nest_coefs[0]) * x_bar_obs_1_nest_1
term_10 = x_bar_obs_1
obs_1_gradient[2:] = term_7 * (term_8 - term_9) - term_10
# Calculate the gradient for observation 2
term_1 = index_array[4]
term_2 = (self.natural_nest_coefs[1]**2 *
(1 - nest_probs[1, 1]) *
np.log(nest_sum[1, 1]))
term_3 = ((1 - self.natural_nest_coefs[1] * (1 - nest_probs[1, 1])) *
index_bar_obs_2_nest_2)
        # Note the calculations above are for the chosen nest, which is
        # nest 2 for this observation.
obs_2_gradient[1] = (-1 * self.natural_nest_coefs[1]**-2 *
(term_1 - term_2 - term_3))
term_4 = nest_probs[1, 0] / self.natural_nest_coefs[0]
term_5 = index_bar_obs_2_nest_1
term_6 = self.natural_nest_coefs[0] * np.log(nest_sum[1, 0])
obs_2_gradient[0] = term_4 * (term_5 - term_6)
term_7 = 1.0 / self.natural_nest_coefs[1]
term_8 = self.fake_design[4]
term_9 = (1 - self.natural_nest_coefs[1]) * x_bar_obs_2_nest_2
term_10 = x_bar_obs_2
obs_2_gradient[2:] = term_7 * (term_8 - term_9) - term_10
# Calculate the overall gradient
stacked_gradient = np.concatenate([obs_1_gradient[None, :],
obs_2_gradient[None, :]], axis=0)
# Don't forget to account for the jacobian
jacobian = self.natural_nest_coefs * (1.0 - self.natural_nest_coefs)
stacked_gradient[:, :2] *= jacobian[None, :]
# Calculate the BHHH matrix that we expect to be returned
# Note the -1 is because the bhhh should approximate the hessian, and
# the hessian should be negative (think downward opening parabola) in
# order for the log-likelihood to achieve a maximum.
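        # In other words, expected_bhhh = -1 * sum over observations n of
        # np.outer(gradient_n, gradient_n), with n ranging over the two
        # observations in this test case.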
expected_bhhh = -1 * (np.outer(stacked_gradient[0, :],
stacked_gradient[0, :]) +
np.outer(stacked_gradient[1, :],
stacked_gradient[1, :]))
# Get the arguments necessary for the nested gradient function
args = [nest_coefs,
self.fake_betas,
self.fake_design,
self.choice_array,
self.fake_rows_to_obs,
self.fake_rows_to_nests]
# Alias the function being tested
func = nlc.calc_bhhh_hessian_approximation
# Get the function results
func_results = func(*args)
# Test the returned results
self.assertIsInstance(func_results, np.ndarray)
self.assertEqual(len(func_results.shape), 2)
self.assertEqual(func_results.shape, expected_bhhh.shape)
npt.assert_allclose(func_results, expected_bhhh)
# Test the Gradient function with weights
new_weights = 2 * np.ones(self.fake_design.shape[0])
kwargs = {'weights': new_weights}
expected_bhhh_weighted = 2 * expected_bhhh
func_result_weighted = func(*args, **kwargs)
self.assertIsInstance(func_result_weighted, np.ndarray)
self.assertEqual(func_result_weighted.shape,
expected_bhhh_weighted.shape)
npt.assert_allclose(func_result_weighted, expected_bhhh_weighted)
# Ensure the function works when using a ridge penalty
# Note we have to create an adjusted array for penalization because we
# have reparameterized the nest coefficients
ridge_penalty = 2 * self.ridge * np.identity(expected_bhhh.shape[0])
penalized_bhhh = expected_bhhh - ridge_penalty
kwargs = {"ridge": self.ridge}
new_func_results = func(*args, **kwargs)
# Test the returned results
self.assertIsInstance(new_func_results, np.ndarray)
self.assertEqual(len(new_func_results.shape), 2)
self.assertEqual(new_func_results.shape, penalized_bhhh.shape)
npt.assert_allclose(new_func_results, penalized_bhhh)
return None
| {
"content_hash": "f7720d8c38d7756b0a3648cf93f1389f",
"timestamp": "",
"source": "github",
"line_count": 855,
"max_line_length": 79,
"avg_line_length": 44.33567251461988,
"alnum_prop": 0.5645659113092568,
"repo_name": "timothyb0912/pylogit",
"id": "4b636db577811e0dff035f72ed6938fc78d84372",
"size": "37907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_nested_choice_calcs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1370409"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import mds
import json, logging, time, sys, Queue
from socketIO_client import SocketIO
logger = logging.getLogger(__name__)
logger.debug('mdsVolumio2 module loading')
class mdsVolumio2Comms(mds.playerComms):
def __init__(self, ipaddr, port):
self.ipaddr = ipaddr
self.port = port
def __str__(self):
return '{0}:{1}'.format(self.ipaddr, self.port)
class mdsVolumio2(mds.mds):
def establishConnection(self):
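        # Try to establish the socket.io connection up to retriesAllowed
        # times, pausing 0.5s between failed attempts; if every attempt
        # fails, raise RuntimeError.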
for i in range(self.retriesAllowed):
self.socketIO = None
try:
self.socketIO = SocketIO(self.playerComms.ipaddr, self.playerComms.port)
self.socketIO.on('pushQueue', self.pushQueue)
self.socketIO.on('pushState', self.pushState)
# self.socketIO.on(u'pushState', self.on_state_response)
# self.socketIO.on(u'pushQueue', self.on_queue_response)
# Request initial values
self.socketIO.emit(u'getQueue')
self.socketIO.emit(u'getState')
return
except Exception as ex:
del(self.socketIO)
logger.exception('Error connecting on attempt {0}'.format(i+1))
time.sleep(0.5)
pass
raise RuntimeError('Unable to connect')
def shutdownConnection(self):
if self.socketIO:
del(self.socketIO)
pass
def listen(self):
logger.debug('LISTENING')
self.socketIO.wait(seconds=10)
self.socketIO.emit(u'getQueue')
self.socketIO.emit(u'getState')
return True
def pushQueue(self,*args):
list = args[0]
with self.lMDS:
self.playerState['queue'] = list
self.sendUpdate()
def pushState(self, *args):
# Read musicplayer status and update musicdata
status = args[0]
with self.lMDS:
for k, v in status.iteritems():
self.playerState[k] = v
self.sendUpdate()
if __name__ == u'__main__':
import moment, getopt
try:
opts, args = getopt.getopt(sys.argv[1:],u"hs:p:l:",[u"server=",u"port=", u"level="])
except getopt.GetoptError:
print u'musicdata_volumio2.py -s <server> -p <port> -l <debug level>'
sys.exit(2)
# Set defaults
server = u'localhost'
port = 3000
level = 30
# pwd= ''
for opt, arg in opts:
if opt == u'-h':
print u'mdsVolumio2.py -s <server> -p <port> -l <debug level>'
sys.exit()
elif opt in (u"-s", u"--server"):
server = arg
elif opt in (u"-p", u"--port"):
port = arg
elif opt in (u"-l", u"--level"):
try:
level = {'NOTSET': 0, 'DEBUG':10, 'INFO': 20, 'WARNING':30, 'ERROR':40, 'CRITICAL':50}[arg.upper()]
except KeyError:
try:
level = int(arg)
except ValueError:
pass
logging.basicConfig(format=u'%(asctime)s:%(levelname)s:%(module)s:%(message)s', level=level)
# logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger(u'socketIO-client').setLevel(logging.WARNING)
exitapp = [ False ]
q = Queue.Queue()
logger.info('Starting Volumio MDS')
mdr = mdsVolumio2(name = 'Volumio2', queue= q, playerComms = mdsVolumio2Comms(server, port), retriesAllowed=3, exitApp = exitapp)
try:
while True:
try:
logger.info('Waiting for queue data')
status = q.get(timeout=1000)
q.task_done()
logger.info('Processing queue data')
ctime = moment.utcnow().timezone(u"US/Eastern").strftime(u"%-I:%M:%S %p").strip()
print u"\nStatus at time {0}".format(ctime)
for item,value in status.iteritems():
print u" [{0}]={1} {2}".format(item,value, type(value))
except Queue.Empty:
pass
except KeyboardInterrupt:
print ''
pass
finally:
exitapp[0] = True
print u"Exiting..."
| {
"content_hash": "76a82503609518672f2bbb74c0fb7f09",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 130,
"avg_line_length": 25.197080291970803,
"alnum_prop": 0.6665701042873696,
"repo_name": "dhrone/pydPiper",
"id": "a1e490638e41baaf9ece78cb28ce05c2970925da",
"size": "3557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/mdsVolumio2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "450103"
},
{
"name": "Shell",
"bytes": "1072"
}
],
"symlink_target": ""
} |
import asyncio
def async_test(loop):
def real_decorator(f):
def func_wrapper(*args, **kwargs):
coro = asyncio.coroutine(f)
future = coro(*args, **kwargs)
loop.run_until_complete(future)
return func_wrapper
return real_decorator
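# Illustrative usage (hypothetical test case, not part of this module):
#
#   loop = asyncio.new_event_loop()
#
#   class ExampleTest(unittest.TestCase):
#       @async_test(loop)
#       def test_sleep(self):
#           yield from asyncio.sleep(0)
#
# async_test(loop) converts the generator-style test into a coroutine and
# runs it to completion on the supplied event loop.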
| {
"content_hash": "5795f3ce22df327883a2949e51015f1c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 43,
"avg_line_length": 26.363636363636363,
"alnum_prop": 0.596551724137931,
"repo_name": "incnone/necrobot",
"id": "81c4c02e06969834bd456051698c95ed96dcb860",
"size": "290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "necrobot/test/asynctest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "637785"
}
],
"symlink_target": ""
} |
import itertools
import networkx as nx
from pgmpy.base import UndirectedGraph
class DirectedGraph(nx.DiGraph):
"""
Base class for all Directed Graphical Models.
Each node in the graph can represent either a random variable, `Factor`,
or a cluster of random variables. Edges in the graph represent the
dependencies between these.
Parameters
----------
data: input graph
Data to initialize graph. If data=None (default) an empty graph is
created. The data can be an edge list or any Networkx graph object.
Examples
--------
Create an empty DirectedGraph with no nodes and no edges
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
G can be grown in several ways:
**Nodes:**
Add one node at a time:
>>> G.add_node(node='a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(nodes=['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(u='a', v='b')
a list of edges,
>>> G.add_edges_from(ebunch=[('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
    Many common graph features allow python syntax to speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
def __init__(self, ebunch=None):
super(DirectedGraph, self).__init__(ebunch)
def add_node(self, node, weight=None):
"""
Adds a single node to the Graph.
Parameters
----------
node: str, int, or any hashable python object.
The node to add to the graph.
weight: int, float
The weight of the node.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_node(node='A')
>>> sorted(G.nodes())
['A']
Adding a node with some weight.
>>> G.add_node(node='B', weight=0.3)
The weight of these nodes can be accessed as:
>>> G.node['B']
{'weight': 0.3}
>>> G.node['A']
{'weight': None}
"""
# Check for networkx 2.0 syntax
if isinstance(node, tuple) and len(node) == 2 and isinstance(node[1], dict):
node, attrs = node
if attrs.get('weight', None) is not None:
attrs['weight'] = weight
else:
attrs = {'weight': weight}
super(DirectedGraph, self).add_node(node, weight=weight)
def add_nodes_from(self, nodes, weights=None):
"""
Add multiple nodes to the Graph.
        **The behaviour of adding weights is different than in networkx.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, or any hashable python
object).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the variable at index i.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']
Adding nodes with weights:
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
"""
nodes = list(nodes)
if weights:
if len(nodes) != len(weights):
raise ValueError("The number of elements in nodes and weights"
"should be equal.")
for index in range(len(nodes)):
self.add_node(node=nodes[index], weight=weights[index])
else:
for node in nodes:
self.add_node(node=node)
def add_edge(self, u, v, weight=None):
"""
Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be any hashable Python object.
weight: int, float (default=None)
The weight of the edge
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
>>> G.add_edge(u='Alice', v='Bob')
>>> G.nodes()
['Alice', 'Bob', 'Charles']
>>> G.edges()
[('Alice', 'Bob')]
When the node is not already present in the graph:
>>> G.add_edge(u='Alice', v='Ankur')
>>> G.nodes()
['Alice', 'Ankur', 'Bob', 'Charles']
>>> G.edges()
[('Alice', 'Bob'), ('Alice', 'Ankur')]
Adding edges with weight:
>>> G.add_edge('Ankur', 'Maria', weight=0.1)
>>> G.edge['Ankur']['Maria']
{'weight': 0.1}
"""
super(DirectedGraph, self).add_edge(u, v, weight=weight)
def add_edges_from(self, ebunch, weights=None):
"""
Add all the edges in ebunch.
        If nodes referred to in the ebunch are not already present, they
will be automatically added. Node names can be any hashable python
object.
**The behavior of adding weights is different than networkx.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the graph.
The edges must be given as 2-tuples (u, v).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the edge at index i.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph()
>>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
>>> G.add_edges_from(ebunch=[('Alice', 'Bob'), ('Bob', 'Charles')])
>>> G.nodes()
['Alice', 'Bob', 'Charles']
>>> G.edges()
[('Alice', 'Bob'), ('Bob', 'Charles')]
When the node is not already in the model:
>>> G.add_edges_from(ebunch=[('Alice', 'Ankur')])
>>> G.nodes()
['Alice', 'Bob', 'Charles', 'Ankur']
>>> G.edges()
[('Alice', 'Bob'), ('Bob', 'Charles'), ('Alice', 'Ankur')]
Adding edges with weights:
>>> G.add_edges_from([('Ankur', 'Maria'), ('Maria', 'Mason')],
... weights=[0.3, 0.5])
>>> G.edge['Ankur']['Maria']
{'weight': 0.3}
>>> G.edge['Maria']['Mason']
{'weight': 0.5}
"""
ebunch = list(ebunch)
if weights:
if len(ebunch) != len(weights):
raise ValueError("The number of elements in ebunch and weights"
"should be equal")
for index in range(len(ebunch)):
self.add_edge(ebunch[index][0], ebunch[index][1],
weight=weights[index])
else:
for edge in ebunch:
self.add_edge(edge[0], edge[1])
def get_parents(self, node):
"""
Returns a list of parents of node.
Throws an error if the node is not present in the graph.
Parameters
----------
node: string, int or any hashable python object.
The node whose parents would be returned.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph(ebunch=[('diff', 'grade'), ('intel', 'grade')])
>>> G.get_parents(node='grade')
['diff', 'intel']
"""
return list(self.predecessors(node))
def moralize(self):
"""
Removes all the immoralities in the DirectedGraph and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> G = DirectedGraph(ebunch=[('diff', 'grade'), ('intel', 'grade')])
>>> moral_graph = G.moralize()
>>> moral_graph.edges()
[('intel', 'grade'), ('intel', 'diff'), ('grade', 'diff')]
"""
moral_graph = UndirectedGraph(self.to_undirected().edges())
for node in self.nodes():
moral_graph.add_edges_from(
itertools.combinations(self.get_parents(node), 2))
return moral_graph
def get_leaves(self):
"""
Returns a list of leaves of the graph.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> graph = DirectedGraph([('A', 'B'), ('B', 'C'), ('B', 'D')])
>>> graph.get_leaves()
['C', 'D']
"""
return [node for node, out_degree in self.out_degree_iter() if
out_degree == 0]
def out_degree_iter(self, nbunch=None, weight=None):
if nx.__version__.startswith('1'):
return super(DirectedGraph, self).out_degree_iter(nbunch, weight)
else:
return iter(self.out_degree(nbunch, weight))
def in_degree_iter(self, nbunch=None, weight=None):
if nx.__version__.startswith('1'):
return super(DirectedGraph, self).in_degree_iter(nbunch, weight)
else:
return iter(self.in_degree(nbunch, weight))
def get_roots(self):
"""
Returns a list of roots of the graph.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> graph = DirectedGraph([('A', 'B'), ('B', 'C'), ('B', 'D'), ('E', 'B')])
>>> graph.get_roots()
['A', 'E']
"""
return [node for node, in_degree in dict(self.in_degree()).items() if in_degree == 0]
def get_children(self, node):
"""
Returns a list of children of node.
Throws an error if the node is not present in the graph.
Parameters
----------
node: string, int or any hashable python object.
The node whose children would be returned.
Examples
--------
>>> from pgmpy.base import DirectedGraph
>>> g = DirectedGraph(ebunch=[('A', 'B'), ('C', 'B'), ('B', 'D'),
('B', 'E'), ('B', 'F'), ('E', 'G')])
>>> g.get_children(node='B')
['D', 'E', 'F']
"""
return list(self.successors(node))
| {
"content_hash": "95ec14cf22b039cd219d72e3f3d79bc2",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 93,
"avg_line_length": 30.46629213483146,
"alnum_prop": 0.519454176654988,
"repo_name": "khalibartan/pgmpy",
"id": "68ee7d4c521b49b197d96510a1c2d47a881ecb9b",
"size": "10870",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pgmpy/base/DirectedGraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "1375595"
},
{
"name": "Shell",
"bytes": "1058"
}
],
"symlink_target": ""
} |
import abc
from oslo_utils import uuidutils
import osprofiler.profiler
import osprofiler.web
from requests_mock.contrib import fixture as mock_fixture
import six
import testtools
from neutronclient import client
from neutronclient.common import exceptions
AUTH_TOKEN = 'test_token'
END_URL = 'test_url'
METHOD = 'GET'
URL = 'http://test.test:1234/v2.0/test'
BODY = 'IAMFAKE'
@six.add_metaclass(abc.ABCMeta)
class TestHTTPClientMixin(object):
def setUp(self):
super(TestHTTPClientMixin, self).setUp()
self.requests = self.useFixture(mock_fixture.Fixture())
self.http = self.initialize()
@abc.abstractmethod
def initialize(self):
"""Return client class, instance."""
def _test_headers(self, expected_headers, **kwargs):
# Test headers.
self.requests.register_uri(METHOD, URL,
request_headers=expected_headers)
self.http.request(URL, METHOD, **kwargs)
self.assertEqual(kwargs.get('body'), self.requests.last_request.body)
def test_headers_without_body(self):
self._test_headers({'Accept': 'application/json'})
def test_headers_with_body(self):
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
self._test_headers(headers, body=BODY)
def test_headers_without_body_with_content_type(self):
headers = {'Accept': 'application/json'}
self._test_headers(headers, content_type='application/json')
def test_headers_with_body_with_content_type(self):
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
self._test_headers(headers, body=BODY, content_type='application/json')
def test_headers_defined_in_headers(self):
headers = {'Accept': 'application/json',
'Content-Type': 'application/json'}
self._test_headers(headers, body=BODY, headers=headers)
def test_osprofiler_headers_are_injected(self):
osprofiler.profiler.init('SWORDFISH')
self.addCleanup(osprofiler.profiler.clean)
headers = {'Accept': 'application/json'}
headers.update(osprofiler.web.get_trace_id_headers())
self._test_headers(headers)
class TestHTTPClient(TestHTTPClientMixin, testtools.TestCase):
def initialize(self):
return client.HTTPClient(token=AUTH_TOKEN, endpoint_url=END_URL)
def test_request_error(self):
def cb(*args, **kwargs):
raise Exception('error msg')
self.requests.get(URL, body=cb)
self.assertRaises(
exceptions.ConnectionFailed,
self.http._cs_request,
URL, METHOD
)
def test_request_success(self):
text = 'test content'
self.requests.register_uri(METHOD, URL, text=text)
resp, resp_text = self.http._cs_request(URL, METHOD)
self.assertEqual(200, resp.status_code)
self.assertEqual(text, resp_text)
def test_request_unauthorized(self):
text = 'unauthorized message'
self.requests.register_uri(METHOD, URL, status_code=401, text=text)
e = self.assertRaises(exceptions.Unauthorized,
self.http._cs_request, URL, METHOD)
self.assertEqual(text, e.message)
def test_request_forbidden_is_returned_to_caller(self):
text = 'forbidden message'
self.requests.register_uri(METHOD, URL, status_code=403, text=text)
resp, resp_text = self.http._cs_request(URL, METHOD)
self.assertEqual(403, resp.status_code)
self.assertEqual(text, resp_text)
def test_do_request_success(self):
text = 'test content'
self.requests.register_uri(METHOD, END_URL + URL, text=text)
resp, resp_text = self.http.do_request(URL, METHOD)
self.assertEqual(200, resp.status_code)
self.assertEqual(text, resp_text)
def test_do_request_with_headers_success(self):
text = 'test content'
self.requests.register_uri(METHOD, END_URL + URL, text=text,
request_headers={'key': 'value'})
resp, resp_text = self.http.do_request(URL, METHOD,
headers={'key': 'value'})
self.assertEqual(200, resp.status_code)
self.assertEqual(text, resp_text)
class TestHTTPClientWithReqId(TestHTTPClientMixin, testtools.TestCase):
"""Tests for when global_request_id is set."""
def initialize(self):
self.req_id = "req-%s" % uuidutils.generate_uuid()
return client.HTTPClient(token=AUTH_TOKEN, endpoint_url=END_URL,
global_request_id=self.req_id)
def test_request_success(self):
headers = {
'Accept': 'application/json',
'X-OpenStack-Request-ID': self.req_id
}
self.requests.register_uri(METHOD, URL, request_headers=headers)
self.http.request(URL, METHOD)
| {
"content_hash": "66809e7e5cbb74c8f67bcd4c37135929",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 34.6875,
"alnum_prop": 0.6334334334334334,
"repo_name": "noironetworks/python-neutronclient",
"id": "d76e9bce977ddd443e274993988bc531847cbd97",
"size": "5636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/test_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1738805"
},
{
"name": "Shell",
"bytes": "10126"
}
],
"symlink_target": ""
} |
from django.db import models
from wq.db.patterns import models as patterns
class IdentifiedModel(patterns.LabelModel):
slug = models.SlugField()
name = models.CharField(max_length=255)
class FilterableModel(patterns.LabelModel):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
IdentifiedModel, models.CASCADE
)
class CustomPatternModel(models.Model):
name = models.CharField(max_length=10)
class CustomAttachment(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey(
CustomPatternModel, models.CASCADE, related_name='attachments',
)
class CustomTypedPatternModel(models.Model):
name = models.CharField(max_length=10)
class CustomType(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class CustomTypedAttachment(models.Model):
name = models.CharField(max_length=10, null=True, blank=True)
value = models.FloatField(null=True, blank=True)
type = models.ForeignKey(CustomType, models.CASCADE)
parent = models.ForeignKey(
CustomTypedPatternModel,
models.CASCADE,
related_name='attachments'
)
class Campaign(models.Model):
pass
class Attribute(models.Model):
name = models.CharField(max_length=10)
campaign = models.ForeignKey(Campaign, models.CASCADE)
is_active = models.BooleanField()
category = models.CharField(max_length=10, blank=True)
class Entity(models.Model):
campaign = models.ForeignKey(Campaign, models.CASCADE)
class Meta:
verbose_name_plural = 'entities'
class Value(models.Model):
attribute = models.ForeignKey(Attribute, models.CASCADE)
entity = models.ForeignKey(Entity, models.CASCADE, related_name='values')
| {
"content_hash": "6bdf99bee9985d20629e05faa7f6092e",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 77,
"avg_line_length": 25.642857142857142,
"alnum_prop": 0.716991643454039,
"repo_name": "wq/wq.db",
"id": "f021d851cc177e6110aaccbdaa2bc323c1b3c0c1",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/patterns_app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "152"
},
{
"name": "Python",
"bytes": "165934"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
"""Helpful functions when parsing JSON blobs."""
from __future__ import print_function
import json
import re
import sys
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def AssertIsInstance(instance, expected_type, description):
"""Raise an error if |instance| is not of |expected_type|.
Args:
instance: instance of a Python object.
expected_type: expected type of |instance|.
description: short string describing |instance| used in error reporting.
"""
if not isinstance(instance, expected_type):
raise ValueError(
'Expected %s to be a %s, but found %s' %
(description, expected_type.__name__, instance.__class__.__name__))
def GetValueOfType(a_dict, key, value_type, value_description):
"""Raise an exception if we cannot get |key| from |a_dict| with |value_type|.
Args:
a_dict: a dictionary.
key: string key that should be in the dictionary.
value_type: expected type of the value at a_dict[key].
value_description: string describing the value used in error reporting.
"""
try:
value = a_dict[key]
except KeyError:
raise ValueError('Missing %s in JSON dictionary (key "%s")' %
(value_description, key))
AssertIsInstance(value, value_type, value_description)
return value
def PopValueOfType(a_dict, key, value_type, value_description):
"""Raise an exception if we cannnot pop |key| from |a_dict| with |value_type|.
Args:
a_dict: a dictionary.
key: string key that should be in the dictionary.
value_type: expected type of the value at a_dict[key].
value_description: string describing the value used in error reporting.
"""
ret = GetValueOfType(a_dict, key, value_type, value_description)
# We were able to get that value, so the key must exist.
a_dict.pop(key)
return ret
def ParseJsonFileWithComments(path):
"""Parse a JSON file with bash style comments.
Strips out comments from JSON blobs.
Args:
path: path to JSON file.
Returns:
Python representation of contents of JSON file.
"""
prog = re.compile(r'\s*#.*')
lines = osutils.ReadFile(path).splitlines()
lines = ['' if prog.match(line) else line for line in lines]
parsed_contents = json.loads('\n'.join(lines))
return parsed_contents
def GetNestedDictValue(a_dict, nested_key):
"""Obtains nested dict's value given hierarchical key sequence.
For example, given d['a']['b']['c'] = 'z':
GetNestedDictValue(d, ['a', 'b', 'c']) returns 'z'
Args:
a_dict: nested dict.
nested_key: hierarchical key sequence.
Returns:
Value if found. None if any of keys doesn't exist.
"""
obj = a_dict
for k in nested_key:
if not isinstance(obj, dict):
return None
obj = obj.get(k)
if obj is None:
return None
return obj
| {
"content_hash": "f0941a8d2da78759edd23136642789e5",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 28.45,
"alnum_prop": 0.6790861159929701,
"repo_name": "endlessm/chromium-browser",
"id": "51988ca44b195253c8905f2119cef54f672290bf",
"size": "3035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/lib/json_lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
from apache.aurora.client import base
from gen.apache.aurora.api.ttypes import (
PopulateJobResult,
Response,
ResponseCode,
ResponseDetail,
Result,
TaskConfig
)
class TestBase(unittest.TestCase):
def test_format_response_with_message(self):
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
formatted = base.format_response(resp)
assert formatted == 'Response from scheduler: ERROR (message: Error)'
def test_format_response_with_details(self):
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
formatted = base.format_response(resp)
assert formatted == 'Response from scheduler: ERROR (message: Error)'
def test_combine_messages(self):
resp = Response(responseCode=ResponseCode.ERROR)
assert base.combine_messages(resp) == ''
resp = Response(responseCode=ResponseCode.ERROR, details=[])
assert base.combine_messages(resp) == ''
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail(message='Error')])
assert base.combine_messages(resp) == 'Error'
resp = Response(responseCode=ResponseCode.ERROR, details=[ResponseDetail()])
assert base.combine_messages(resp) == 'Unknown error'
resp = Response(
responseCode=ResponseCode.ERROR,
details=[ResponseDetail(message='Error1'), ResponseDetail(message='Error2')])
assert base.combine_messages(resp) == 'Error1, Error2'
def test_get_populated_task_config_set(self):
config = TaskConfig()
resp = Response(responseCode=ResponseCode.OK, result=Result(populateJobResult=PopulateJobResult(
taskConfig=config)))
assert config == resp.result.populateJobResult.taskConfig
| {
"content_hash": "493b220b68273b95268bf30ae8cd0b8e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 100,
"avg_line_length": 39.22222222222222,
"alnum_prop": 0.7320113314447592,
"repo_name": "kidaa/aurora",
"id": "1a560088279ac945cce14d02454e50b8483771e4",
"size": "2313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/python/apache/aurora/client/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5916"
},
{
"name": "Groovy",
"bytes": "12868"
},
{
"name": "HTML",
"bytes": "43050"
},
{
"name": "Java",
"bytes": "2603733"
},
{
"name": "JavaScript",
"bytes": "101261"
},
{
"name": "Makefile",
"bytes": "6121"
},
{
"name": "Python",
"bytes": "1447260"
},
{
"name": "Ruby",
"bytes": "4315"
},
{
"name": "Shell",
"bytes": "91263"
},
{
"name": "Smarty",
"bytes": "25233"
},
{
"name": "Thrift",
"bytes": "53782"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, jsonify
from stock_scraper import get_data
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/data")
def data():
return jsonify(get_data())
if __name__ == "__main__":
app.run(debug=True)
| {
"content_hash": "711c8db45fb629abc870f3dc4e769e14",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 49,
"avg_line_length": 18.3125,
"alnum_prop": 0.6518771331058021,
"repo_name": "HiroIshikawa/21playground",
"id": "be002b1923accb2c6c726248515455faed0d09ea",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visualizer/_app_boilerplate/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "51720"
},
{
"name": "CSS",
"bytes": "57775"
},
{
"name": "HTML",
"bytes": "40205"
},
{
"name": "JavaScript",
"bytes": "73667"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "38714409"
},
{
"name": "Shell",
"bytes": "30454"
}
],
"symlink_target": ""
} |
import numpy as np
from numpy import diff, concatenate
import gc
from .goodsectionsresults import GoodSectionsResults
from ..timeframe import TimeFrame
from ..utils import timedelta64_to_secs
from ..node import Node
from ..timeframe import list_of_timeframes_from_list_of_dicts, timeframe_from_dict
class GoodSections(Node):
"""Locate sections of data where the sample period is <= max_sample_period.
Attributes
----------
previous_chunk_ended_with_open_ended_good_section : bool
"""
requirements = {'device': {'max_sample_period': 'ANY VALUE'}}
postconditions = {'statistics': {'good_sections': []}}
results_class = GoodSectionsResults
def reset(self):
self.previous_chunk_ended_with_open_ended_good_section = False
def process(self):
metadata = self.upstream.get_metadata()
self.check_requirements()
self.results = GoodSectionsResults(
metadata['device']['max_sample_period'])
for chunk in self.upstream.process():
self._process_chunk(chunk, metadata)
yield chunk
def _process_chunk(self, df, metadata):
"""
Parameters
----------
df : pd.DataFrame
with attributes:
- look_ahead : pd.DataFrame
- timeframe : nilmtk.TimeFrame
metadata : dict
with ['device']['max_sample_period'] attribute
Returns
-------
None
Notes
-----
Updates `self.results`
Each good section in `df` is marked with a TimeFrame.
If this df ends with an open-ended good section (assessed by
examining df.look_ahead) then the last TimeFrame will have
`end=None`. If this df starts with an open-ended good section
then the first TimeFrame will have `start=None`.
"""
# Retrieve relevant metadata
max_sample_period = metadata['device']['max_sample_period']
look_ahead = getattr(df, 'look_ahead', None)
timeframe = df.timeframe
# Process dataframe
good_sections = get_good_sections(
df, max_sample_period, look_ahead,
self.previous_chunk_ended_with_open_ended_good_section)
# Set self.previous_chunk_ended_with_open_ended_good_section
if good_sections:
self.previous_chunk_ended_with_open_ended_good_section = (
good_sections[-1].end is None)
# Update self.results
self.results.append(timeframe, {'sections': [good_sections]})
def get_good_sections(df, max_sample_period, look_ahead=None,
previous_chunk_ended_with_open_ended_good_section=False):
"""
Parameters
----------
df : pd.DataFrame
look_ahead : pd.DataFrame
max_sample_period : number
Returns
-------
sections : list of TimeFrame objects
Each good section in `df` is marked with a TimeFrame.
If this df ends with an open-ended good section (assessed by
examining `look_ahead`) then the last TimeFrame will have
`end=None`. If this df starts with an open-ended good section
then the first TimeFrame will have `start=None`.
"""
index = df.dropna().sort_index().index
del df
if len(index) < 2:
return []
timedeltas_sec = timedelta64_to_secs(diff(index.values))
timedeltas_check = timedeltas_sec <= max_sample_period
# Memory management
del timedeltas_sec
gc.collect()
timedeltas_check = concatenate(
[[previous_chunk_ended_with_open_ended_good_section],
timedeltas_check])
transitions = diff(timedeltas_check.astype(np.int))
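    # A +1 transition marks a bad-to-good boundary (a good section starts at
    # that index); a -1 transition marks a good-to-bad boundary (it ends).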
# Memory management
last_timedeltas_check = timedeltas_check[-1]
del timedeltas_check
gc.collect()
good_sect_starts = list(index[:-1][transitions == 1])
good_sect_ends = list(index[:-1][transitions == -1])
# Memory management
last_index = index[-1]
del index
gc.collect()
# Use look_ahead to see if we need to append a
# good sect start or good sect end.
look_ahead_valid = look_ahead is not None and not look_ahead.empty
if look_ahead_valid:
look_ahead_timedelta = look_ahead.dropna().index[0] - last_index
look_ahead_gap = look_ahead_timedelta.total_seconds()
if last_timedeltas_check: # current chunk ends with a good section
if not look_ahead_valid or look_ahead_gap > max_sample_period:
# current chunk ends with a good section which needs to
# be closed because next chunk either does not exist
# or starts with a sample which is more than max_sample_period
# away from df.index[-1]
good_sect_ends += [last_index]
elif look_ahead_valid and look_ahead_gap <= max_sample_period:
# Current chunk appears to end with a bad section
# but last sample is the start of a good section
good_sect_starts += [last_index]
# Work out if this chunk ends with an open ended good section
if len(good_sect_ends) == 0:
ends_with_open_ended_good_section = (
len(good_sect_starts) > 0 or
previous_chunk_ended_with_open_ended_good_section)
elif len(good_sect_starts) > 0:
# We have good_sect_ends and good_sect_starts
ends_with_open_ended_good_section = (
good_sect_ends[-1] < good_sect_starts[-1])
else:
# We have good_sect_ends but no good_sect_starts
ends_with_open_ended_good_section = False
# If this chunk starts or ends with an open-ended
# good section then the relevant TimeFrame needs to have
# a None as the start or end.
if previous_chunk_ended_with_open_ended_good_section:
good_sect_starts = [None] + good_sect_starts
if ends_with_open_ended_good_section:
good_sect_ends += [None]
assert len(good_sect_starts) == len(good_sect_ends)
sections = [TimeFrame(start, end)
for start, end in zip(good_sect_starts, good_sect_ends)
if not (start == end and start is not None)]
# Memory management
del good_sect_starts
del good_sect_ends
gc.collect()
return sections
| {
"content_hash": "186fa2dc2de6fd1a2eafae62224b2784",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 82,
"avg_line_length": 35.23728813559322,
"alnum_prop": 0.6270642937309604,
"repo_name": "nilmtk/nilmtk",
"id": "8f82fea7c22e3649698402fbf3a48a1aac2a2062",
"size": "6237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nilmtk/stats/goodsections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "Jupyter Notebook",
"bytes": "56819"
},
{
"name": "Python",
"bytes": "641879"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
} |
from mock import Mock, patch
from django.core.exceptions import NON_FIELD_ERRORS
from django.http.response import HttpResponseRedirect
from django.test import TestCase
from django.test.client import RequestFactory
from ..views import PleaOnlineForms
from ..models import Case, Court
from ..standardisers import standardise_name
class TestCaseBase(TestCase):
def get_request_mock(self, url="/", url_name="", url_kwargs=None):
request_factory = RequestFactory()
if not url_kwargs:
url_kwargs = {}
request = request_factory.get(url)
request.resolver_match = Mock()
request.resolver_match.url_name = url_name
request.resolver_match.kwargs = url_kwargs
return request
class TestPleaFormIssues(TestCaseBase):
def setUp(self):
self.session = {}
self.request_context = {}
def test_used_urn_in_session(self):
case = Case.objects.create(urn="06AA000000000",
extra_data={"FirstName1": "Frank",
"Surname": "Marsh"},
name="frank marsh",
sent=True)
case.save()
self.session = {"notice_type": {"complete": True,
"sjp": False},
"case": {"complete": True,
"date_of_hearing": "2015-01-01",
"urn": "06AA000000000",
"number_of_charges": 1,
"plea_made_by": "Defendant"},
"your_details": {"first_name": "Frank",
"last_name": "Marsh"}}
save_data = {"date_of_hearing": "2015-01-01",
"urn": "06/AA/0000000/00",
"number_of_charges": 1,
"plea_made_by": "Defendant"}
form = PleaOnlineForms(self.session, "case")
form.save(save_data, self.request_context)
result = form.render(self.get_request_mock())
self.assertIsInstance(result, HttpResponseRedirect)
class TestDuplicateCaseIssues(TestCaseBase):
def setUp(self):
Court.objects.create(region_code="51",
enabled=True)
def create_person_case(self, urn, first_name, last_name):
return Case.objects.create(urn=urn,
name=standardise_name(first_name, last_name),
extra_data={"FirstName1": first_name,
"Surname": last_name},
sent=True)
def create_company_case(self, urn, first_name, last_name, org_name):
return Case.objects.create(urn=urn,
extra_data={"OrganisationName": org_name,
"FirstName1": first_name,
"Surname": last_name},
sent=True)
def get_session_data(self, urn, type):
return {"notice_type": {"complete": True,
"sjp": False},
"case": {"complete": True,
"date_of_hearing": "2016-01-01",
"urn": urn,
"number_of_charges": 1,
"plea_made_by": type}}
def get_person_details_save_data(self, first_name, last_name):
return {"first_name": first_name,
"last_name": last_name,
"correct_address": "True",
"updated_address": "",
"contact_number": "0236578493",
"date_of_birth_0": "01",
"date_of_birth_1": "01",
"date_of_birth_2": "1970",
"email": "[email protected]",
"have_ni_number": "False",
"no_ni_number_reason": "Lost my NI card",
"have_driving_licence_number": "False"}
def get_company_details_save_data(self, company_name, first_name, last_name):
return {"company_name": company_name,
"correct_address": "True",
"first_name": first_name,
"last_name": last_name,
"position_in_company": "Director",
"contact_number": "0236578493",
"email": "[email protected]"}
def test_dup_person_same_name(self):
self.create_person_case("51aa0000015", "Frank", "Marsh")
session = self.get_session_data("51aa0000015", "Defendant")
form = self.get_person_details_save_data("Frank", "Marsh")
stages = PleaOnlineForms(session, "your_details")
stages.save(form, {})
self.assertEqual(len(stages.current_stage.form.errors[NON_FIELD_ERRORS]), 1)
def test_dup_person_different_names(self):
self.create_person_case("51aa0000015", "Frank", "Marsh")
session = self.get_session_data("51aa0000015", "Defendant")
form = self.get_person_details_save_data("Franky", "Marshington III")
stages = PleaOnlineForms(session, "your_details")
stages.save(form, {})
self.assertEqual(stages.current_stage.form.errors, {})
def test_dup_company(self):
self.create_company_case("51bb0000015", "Frank", "Marsh", "Frank Marsh inc.")
session = self.get_session_data("51bb0000015", "Company representative")
form = self.get_company_details_save_data("Frank Marsh inc.", "Frank", "Marsh")
stages = PleaOnlineForms(session, "company_details")
stages.save(form, {})
self.assertEqual(len(stages.current_stage.form.errors[NON_FIELD_ERRORS]), 1)
def test_dup_company_different_names(self):
self.create_company_case("51bb0000015", "Frank", "Marsh", "Frank Marsh inc.")
session = self.get_session_data("51bb0000015", "Company representative")
form = self.get_company_details_save_data("Frank Marsh inc.", "Frankie", "Marshington III")
stages = PleaOnlineForms(session, "company_details")
stages.save(form, {})
self.assertEqual(len(stages.current_stage.form.errors[NON_FIELD_ERRORS]), 1)
def test_person_then_company(self):
self.create_person_case("51aa0000015", "Frank", "Marsh")
session = self.get_session_data("51aa0000015", "Company representative")
form = self.get_company_details_save_data("Frank Marsh inc.", "Frank", "Marsh")
stages = PleaOnlineForms(session, "company_details")
stages.save(form, {})
self.assertEqual(len(stages.current_stage.form.errors[NON_FIELD_ERRORS]), 1)
def test_company_then_person_different_name(self):
self.create_company_case("51aa0000015", "Frank", "Marsh", "Frank Marsh inc.")
session = self.get_session_data("51aa0000015", "Defendant")
form = self.get_person_details_save_data("Franky", "Marshington III")
stages = PleaOnlineForms(session, "your_details")
stages.save(form, {})
self.assertEqual(len(stages.current_stage.form.errors[NON_FIELD_ERRORS]), 1)
def test_company_then_person_same_name(self):
self.create_company_case("51aa0000015", "Frank", "Marsh", "Frank Marsh inc.")
session = self.get_session_data("51aa0000015", "Defendant")
form = self.get_person_details_save_data("Frank", "Marsh")
stages = PleaOnlineForms(session, "your_details")
stages.save(form, {})
self.assertEqual(len(stages.current_stage.form.errors[NON_FIELD_ERRORS]), 1)
| {
"content_hash": "e11d1361fba45bc9511faffb0c9ba363",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 99,
"avg_line_length": 41.46195652173913,
"alnum_prop": 0.5548564687377113,
"repo_name": "ministryofjustice/manchester_traffic_offences_pleas",
"id": "94f40a22aaf91ec24b78c2079a147aba820bc78b",
"size": "7629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/plea/tests/test_issues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "867"
},
{
"name": "Gherkin",
"bytes": "10122"
},
{
"name": "HTML",
"bytes": "184454"
},
{
"name": "JavaScript",
"bytes": "52955"
},
{
"name": "Python",
"bytes": "792658"
},
{
"name": "SCSS",
"bytes": "43568"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
} |
"""
This file is part of Urban Mediator software.
Copyright (c) 2008 University of Art and Design Helsinki
See the file LICENSE.txt for copying permission.
Module for database queries. Used mostly by model.py
but also for low-level things in code.py
"""
import web
import config
import re
import datetime
import geo_support
def LOCATIONS_GROUP_BY(tbl="locations"):
return ", ".join([tbl + "." + fld for fld in
"""id lat lon title added user_id origin ranking url uuid begins expires ends visible type""".split()])
TAGS_GROUP_BY = """tags.id, tags.tag, tags.tag_namespace, tag_namespaces.tag_system_id"""
USERS_GROUP_BY = """users.id, users.username, users.added, users.description"""
BASE_LOCATIONS_QUERY = """
SELECT locations.*,
0 as distance,
MAX(notes.added) as last_comment,
COUNT(DISTINCT(notes.id)) as comments_count,
COUNT(DISTINCT(projects_points.location_id)) as points_count,
users.username as author
FROM locations
LEFT JOIN users ON (locations.user_id = users.id)
LEFT JOIN notes ON (notes.visible = 1 AND notes.location_id = locations.id)
LEFT JOIN projects_points ON (projects_points.project_id = locations.id and projects_points.visible = 1)
"""
BASE_PROJECTS_QUERY = """
SELECT locations.*,
0 as distance,
MAX(notes.added) as last_comment,
COUNT(DISTINCT(notes.id)) as comments_count,
users.username as author
FROM locations
LEFT JOIN users ON (locations.user_id = users.id)
LEFT JOIN notes ON (notes.location_id = locations.id)
"""
def object_by_(cond, **query):
"""This may be obsoleted anytime """
qry = (BASE_LOCATIONS_QUERY
+ """WHERE locations.visible = 1 AND """ + cond + """
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
LIMIT 1;
""")
return web.query(qry, vars=query)
def object_by_hard_(cond, **query):
"""This may be obsoleted anytime """
qry = (BASE_LOCATIONS_QUERY
+ """WHERE """ + cond + """
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
LIMIT 1;
""")
return web.query(qry, vars=query)
def object_by_id(id):
return object_by_("""locations.id = $id""", id=id)
def object_by_id_hard(id):
return object_by_hard_("""locations.id = $id""", id=id)
def point_by_id(id):
return object_by_("""locations.id = $id
AND locations.type = 'point'""", id=id)
def point_by_id_hard(id):
""" hard means - also invisible """
return object_by_hard_("""locations.id = $id
AND locations.type = 'point'""", id=id)
def object_by_note(note_id):
return object_by_("""notes.id = $note_id
""", note_id=note_id)
def point_by_uuid(uuid):
return object_by_("""locations.uuid = $uuid
AND locations.type = 'point'""", uuid=uuid)
def project_by_id(id):
return object_by_("""locations.id = $id
AND locations.type = 'project'""", id=id)
def comments():
return web.query("""
SELECT n.*, l.lat as lat, l.lon as lon, l.id as location_id, l.title as location_title, u.username as author
FROM notes n, locations l, users u
WHERE l.visible = 1 AND n.visible = 1
AND n.location_id = l.id
AND n.user_id = u.id
AND l.type = 'point'
ORDER BY added DESC;
""")
def comment_by_id(id):
return web.query("""
SELECT n.*,
l.lat as lat,
l.lon as lon,
l.id as location_id,
l.title as location_title,
u.username as author
FROM notes n, locations l, users u
WHERE l.visible = 1 AND n.visible = 1
AND n.location_id = l.id
AND n.user_id = u.id
-- AND l.type = 'point'
AND n.id = $comment_id
LIMIT 1;
""", vars=dict(comment_id=id))
def comments_by_point(point):
return web.query("""
SELECT n.*, l.lat as lat, l.lon as lon, l.id as location_id,
l.title as location_title, u.username as author
FROM notes n, locations l, users u
WHERE l.visible = 1 AND n.visible = 1
AND n.location_id = $point_id
AND n.location_id = l.id
AND n.user_id = u.id
-- AND l.type = 'point'
ORDER BY n.ord, n.added;
""", vars=dict(point_id=point.id))
def comments_by_user(user):
return web.query("""
SELECT n.*, l.lat as lat, l.lon as lon, l.id as location_id,
l.title as location_title, $username as author
FROM notes n, locations l
WHERE l.visible = 1 AND n.visible = 1
AND n.user_id = $user_id
AND n.location_id = l.id
AND n.origin = $origin
AND l.type = 'point'
ORDER BY added DESC;
""", vars=dict(user_id=user.id, username=user.username, origin=config.origin))
def point_insert(point, user, times=[]):
return web.insert("locations",
lat=point.lat,
lon=point.lon,
title=point.title,
user_id=user.id,
origin=point.origin,
url=point.url,
visible=point.visible,
# added=datetime.datetime.utcnow(), #!!!
uuid=point.get("uuid", ''))
def project_insert(project, user):
return web.insert("locations",
lat=project.lat, #!!!
lon=project.lon, #!!!
title=project.title,
user_id=user.id,
origin=project.origin,
type='project',
url=project.url)
def trigger_insert(trigger, project, user):
return web.insert("triggers",
trigger_condition=trigger.trigger_condition,
trigger_action=trigger.trigger_action,
adapter=trigger.adapter,
url=trigger.url,
description=trigger.description,
user_id=user.id,
project_id=project.id,
)
def trigger_update(trigger, project, user):
return web.update("triggers", where='id=$id',
trigger_condition=trigger.trigger_condition,
trigger_action=trigger.trigger_action,
adapter=trigger.adapter,
url=trigger.url,
description=trigger.description,
user_id=user.id,
project_id=project.id,
vars={'id': trigger.id},
)
def comment_insert(comment, point, user):
if comment.get("text", ""):
return web.insert("notes",
text=comment.text,
location_id=point.id,
user_id=user.id,
origin=comment.origin,
ranking=0,
type=comment.type,
)
def comment_update(comment, point, user):
return web.update("notes", where='id=$id',
text=comment.text,
# location_id=point.id,
# user_id=user.id,
# origin=comment.origin,
type=comment.get("type", "comment"),
# ranking=0,
vars={'id': comment.id},
)
def comment_order_update(comment):
return web.update("notes", where='id=$id',
ord=comment.ord,
vars={'id': comment.id,},
)
def _has_tags(tag_list):
""" Forms a condition to check for certain tags """
query_str = ""
for tag_namespace, tag in tag_list:
anded = "(tags.tag_namespace = " + web.sqlquote(tag_namespace) + \
" AND " + "tags.tag = " + web.sqlquote(tag) + ")"
query_str += (query_str and (" OR " + anded) or anded)
return query_str and ("(" + query_str + ")") or query_str
WORDCHARS = re.compile(r'["\n'+r"'\[\]><\\;\*\?\+]") #!!! better escape
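# Word-boundary regex match operator: REGEXP for MySQL, ~ for PostgreSQL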
if web.config.db_parameters["dbn"] == "mysql":
REGEXQ = """ %s REGEXP %s """
else:
REGEXQ = """ %s ~ %s """
def _has_query_str(fieldname, qs):
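    """ Forms an OR'ed word-boundary regex condition matching each word of qs against fieldname """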
query_str = ""
for q in [WORDCHARS.sub('', c.strip()) for c in qs.split()]:
qq = '[[:<:]]%s[[:>:]]' % q
cond = REGEXQ % (fieldname, web.sqlquote(qq))
query_str += query_str and (" OR " + cond) or cond
return query_str
def search_locations(search_query, tag_list, loctype='point'):
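    """ Weighted full-text search over locations: title and note text matches
        weigh 2, tag matches weigh 4; ordered by weight, then last comment. """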
conds1 = "(" + " OR ".join(
["(" + _has_query_str(fieldname, search_query) + ")"
for fieldname in ("title", )]) + ")"
select1 = """(SELECT id, 2 as weight FROM locations
WHERE type = """ + web.sqlquote(loctype) + """
AND """ + conds1 + """)"""
conds2 = "(" + " OR ".join(
["(" + _has_query_str(fieldname, search_query) + ")"
for fieldname in ("text", )]) + ")"
select2 = """(SELECT location_id as id, 2 as weight FROM notes
WHERE """ + conds2 + """)"""
select3 = """(SELECT locations_users_tags.location_id as id,
4 as weight
FROM tags, locations_users_tags
WHERE """ + _has_tags(tag_list) + """
AND locations_users_tags.tag_id = tags.id
)"""
selects = (""
+ select1
+ "\nUNION "
+ select2
+ "\nUNION "
+ select3
)
wq = """
SELECT locations.*,
locids.weight,
0 as distance,
MAX(notes.added) as last_comment,
COUNT(notes.location_id) as comments_count,
users.username as author
FROM (""" + selects + """) AS locids
LEFT JOIN locations ON (locations.id = locids.id)
LEFT JOIN notes ON (notes.visible = 1 AND notes.location_id = locids.id)
LEFT JOIN users ON (users.id = locations.user_id)
WHERE
locations.visible = 1
AND locations.type = """ + web.sqlquote(loctype) + """
GROUP BY """ + LOCATIONS_GROUP_BY('locations') + """, locids.weight, users.username
ORDER BY locids.weight DESC, last_comment DESC
;"""
return web.query(wq)
def search_locations_of_project(search_query, tag_list, project=None, loctype='point'):
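    """ Same weighted search as search_locations, restricted to points of the given project """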
project_id = project.id
conds1 = "(" + " OR ".join(
["(" + _has_query_str(fieldname, search_query) + ")"
for fieldname in ("title", )]) + ")"
select1 = """(SELECT id, 2 as weight FROM locations
WHERE type = """ + web.sqlquote(loctype) + """
AND """ + conds1 + """)"""
conds2 = "(" + " OR ".join(
["(" + _has_query_str(fieldname, search_query) + ")"
for fieldname in ("text", )]) + ")"
select2 = """(SELECT location_id as id, 2 as weight FROM notes
WHERE """ + conds2 + """)"""
select3 = """(SELECT locations_users_tags.location_id as id,
4 as weight
FROM tags, locations_users_tags
WHERE """ + _has_tags(tag_list) + """
AND locations_users_tags.tag_id = tags.id
)"""
selects = (""
+ select1
+ "\nUNION "
+ select2
+ "\nUNION "
+ select3
)
wq = """
SELECT locations.*,
locids.weight,
0 as distance,
MAX(notes.added) as last_comment,
COUNT(notes.location_id) as comments_count,
users.username as author
FROM (""" + selects + """) AS locids
LEFT JOIN locations ON (locations.id = locids.id)
LEFT JOIN projects_points as pp ON
(pp.location_id = locations.id AND pp.visible = 1)
LEFT JOIN notes ON (notes.visible = 1 AND notes.location_id = locids.id)
LEFT JOIN users ON (users.id = locations.user_id)
WHERE
locations.visible = 1
AND pp.project_id = """ + str(int(project_id)) + """
AND locations.type = """ + web.sqlquote(loctype) + """
GROUP BY """ + LOCATIONS_GROUP_BY('locations') + """, locids.weight, users.username
ORDER BY locids.weight DESC, last_comment DESC
;"""
return web.query(wq)
def points_by_tags(tag_list):
# union!!!
if not tag_list:
raise "invalid arguments"
return web.query("""
SELECT locations.*
FROM
( """ + BASE_LOCATIONS_QUERY + """
WHERE locations.visible = 1
AND locations.type = 'point'
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
) as locations,
tags, locations_users_tags
WHERE
locations_users_tags.location_id = locations.id
AND """ + _has_tags(tag_list) + """
AND locations_users_tags.tag_id = tags.id
GROUP BY """ + LOCATIONS_GROUP_BY() + """,
locations.distance, locations.last_comment,
locations.comments_count, locations.author, locations.points_count
""")
def points_by_project_and_tags(project, tag_list):
# union!!!
if not tag_list:
raise "invalid arguments"
project_id = project.id
return web.query("""
SELECT locations.*
FROM
( """ + BASE_LOCATIONS_QUERY + """
LEFT JOIN projects_points as pp ON
(pp.location_id = locations.id AND pp.visible = 1)
WHERE locations.visible = 1
AND locations.type = 'point'
AND pp.project_id = """ + str(int(project_id)) + """
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
) as locations,
tags, locations_users_tags
WHERE locations_users_tags.location_id = locations.id
AND """ + _has_tags(tag_list) + """
AND locations_users_tags.tag_id = tags.id
GROUP BY """ + LOCATIONS_GROUP_BY() + """,
locations.distance, locations.last_comment,
locations.comments_count, locations.author, locations.points_count
""", vars=dict(project_id=project_id))
def projects_by_tags(tag_list):
# union!!!
if not tag_list:
raise "invalid arguments"
q = ("""
SELECT locations.*
FROM
( """ + BASE_LOCATIONS_QUERY + """
WHERE locations.visible = 1
AND locations.type = 'project'
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
) as locations,
tags, locations_users_tags
WHERE locations_users_tags.location_id = locations.id
AND """ + _has_tags(tag_list) + """
AND locations_users_tags.tag_id = tags.id
GROUP BY """ + LOCATIONS_GROUP_BY() + """,
locations.distance, locations.last_comment,
locations.comments_count, locations.author, locations.points_count
""")
return web.query(q)
def projects_by_point_and_tags(point, tag_list):
# union!!!
if not tag_list:
raise "invalid arguments"
point_id = point.id
return web.query("""
SELECT locations.*
FROM
( """ + BASE_PROJECTS_QUERY + """
LEFT JOIN projects_points as pp ON
(pp.project_id = locations.id AND pp.visible = 1)
WHERE locations.visible = 1
AND locations.type = 'project'
AND pp.location_id = $point_id
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
) as locations,
tags, locations_users_tags
WHERE locations_users_tags.location_id = locations.id
AND """ + _has_tags(tag_list) + """
AND locations_users_tags.tag_id = tags.id
GROUP BY """ + LOCATIONS_GROUP_BY() + """
""", vars=dict(point_id=point_id))
def points():
qry = BASE_LOCATIONS_QUERY + """
WHERE locations.visible = 1
AND locations.type = 'point'
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC;
"""
return web.query(qry)
def points_nearby(lat, lon, radius=None, limit=None, project=None):
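    """ Points within radius of (lat, lon) using a flat-earth approximation
        scaled by meters per degree; optionally restricted to a project. """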
limit = limit and ("LIMIT %i" % limit) or ""
radius_cond = radius and (""" AND sqrt(pow($lat - lat, 2) * $y + pow($lon - lon, 2) * $x) < $r """) or ""
x, y = geo_support.meters_per_deg(lat, lon)
if project:
project_id = project.id
qry = BASE_PROJECTS_QUERY + """
LEFT JOIN projects_points ON
(projects_points.location_id = locations.id AND projects_points.visible = 1)
WHERE locations.visible = 1
AND locations.type = 'point'
""" + radius_cond + """
AND projects_points.project_id = """ + str(int(project_id)) + """
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
""" + limit
else:
qry = BASE_LOCATIONS_QUERY + """
WHERE locations.visible = 1
AND locations.type = 'point'
""" + radius_cond + """
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
""" + limit
return web.query(qry, vars=dict(x=x**2, y=y**2, r=radius, lat=lat, lon=lon))
def projects():
q = BASE_LOCATIONS_QUERY + """
WHERE locations.visible = 1
AND locations.type = 'project'
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC;
"""
return web.query(q)
def projects_by_point(point):
point_id = point.id
return web.query(BASE_PROJECTS_QUERY + """
LEFT JOIN projects_points ON
(projects_points.project_id = locations.id
AND projects_points.visible = 1)
WHERE locations.visible = 1
AND locations.type = 'project'
AND projects_points.location_id = $point_id
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
;""", vars=dict(point_id=point_id))
def projects_by_ds(ds): #!!!
ds_id = ds.id
return web.query(BASE_LOCATIONS_QUERY + """
LEFT JOIN locations_datasources ON
(locations_datasources.location_id = locations.id)
WHERE locations.visible = 1
AND locations.type = 'project'
AND locations_datasources.datasource_id = $ds_id
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
;""", vars=dict(ds_id=ds_id))
def projects_by_user(user):
user_id = user.id
return web.query(BASE_LOCATIONS_QUERY + """
LEFT JOIN locations_datasources ON
(locations_datasources.location_id = locations.id)
WHERE locations.visible = 1
AND locations.type = 'project'
AND users.id = $user_id
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
;""", vars=dict(user_id=user_id))
def points_by_user(user):
user_id = user.id
return web.query(BASE_LOCATIONS_QUERY + """
LEFT JOIN locations_datasources ON
(locations_datasources.location_id = locations.id)
WHERE locations.visible = 1
AND locations.type = 'point'
AND users.id = $user_id
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
;""", vars=dict(user_id=user_id))
def objects_by_user_role(user, role, type="project"):
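    """ Visible locations of the given type on which the user has the given role """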
user_id = user.id
return web.query(BASE_LOCATIONS_QUERY + """
LEFT JOIN locations_policy_table ON
(locations_policy_table.user_id = $user_id
AND locations_policy_table.location_id = locations.id)
WHERE locations.visible = 1
AND locations.type = $type
AND locations_policy_table.role = $role
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
;""", vars=dict(user_id=user_id, role=role, type=type))
def points_by_comment(comment):
comment_id = comment.id
q = BASE_LOCATIONS_QUERY + """
LEFT JOIN locations_datasources ON
(locations_datasources.location_id = locations.id)
WHERE locations.visible = 1
AND locations.type = 'point'
AND notes.id = $comment_id
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC
;"""
return web.query(q, vars=dict(comment_id=comment_id))
def point_tags():
# !!! experimental
# to be searchable, tag_namespace should have tag_namespace!
return web.query("""
SELECT locations_users_tags.location_id as location_id,
tags.id as id,
CONCAT(tags.tag_namespace, ':', tags.tag) as ns_tag,
tags.tag as tag,
tag_namespaces.id as tag_namespace,
tag_namespaces.tag_system_id as tag_system_id,
locations_users_tags.location_id as location_id,
users.username as username
FROM tags, tag_namespaces, locations_users_tags, users
WHERE locations_users_tags.tag_id = tags.id
AND tags.tag_namespace = tag_namespaces.id
AND locations_users_tags.user_id = users.id
ORDER BY locations_users_tags.location_id
;
""", vars=locals())
def project_tags():
# !!! experimental
return web.query("""
SELECT locations_users_tags.location_id as location_id,
tags.id as id,
CONCAT(tags.tag_namespace, ':', tags.tag) as ns_tag,
tags.tag as tag,
tag_namespaces.id as tag_namespace,
tag_namespaces.tag_system_id as tag_system_id,
locations_users_tags.location_id as location_id,
users.username as username
FROM tags, tag_namespaces, locations_users_tags, users
WHERE locations_users_tags.tag_id = tags.id
AND tags.tag_namespace = tag_namespaces.id
AND locations_users_tags.user_id = users.id
ORDER BY locations_users_tags.location_id
;
""", vars=locals())
def user_by_id(id):
return web.select("users", where="id=$id", vars=dict(id=id))
def user_by_username(username):
return web.select("users", where="upper(username)=upper($username)",
vars=dict(username=username))
def tags_remove(point, tags):
for tag in tags:
tag_data = dict(point_id=point.id,
tag=tag.tag,
tag_namespace=tag.tag_namespace)
web.query("""
DELETE FROM locations_users_tags
USING locations_users_tags, tags
WHERE tags.id = locations_users_tags.tag_id
AND location_id = $point_id
AND tags.tag = $tag
AND tags.tag_namespace = $tag_namespace;
""", vars=tag_data)
def tags_insert(point, user, tags, deny_namespaces=[]):
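    """ Creates each tag if missing and links it to the point for this user,
        skipping denied namespaces and already-existing links. """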
for tag in tags:
if tag.tag_namespace in deny_namespaces:
continue #!!! logic could be: drop namespace or make ns:tag...
existing_tags = web.query("""
SELECT id, tag, tag_namespace FROM tags
WHERE tag = $tag AND tag_namespace = $tag_namespace;
""", vars=tag.copy())
if not existing_tags:
web.query("""
INSERT INTO tags (tag, tag_namespace)
VALUES ($tag, $tag_namespace);
""", vars=tag.copy())
existing_tags = web.query("""
SELECT id, tag, tag_namespace FROM tags
WHERE tag = $tag AND tag_namespace = $tag_namespace;
""", vars=tag.copy())
assert len(existing_tags) == 1
tag_id = existing_tags[0].id
tag_triad = dict(point_id=point.id, user_id=user.id, tag_id=tag_id)
already_tagged = web.query("""
SELECT location_id, user_id, tag_id FROM locations_users_tags
WHERE location_id = $point_id
AND user_id = $user_id
AND tag_id = $tag_id;
""", vars=tag_triad)
if not already_tagged:
web.query("""
INSERT INTO locations_users_tags (location_id, user_id, tag_id)
VALUES ($point_id, $user_id, $tag_id);
""", vars=tag_triad)
def update_projects_points(project, point):
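    """ Links a point (resolved by id or uuid) to a project unless the link already exists """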
project_id = project.id
if not hasattr(point, "id"):
db_point = list(point_by_uuid(point.uuid))
if db_point:
# !!! should we update lat, lon too?
point_id = db_point[0].id
else:
point_id = 0 #!!!
else:
point_id = point.id
exists = web.query("""
SELECT * FROM projects_points
WHERE
location_id=$point_id
AND project_id=$project_id
AND projects_points.visible = 1
LIMIT 1;
""", vars=locals())
if not exists:
web.insert("projects_points",
location_id=point_id,
project_id=project_id,
visible=project.visible and getattr(point, "visible", 1),
)
def points_by_project(project, limit=None):
project_id = project.id
if limit:
limit = "LIMIT %i" % limit
else:
limit = ""
return web.query(BASE_PROJECTS_QUERY + """
LEFT JOIN projects_points ON
(projects_points.location_id = locations.id AND projects_points.visible = 1)
WHERE locations.visible = 1
AND locations.type = 'point'
AND projects_points.project_id = """ + str(int(project_id)) + """
GROUP BY """ + LOCATIONS_GROUP_BY() + """, users.username
ORDER BY last_comment DESC """
+ limit, vars=locals())
def points_count_by_project(project):
project_id = project.id
return web.query("""
SELECT count(projects_points.project_id) as points_count
FROM locations, projects_points
WHERE locations.visible = 1
AND locations.type = 'point'
AND projects_points.location_id = locations.id
AND projects_points.visible = 1
AND projects_points.project_id = """ + str(int(project_id)) + """
GROUP BY projects_points.project_id
""", vars=locals())
def external_point_update(point, user):
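    """ Upsert by uuid: updates the title of an existing point, otherwise
        inserts the point with its tags; returns "update" or "insert". """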
db_point = list(point_by_uuid(point.uuid) or [])
if db_point:
# !!! should we update lat, lon too?
point.id = db_point[0].id
web.query("""
UPDATE locations
SET title=$title
WHERE id = $id
;
""", vars=point.copy())
# how to update tags???
# tags_insert(point, user, point.tags, deny_namespaces=[])
return "update"
else:
web.insert("locations",
lat=point.lat,
lon=point.lon,
title=point.title,
uuid=point.uuid,
user_id=user.id,
origin=point.origin,
added=point.added, #!!!?
url=point.url)
db_point = list(point_by_uuid(point.uuid))[0]
tags_insert(db_point, user, point.tags, deny_namespaces=[])
point.id = db_point.id
return "insert"
def point_update(point, user):
web.query("""
UPDATE locations
SET title=$title,
uuid=$uuid
WHERE id = $id
;
""", vars=point.copy())
def point_full_update(point, user):
web.query("""
UPDATE locations
SET title=$title,
uuid=$uuid,
lat=$lat,
lon=$lon,
visible=$visible,
url=$url
WHERE id = $id
;
""", vars=point.copy())
def project_update(project, user):
if "lat" not in project:
web.query("""
UPDATE locations
SET title=$title,
uuid=$uuid,
origin=$origin,
type='project'
WHERE id = $id
;
""", vars=project.copy())
else:
web.query("""
UPDATE locations
SET title=$title,
uuid=$uuid,
origin=$origin,
lat=$lat,
lon=$lon,
type='project'
WHERE id = $id
;
""", vars=project.copy())
def triggers_by_id(id):
return web.query("""
SELECT DISTINCT *
FROM triggers
WHERE
id = $id
;""", vars={'id': id})
def triggers_by_project_id(project_id):
return web.query("""
SELECT triggers.*
FROM triggers
WHERE
triggers.project_id = $project_id
;""", vars={'project_id': project_id})
def triggers_by_project_id_with_condition(project_id, condition):
return web.query("""
SELECT triggers.*
FROM triggers
WHERE
triggers.project_id = $project_id
AND triggers.trigger_condition = $cond
;""", vars={'project_id': project_id, 'cond': condition})
def datasources_by_project_id(project_id):
return web.query("""
SELECT DISTINCT ds.*
FROM datasources ds, locations_datasources lds
WHERE
lds.location_id = $project_id
AND lds.datasource_id = ds.id
;""", vars={'project_id': project_id})
def datasources_by_id(id):
return web.query("""
SELECT DISTINCT *
FROM datasources ds
WHERE
id = $id
;""", vars={'id': id})
def datasources_by_url_and_type(url, type_):
return web.query("""
SELECT DISTINCT *
FROM datasources ds
WHERE
url = $url
AND type = $type
;""", vars={'url': url, 'type': type_})
def datasource_insert(datasource, project, user):
#!!! check for doubles
ds_id = web.insert("datasources",
type=datasource.type,
adapter=datasource.adapter,
url=datasource.url,
frequency=datasource.frequency,
description=datasource.description)
web.insert("locations_datasources",
datasource_id=ds_id,
location_id=project.id,
)
return ds_id
def datasource_update(datasource, project, user):
return web.update("datasources", where='id=$id',
type=datasource.type,
adapter=datasource.adapter,
url=datasource.url,
frequency=datasource.frequency,
description=datasource.description,
vars={'id': datasource.id},
)
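# Tags with usage counts: distinct locations and distinct users per tag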
BASE_TAGS_QUERY = """
SELECT tags.id as id,
CONCAT(tags.tag_namespace, ':', tags.tag) as ns_tag,
tags.tag as tag,
tags.tag_namespace as tag_namespace,
tag_namespaces.tag_system_id as tag_system_id,
COUNT(DISTINCT locations_users_tags.location_id) as count_locations,
COUNT(DISTINCT locations_users_tags.user_id) as count_users
FROM tags
LEFT JOIN tag_namespaces ON (tags.tag_namespace = tag_namespaces.id)
LEFT JOIN locations_users_tags ON (locations_users_tags.tag_id = tags.id)
"""
def tags():
return web.query(BASE_TAGS_QUERY + """
LEFT JOIN locations ON (locations.visible = 1 AND locations.id = locations_users_tags.location_id)
WHERE locations.type = 'point' OR locations.type = 'project'
GROUP BY """ + TAGS_GROUP_BY + """
ORDER BY tags.tag_namespace, tags.tag
;""", vars=locals())
def tags_of_points():
return web.query(BASE_TAGS_QUERY + """
LEFT JOIN locations ON (locations.visible = 1 AND locations.id = locations_users_tags.location_id)
WHERE locations.type = 'point'
GROUP BY """ + TAGS_GROUP_BY + """
ORDER BY tags.tag_namespace, tags.tag
;""", vars=locals())
def tags_of_project_points(project):
# !!! experimental
project_id = project.id
return web.query(BASE_TAGS_QUERY + """
LEFT JOIN projects_points ON (projects_points.location_id = locations_users_tags.location_id AND projects_points.visible = 1)
LEFT JOIN locations ON (locations.visible = 1 AND locations.id = locations_users_tags.location_id)
WHERE locations.type = 'point'
AND projects_points.project_id = $project_id
GROUP BY """ + TAGS_GROUP_BY + """
ORDER BY tags.tag_namespace, tags.tag
;""", vars=locals())
def tags_by_namespace(tag_namespace):
return web.query(BASE_TAGS_QUERY + """
LEFT JOIN locations ON (locations.visible = 1 AND locations.id = locations_users_tags.location_id)
WHERE (locations.type = 'point' OR locations.type = 'project')
AND tags.tag_namespace = $tag_namespace
GROUP BY """ + TAGS_GROUP_BY + """
ORDER BY tags.tag_namespace, tags.tag
;""", vars=locals())
def hide_point(point):
web.query("""
UPDATE locations
SET visible = 0
WHERE id = $id
;
""", vars=point.copy())
web.query("""
UPDATE projects_points
SET visible = 0
WHERE location_id = $id
;
""", vars=point.copy())
def hide_project(project):
web.query("""
UPDATE locations
SET visible = 0
WHERE id = $id
;
""", vars=project.copy())
# quick fix: otherwise count is broken!!!
web.query("""
DELETE FROM project_users
WHERE project_id = $id
""", vars=project.copy())
def hide_comment(comment):
return web.query("""
UPDATE notes
SET visible = 0
WHERE id = $id
;
""", vars=comment.copy())
def delete_point(point):
web.query("""
DELETE FROM locations
WHERE id = $id
;
""", vars=point.copy())
web.query("""
DELETE FROM projects_points
WHERE location_id = $id
;
""", vars=point.copy())
web.query("""
DELETE FROM locations_users_tags
WHERE location_id = $id
;
""", vars=point.copy())
web.query("""
DELETE FROM notes
WHERE location_id = $id
;
""", vars=point.copy())
def delete_project(project):
web.query("""
DELETE FROM projects_points
WHERE project_id = $id
""", vars=project.copy())
web.query("""
DELETE FROM locations_datasources
WHERE location_id = $id
""", vars=project.copy())
web.query("""
DELETE FROM project_users
WHERE project_id = $id
""", vars=project.copy())
web.query("""
DELETE FROM locations_users_tags
WHERE location_id = $id
;
""", vars=project.copy())
web.query("""
DELETE FROM notes
WHERE location_id = $id
;
""", vars=project.copy())
web.query("""
DELETE FROM locations
WHERE id = $id
""", vars=project.copy())
#triggers also!!!
def delete_comment(comment):
web.query("""
DELETE FROM notes
WHERE id = $id
""", vars=comment.copy())
def delete_trigger(trigger):
web.query("""
DELETE FROM triggers
WHERE id = $id
""", vars=trigger.copy())
def remove_point_from_project(point, project):
web.query("""
UPDATE projects_points
SET visible = 0
WHERE location_id = $point_id
AND project_id = $project_id
;
""", vars=dict(point_id=point.id, project_id=project.id))
def remove_datasource_from_project(ds, project):
web.query("""
DELETE FROM locations_datasources
WHERE locations_datasources.location_id = $project_id
AND locations_datasources.datasource_id = $ds_id
;
""", vars=dict(ds_id=ds.id, project_id=project.id))
# !!! credentials is used in UM in two different meanings:
# just password, as in database
# password, username in a {} fashion.
# !!! refactoring needed
def check_credentials(credentials):
return web.query("""
SELECT DISTINCT * FROM users
WHERE
credentials = $password
AND upper(username) = upper($username)
;""", vars=credentials.copy())
def pwd_function(username):
try:
return list(web.query("""
SELECT DISTINCT credentials FROM users
WHERE upper(username) = upper($username)
;""", vars=vars()))[0].credentials
except:
        raise ValueError("Password not found")
def groups_by_user(user):
return web.query("""
SELECT groups.id as id, groupname
FROM group_users, groups
WHERE groups.id = group_users.group_id
AND user_id=$id;
;""", vars=user.copy())
def user_insert(user):
return web.query("""
INSERT INTO users (username, credentials, description)
VALUES ($username, $password, $description);
""", vars=user.copy())
def user_update(user):
web.update("users", where='id=$id',
credentials=user.password,
vars=user.copy(),
)
def profile_update(user, profile):
for k, v in profile.items():
web.query("""
DELETE FROM user_profiles
WHERE user_id = $user_id
AND prop_key=$prop_key;
""", vars=dict(user_id=user.id, prop_key=k,))
web.query("""
INSERT INTO user_profiles (user_id, prop_key, prop_value)
VALUES ($user_id, $prop_key, $prop_value);
""", vars=dict(user_id=user.id,
prop_key=k,
prop_value=v))
def search_profile_email(email):
return web.query("""
SELECT user_id FROM user_profiles
WHERE prop_key = $prop_key
AND LOWER(prop_value) = LOWER($prop_value);
""", vars=dict(prop_key="email", prop_value=email))
def user_profile(user):
return dict([(p.prop_key, p.prop_value) for p in web.query("""
SELECT * FROM user_profiles
WHERE user_id = $user_id;
""", vars=dict(user_id=user.id,))])
def object_profile(object):
return dict([(p.prop_key, p.prop_value) for p in web.query("""
SELECT * FROM location_profiles
WHERE location_id = $location_id;
""", vars=dict(location_id=object.id,))])
def object_profile_update(object, profile):
for k, v in profile.items():
web.query("""
DELETE FROM location_profiles
WHERE location_id = $location_id
AND prop_key=$prop_key;
""", vars=dict(location_id=object.id, prop_key=k,))
if v is not None:
web.query("""
INSERT INTO location_profiles (location_id, prop_key, prop_value)
VALUES ($location_id, $prop_key, $prop_value);
""", vars=dict(location_id=object.id,
prop_key=k,
prop_value=v))
def search_object_profiles(key, value):
# not tested!!! not used!!!
return web.query("""
SELECT location_id FROM location_profiles
WHERE prop_key = $prop_key
AND LOWER(prop_value) = LOWER($prop_value);
""", vars=dict(prop_key=key, prop_value=value))
def note_profile(object):
return dict([(p.prop_key, p.prop_value) for p in web.query("""
SELECT * FROM note_profiles
WHERE note_id = $note_id;
""", vars=dict(note_id=object.id,))])
def note_profile_update(object, profile):
for k, v in profile.items():
web.query("""
DELETE FROM note_profiles
WHERE note_id = $location_id
AND prop_key=$prop_key;
""", vars=dict(note_id=object.id, prop_key=k,))
if v is not None:
web.query("""
INSERT INTO note_profiles (note_id, prop_key, prop_value)
VALUES ($note_id, $prop_key, $prop_value);
""", vars=dict(note_id=object.id,
prop_key=k,
prop_value=v))
def search_note_profiles(key, value):
# not tested!!! not used!!!
return web.query("""
SELECT note_id FROM note_profiles
WHERE prop_key = $prop_key
AND LOWER(prop_value) = LOWER($prop_value);
""", vars=dict(prop_key=key, prop_value=value))
def group_update(user, group):
#!!! delete?
web.query("""
DELETE FROM group_users
WHERE user_id = $user_id
AND group_id = $group_id;
""", vars=dict(user_id=user.id, group_id=group.id))
return web.query("""
INSERT INTO group_users (user_id, group_id)
VALUES ($user_id, $group_id);
""", vars=dict(user_id=user.id, group_id=group.id))
def groups():
return web.query("""SELECT * from groups;""")
def group_by_id(id):
return web.query("""SELECT *
FROM groups WHERE id=$id;""", vars=dict(id=id))
def group_by_name(groupname):
return web.query("""SELECT *
FROM groups WHERE groupname=$groupname;""", vars=dict(groupname=groupname))
def delete_user_by_description(description):
""" Description is used to temporary store some info"""
web.query("""
DELETE FROM users
WHERE description = $description
""", vars=dict(description=description))
def user_by_description(description):
return web.select("users", where="description=$description",
vars=dict(description=description))
def user_by_location_role(location):
return web.query("""SELECT """ + USERS_GROUP_BY + """
FROM users, locations_policy_table
WHERE locations_policy_table.user_id = users.id
AND locations_policy_table.location_id=$location_id
GROUP BY """ + USERS_GROUP_BY + """;""",
vars=dict(location_id=location.id))
def update_user_description(user, description):
return web.query("""
UPDATE users
SET description = $description
WHERE id = $id
;
""", vars=dict(id=user.id, description=description))
def version():
return web.query("""
SELECT version as "Latest patch:", added as "Applied at:"
FROM version ORDER BY added DESC
""")
def db_version():
return web.query("""
SELECT version();
""")
def num_of_records(table):
return web.query("""
SELECT count(*) as "count" FROM %s;
""" % table)
def locations_stats():
return web.query("""
select count(*) as "count", type, origin, visible
from locations
group by type, origin, visible;""")
def check_connection():
return web.query("""SELECT 2 + 2;""")
def get_policies(object, user):
return web.query("""
SELECT role
FROM locations_policy_table
WHERE user_id = $user_id
AND location_id=$location_id;
;""", vars={'user_id': user.id, 'location_id': object.id})
def set_policies(object, user, roles, adder_user):
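    """ Grants each of the given roles on the object to the user, recording who granted them """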
for role in roles:
web.insert("locations_policy_table",
user_id=user.id,
location_id=object.id,
adder_user_id=adder_user.id,
role=role
)
def unset_policies(object, user, roles, adder_user):
for role in roles:
web.query("""
DELETE FROM locations_policy_table
WHERE user_id = $user_id
AND location_id=$location_id
AND role=$role
;""", vars={'user_id': user.id, 'location_id': object.id,
'role': role})
def enable_point_by_id(point_id, user):
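    """ Makes a point and its project links visible again and reassigns the point to the given user """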
web.query("""
UPDATE locations
SET visible=1,
user_id=$user_id
WHERE id = $id
;
""", vars={'id': point_id, 'user_id': user.id})
web.query("""
UPDATE projects_points
SET visible=1
WHERE location_id = $id
;
""", vars={'id': point_id})
def replace_and_delete_user(old_user_id, user_id):
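    """ Reassigns the old user's tags and notes to user_id, then deletes the old user """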
web.query("""
UPDATE locations_users_tags
SET user_id=$user_id
WHERE user_id = $old_user_id
;
""", vars={'user_id': user_id, 'old_user_id': old_user_id})
web.query("""
UPDATE notes
SET user_id=$user_id
WHERE user_id = $old_user_id
;
""", vars={'user_id': user_id, 'old_user_id': old_user_id})
web.query("""
DELETE FROM users
WHERE id = $old_user_id
;
""", vars={'old_user_id': old_user_id})
| {
"content_hash": "68b9601ebf0eba67bc10d7871e2dfd41",
"timestamp": "",
"source": "github",
"line_count": 1342,
"max_line_length": 133,
"avg_line_length": 33.70715350223547,
"alnum_prop": 0.5407759478280093,
"repo_name": "rnd0101/urbanmediator",
"id": "3d1c4db44be4ce4b8659cb015f957b9b2efaa6f2",
"size": "45260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urbanmediator/database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "66414"
},
{
"name": "CSS",
"bytes": "164027"
},
{
"name": "ColdFusion",
"bytes": "170360"
},
{
"name": "HTML",
"bytes": "779532"
},
{
"name": "JavaScript",
"bytes": "1985771"
},
{
"name": "Lasso",
"bytes": "33315"
},
{
"name": "PHP",
"bytes": "66919"
},
{
"name": "PLpgSQL",
"bytes": "20888"
},
{
"name": "Perl",
"bytes": "66286"
},
{
"name": "Python",
"bytes": "702129"
},
{
"name": "Shell",
"bytes": "1924"
}
],
"symlink_target": ""
} |
import json
from pymel.core import confirmDialog, menu, menuBarLayout, objExists, PyNode, selected
from .. import _core as core
from .._add import alt
_WEIGHTS = {}
@alt.name('Save Weights Locally', 'Weights')
def saveWeightsLocally():
''' Save the weights on the selected objects in a global var for use in this maya session.
'''
global _WEIGHTS
_WEIGHTS.clear()
for obj in selected():
_WEIGHTS[obj.name()] = core.weights.get(obj)
@alt.name('Load Weights Locally', 'Weights')
def loadWeightsLocally():
''' Loads the weights in this session, applying to the same objects or the selection.
Useful in case the namespaces changed.
'''
global _WEIGHTS
multiLoadWeights( _WEIGHTS )
@alt.name('Save Weights To Clipboard', 'Weights')
def saveWeightsToClipboard():
''' Save weights as json to text clipboard
'''
core.text.clipboard.set(
json.dumps(
{obj.name(): core.weights.get(obj) for obj in selected() }
)
)
@alt.name('Load Weights From Clipboard', 'Weights')
def loadWeightsFromClipboard():
''' Loads the weights from json in the clipboard on the same objects or the selection.
Useful in case the namespaces changed.
'''
multiLoadWeights( json.loads( core.text.clipboard.get() ) )
def multiLoadWeights(data):
''' Takes dict of {obj:weight_info}, applies by name, or to selection.
'''
allObjsExist = True
for name in data:
if not objExists(name):
allObjsExist = False
break
if allObjsExist:
for obj, vals in data.items():
core.weights.apply(PyNode(obj), vals)
elif len(data) == len(selected()):
msg = 'Target objects do not exist, apply in this order?\n'
for selObj, name in zip(selected(), data):
            msg += name + ' -> ' + selObj.name() + '\n'
res = confirmDialog(m=msg, b=['Yes', 'No'])
if res == 'Yes':
            for selObj, (name, vals) in zip(selected(), data.items()):
                core.weights.apply(selObj, vals)
def toolsWindow():
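    ''' Opens a simple window whose menu bar is built from the registered alt commands.
    '''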
with core.ui.singleWindow('Various Tools'):
menuBarLayout()
a = menu()
alt.buildMenus(a) | {
"content_hash": "af732325b91f03bb85b7c61882d366ff",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 94,
"avg_line_length": 27.44578313253012,
"alnum_prop": 0.602721685689201,
"repo_name": "patcorwin/fossil",
"id": "e7fdd192b81169b36705c15b919799bb4f42a02e",
"size": "2278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdil/tool/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1011429"
}
],
"symlink_target": ""
} |
"""Script to get, put and delete secrets stored in credstash."""
import argparse
import getpass
from homeassistant.util.yaml import _SECRET_NAMESPACE
REQUIREMENTS = ['credstash==1.14.0', 'botocore==1.7.34']
def run(args):
"""Handle credstash script."""
parser = argparse.ArgumentParser(
description=("Modify Home Assistant secrets in credstash."
"Use the secrets in configuration files with: "
"!secret <name>"))
parser.add_argument(
'--script', choices=['credstash'])
parser.add_argument(
'action', choices=['get', 'put', 'del', 'list'],
help="Get, put or delete a secret, or list all available secrets")
parser.add_argument(
'name', help="Name of the secret", nargs='?', default=None)
parser.add_argument(
'value', help="The value to save when putting a secret",
nargs='?', default=None)
# pylint: disable=import-error
import credstash
import botocore
args = parser.parse_args(args)
table = _SECRET_NAMESPACE
try:
credstash.listSecrets(table=table)
except botocore.errorfactory.ClientError:
credstash.createDdbTable(table=table)
if args.action == 'list':
secrets = [i['name'] for i in credstash.listSecrets(table=table)]
deduped_secrets = sorted(set(secrets))
print('Saved secrets:')
for secret in deduped_secrets:
print(secret)
return 0
if args.name is None:
parser.print_help()
return 1
if args.action == 'put':
if args.value:
the_secret = args.value
else:
the_secret = getpass.getpass('Please enter the secret for {}: '
.format(args.name))
current_version = credstash.getHighestVersion(args.name, table=table)
credstash.putSecret(args.name,
the_secret,
version=int(current_version) + 1,
table=table)
print('Secret {} put successfully'.format(args.name))
elif args.action == 'get':
the_secret = credstash.getSecret(args.name, table=table)
if the_secret is None:
print('Secret {} not found'.format(args.name))
else:
print('Secret {}={}'.format(args.name, the_secret))
elif args.action == 'del':
credstash.deleteSecrets(args.name, table=table)
print('Deleted secret {}'.format(args.name))
| {
"content_hash": "a4c8b893d2c8418d0802693d76e37634",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 34.84722222222222,
"alnum_prop": 0.5894778796333201,
"repo_name": "ewandor/home-assistant",
"id": "12516e55c7df007d43c7a627dc2384d2ad19f9be",
"size": "2509",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/scripts/credstash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
} |
'''#!/Users/lily/.virtualenvs/[lily]/lib/python2.7'''
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_orthg.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "fca1c0f392408f2ab3930c15586a4461",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 28.7,
"alnum_prop": 0.7003484320557491,
"repo_name": "LighthouseHPC/lighthouse",
"id": "9247adfb32ba7d5af7c3fe516f83c9acfb5fc727",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/lily/django_orthg/manage.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Train the Joint CTC-Attention model (TIMIT corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile, abspath
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
sys.path.append(abspath('../../../'))
from examples.timit.data.load_dataset_joint_ctc_attention import Dataset
from examples.timit.metrics.attention import do_eval_per, do_eval_cer
from utils.io.labels.sparsetensor import list2sparsetensor
from utils.training.learning_rate_controller import Controller
from utils.training.plot import plot_loss, plot_ler
from utils.directory import mkdir_join, mkdir
from utils.parameter import count_total_parameters
from models.attention.joint_ctc_attention import JointCTCAttention
def do_train(model, params):
"""Run training. If target labels are phone, the model is evaluated by PER
with 39 phones.
Args:
model: the model to train
params (dict): A dictionary of parameters
"""
map_file_path_train = '../metrics/mapping_files/' + \
params['label_type'] + '.txt'
map_file_path_eval = '../metrics/mapping_files/' + \
params['label_type'] + '.txt'
# Load dataset
train_data = Dataset(
data_type='train', label_type=params['label_type'],
batch_size=params['batch_size'], map_file_path=map_file_path_train,
max_epoch=params['num_epoch'], splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=True, sort_stop_epoch=params['sort_stop_epoch'])
dev_data = Dataset(
data_type='dev', label_type=params['label_type'],
batch_size=params['batch_size'], map_file_path=map_file_path_train,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False)
if params['label_type'] in ['character', 'character_capital_divide']:
test_data = Dataset(
data_type='test', label_type=params['label_type'],
batch_size=1, map_file_path=map_file_path_eval,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False)
else:
test_data = Dataset(
data_type='test', label_type='phone39',
batch_size=1, map_file_path=map_file_path_eval,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False)
# Tell TensorFlow that the model will be built into the default graph
with tf.Graph().as_default():
# Define placeholders
model.create_placeholders()
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')
# Add to the graph each operation (including model definition)
loss_op, logits, ctc_logits, decoder_outputs_train, decoder_outputs_infer = model.compute_loss(
model.inputs_pl_list[0],
model.labels_pl_list[0],
model.ctc_labels_pl_list[0],
model.inputs_seq_len_pl_list[0],
model.labels_seq_len_pl_list[0],
model.keep_prob_encoder_pl_list[0],
model.keep_prob_decoder_pl_list[0],
model.keep_prob_embedding_pl_list[0])
train_op = model.train(loss_op,
optimizer=params['optimizer'],
learning_rate=learning_rate_pl)
_, decode_op_infer = model.decode(
decoder_outputs_train,
decoder_outputs_infer)
ler_op = model.compute_ler(model.labels_st_true_pl,
model.labels_st_pred_pl)
# Define learning rate controller
lr_controller = Controller(
learning_rate_init=params['learning_rate'],
decay_start_epoch=params['decay_start_epoch'],
decay_rate=params['decay_rate'],
decay_patient_epoch=params['decay_patient_epoch'],
lower_better=True)
# Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train = tf.summary.merge(model.summaries_train)
summary_dev = tf.summary.merge(model.summaries_dev)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Create a saver for writing training checkpoints
saver = tf.train.Saver(max_to_keep=None)
# Count total param
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M param" %
(len(parameters_dict.keys()),
"{:,}".format(total_parameters / 1000000)))
csv_steps, csv_loss_train, csv_loss_dev = [], [], []
csv_ler_train, csv_ler_dev = [], []
# Create a session for running operation on the graph
with tf.Session() as sess:
# Instantiate a SummaryWriter to output summaries and the graph
summary_writer = tf.summary.FileWriter(
model.save_path, sess.graph)
# Initialize param
sess.run(init_op)
# Train model
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
ler_dev_best = 1
learning_rate = float(params['learning_rate'])
for step, (data, is_new_epoch) in enumerate(train_data):
# Create feed dictionary for next mini batch (train)
inputs, labels_train, ctc_labels, inputs_seq_len, labels_seq_len, _ = data
feed_dict_train = {
model.inputs_pl_list[0]: inputs[0],
model.labels_pl_list[0]: labels_train[0],
model.ctc_labels_pl_list[0]: list2sparsetensor(
ctc_labels[0], padded_value=train_data.ctc_padded_value),
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.labels_seq_len_pl_list[0]: labels_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1 - float(params['dropout_encoder']),
model.keep_prob_decoder_pl_list[0]: 1 - float(params['dropout_decoder']),
model.keep_prob_embedding_pl_list[0]: 1 - float(params['dropout_embedding']),
learning_rate_pl: learning_rate
}
# Update parameters
sess.run(train_op, feed_dict=feed_dict_train)
if (step + 1) % params['print_step'] == 0:
# Create feed dictionary for next mini batch (dev)
(inputs, labels_dev, ctc_labels, inputs_seq_len,
labels_seq_len, _), _ = dev_data.next()
feed_dict_dev = {
model.inputs_pl_list[0]: inputs[0],
model.labels_pl_list[0]: labels_dev[0],
model.ctc_labels_pl_list[0]: list2sparsetensor(
ctc_labels[0], padded_value=train_data.ctc_padded_value),
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.labels_seq_len_pl_list[0]: labels_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1.0,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0
}
# Compute loss
loss_train = sess.run(loss_op, feed_dict=feed_dict_train)
loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
csv_steps.append(step)
csv_loss_train.append(loss_train)
csv_loss_dev.append(loss_dev)
# Change to evaluation mode
feed_dict_train[model.keep_prob_encoder_pl_list[0]] = 1.0
feed_dict_train[model.keep_prob_decoder_pl_list[0]] = 1.0
feed_dict_train[model.keep_prob_embedding_pl_list[0]] = 1.0
                    # Predict class ids & update event files
predicted_ids_train, summary_str_train = sess.run(
[decode_op_infer, summary_train], feed_dict=feed_dict_train)
predicted_ids_dev, summary_str_dev = sess.run(
[decode_op_infer, summary_dev], feed_dict=feed_dict_dev)
summary_writer.add_summary(summary_str_train, step + 1)
summary_writer.add_summary(summary_str_dev, step + 1)
summary_writer.flush()
# Convert to sparsetensor to compute LER
feed_dict_ler_train = {
model.labels_st_true_pl: list2sparsetensor(
labels_train[0], padded_value=train_data.padded_value),
model.labels_st_pred_pl: list2sparsetensor(
predicted_ids_train, padded_value=train_data.padded_value)
}
feed_dict_ler_dev = {
model.labels_st_true_pl: list2sparsetensor(
labels_dev[0], padded_value=dev_data.padded_value),
model.labels_st_pred_pl: list2sparsetensor(
predicted_ids_dev, padded_value=dev_data.padded_value)
}
# Compute accuracy
ler_train = sess.run(ler_op, feed_dict=feed_dict_ler_train)
ler_dev = sess.run(ler_op, feed_dict=feed_dict_ler_dev)
csv_ler_train.append(ler_train)
csv_ler_dev.append(ler_dev)
duration_step = time.time() - start_time_step
print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / ler = %.3f (%.3f) / lr = %.5f (%.3f min)" %
(step + 1, train_data.epoch_detail, loss_train, loss_dev, ler_train, ler_dev,
learning_rate, duration_step / 60))
sys.stdout.flush()
start_time_step = time.time()
# Save checkpoint and evaluate model per epoch
if is_new_epoch:
duration_epoch = time.time() - start_time_epoch
print('-----EPOCH:%d (%.3f min)-----' %
(train_data.epoch, duration_epoch / 60))
                    # Save figure of loss & LER
plot_loss(csv_loss_train, csv_loss_dev, csv_steps,
save_path=model.save_path)
plot_ler(csv_ler_train, csv_ler_dev, csv_steps,
label_type=params['label_type'],
save_path=model.save_path)
# if train_data.epoch >= params['eval_start_epoch']:
if train_data.epoch >= 5:
start_time_eval = time.time()
if 'char' in params['label_type']:
print('=== Dev Data Evaluation ===')
ler_dev_epoch, wer_dev_epoch = do_eval_cer(
session=sess,
decode_op=decode_op_infer,
model=model,
dataset=dev_data,
label_type=params['label_type'],
eval_batch_size=1,
is_jointctcatt=True)
print(' CER: %f %%' % (ler_dev_epoch * 100))
print(' WER: %f %%' % (wer_dev_epoch * 100))
if ler_dev_epoch < ler_dev_best:
ler_dev_best = ler_dev_epoch
print('■■■ ↑Best Score (CER)↑ ■■■')
# Save model only when best accuracy is
# obtained (check point)
checkpoint_file = join(
model.save_path, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=train_data.epoch)
print("Model saved in file: %s" % save_path)
print('=== Test Data Evaluation ===')
ler_test, wer_test = do_eval_cer(
session=sess,
decode_op=decode_op_infer,
model=model,
dataset=test_data,
label_type=params['label_type'],
is_test=True,
eval_batch_size=1,
is_jointctcatt=True)
print(' CER: %f %%' % (ler_test * 100))
print(' WER: %f %%' % (wer_test * 100))
else:
print('=== Dev Data Evaluation ===')
ler_dev_epoch = do_eval_per(
session=sess,
decode_op=decode_op_infer,
per_op=ler_op,
model=model,
dataset=dev_data,
label_type=params['label_type'],
eval_batch_size=1,
is_jointctcatt=True)
print(' PER: %f %%' % (ler_dev_epoch * 100))
if ler_dev_epoch < ler_dev_best:
ler_dev_best = ler_dev_epoch
print('■■■ ↑Best Score (PER)↑ ■■■')
# Save model only when best accuracy is
# obtained (check point)
checkpoint_file = join(
model.save_path, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=train_data.epoch)
print("Model saved in file: %s" % save_path)
print('=== Test Data Evaluation ===')
ler_test = do_eval_per(
session=sess,
decode_op=decode_op_infer,
per_op=ler_op,
model=model,
dataset=test_data,
label_type=params['label_type'],
is_test=True,
eval_batch_size=1,
is_jointctcatt=True)
print(' PER: %f %%' % (ler_test * 100))
duration_eval = time.time() - start_time_eval
print('Evaluation time: %.3f min' %
(duration_eval / 60))
# Update learning rate
learning_rate = lr_controller.decay_lr(
learning_rate=learning_rate,
epoch=train_data.epoch,
value=ler_dev_epoch)
start_time_epoch = time.time()
duration_train = time.time() - start_time_train
print('Total time: %.3f hour' % (duration_train / 3600))
# Training was finished correctly
with open(join(model.save_path, 'complete.txt'), 'w') as f:
f.write('')
def main(config_path, model_save_path):
# Load a config file (.yml)
with open(config_path, "r") as f:
config = yaml.load(f)
params = config['param']
# Except for a <SOS> and <EOS> class
if params['label_type'] == 'phone61':
params['num_classes'] = 61
elif params['label_type'] == 'phone48':
params['num_classes'] = 48
elif params['label_type'] == 'phone39':
params['num_classes'] = 39
elif params['label_type'] == 'character':
params['num_classes'] = 28
elif params['label_type'] == 'character_capital_divide':
params['num_classes'] = 72
else:
raise TypeError
# Model setting
model = JointCTCAttention(
input_size=params['input_size'] * params['num_stack'],
encoder_type=params['encoder_type'],
encoder_num_units=params['encoder_num_units'],
encoder_num_layers=params['encoder_num_layers'],
encoder_num_proj=params['encoder_num_proj'],
attention_type=params['attention_type'],
attention_dim=params['attention_dim'],
decoder_type=params['decoder_type'],
decoder_num_units=params['decoder_num_units'],
decoder_num_layers=params['decoder_num_layers'],
embedding_dim=params['embedding_dim'],
lambda_weight=params['lambda_weight'],
num_classes=params['num_classes'],
sos_index=params['num_classes'],
eos_index=params['num_classes'] + 1,
max_decode_length=params['max_decode_length'],
lstm_impl='LSTMBlockCell',
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation_encoder=params['clip_activation_encoder'],
clip_activation_decoder=params['clip_activation_decoder'],
weight_decay=params['weight_decay'],
time_major=True,
sharpening_factor=params['sharpening_factor'],
logits_temperature=params['logits_temperature'])
# Set process name
setproctitle('tf_timit_' + model.name + '_' +
params['label_type'] + '_' + params['attention_type'])
model.name += '_en' + str(params['encoder_num_units'])
model.name += '_' + str(params['encoder_num_layers'])
model.name += '_att' + str(params['attention_dim'])
model.name += '_de' + str(params['decoder_num_units'])
model.name += '_' + str(params['decoder_num_layers'])
model.name += '_' + params['optimizer']
model.name += '_lr' + str(params['learning_rate'])
model.name += '_' + params['attention_type']
if params['dropout_encoder'] != 0:
model.name += '_dropen' + str(params['dropout_encoder'])
if params['dropout_decoder'] != 0:
model.name += '_dropde' + str(params['dropout_decoder'])
if params['dropout_embedding'] != 0:
model.name += '_dropem' + str(params['dropout_embedding'])
if params['num_stack'] != 1:
model.name += '_stack' + str(params['num_stack'])
if params['weight_decay'] != 0:
model.name += 'wd' + str(params['weight_decay'])
if params['sharpening_factor'] != 1:
model.name += '_sharp' + str(params['sharpening_factor'])
if params['logits_temperature'] != 1:
model.name += '_temp' + str(params['logits_temperature'])
model.name += '_lambda' + str(params['lambda_weight'])
# Set save path
model.save_path = mkdir_join(
model_save_path, 'joint_ctc_attention', params['label_type'], model.name)
# Reset model directory
model_index = 0
new_model_path = model.save_path
while True:
if isfile(join(new_model_path, 'complete.txt')):
# Training of the first model have been finished
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
elif isfile(join(new_model_path, 'config.yml')):
# Training of the first model have not been finished yet
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
else:
break
model.save_path = mkdir(new_model_path)
# Save config file
shutil.copyfile(config_path, join(model.save_path, 'config.yml'))
sys.stdout = open(join(model.save_path, 'train.log'), 'w')
# TODO(hirofumi): change to logger
do_train(model=model, params=params)
if __name__ == '__main__':
args = sys.argv
if len(args) != 3:
raise ValueError('Length of args should be 3.')
main(config_path=args[1], model_save_path=args[2])
| {
"content_hash": "0b9cda145c38379df46755c375c57b5e",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 114,
"avg_line_length": 46.412556053811656,
"alnum_prop": 0.5108695652173914,
"repo_name": "hirofumi0810/tensorflow_end2end_speech_recognition",
"id": "8d4945860928ebaedb678d354f4323ae596041b7",
"size": "20780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/timit/training/train_joint_ctc_attention.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "535815"
},
{
"name": "Shell",
"bytes": "2247"
}
],
"symlink_target": ""
} |
class QueryMixin:
"""The default query object used for models, and exposed as
:attr:`~SQLAlchemy.Query`. This can be subclassed and
replaced for individual models by setting the :attr:`~Model.query_class`
attribute. This is a subclass of a standard SQLAlchemy
:class:`~sqlalchemy.orm.query.Query` class and has all the methods of a
standard query as well.
"""
def get_or_404(self, ident):
"""Like :meth:`get` but aborts with 404 if not found instead of
returning `None`.
"""
rv = self.get(ident)
if rv is None:
raise Http404
return rv
def first_or_404(self):
"""Like :meth:`first` but aborts with 404 if not found instead of
returning `None`.
"""
rv = self.first()
if rv is None:
raise Http404
return rv
def paginate(self, request, page=None, per_page=None, error_out=True):
"""Returns `per_page` items from page `page`. By default it will
abort with 404 if no items were found and the page was larger than
        1. This behavior can be disabled by setting `error_out` to `False`.
If page or per_page are None, they will be retrieved from the
request query. If the values are not ints and ``error_out`` is
true, it will abort with 404. If there is no request or they
aren't in the query, they default to page 1 and 20
respectively.
Returns an :class:`Pagination` object.
"""
if page is None:
try:
page = int(request.args.get('page', 1))
except (TypeError, ValueError):
if error_out:
raise Http404
page = 1
if per_page is None:
try:
per_page = int(request.args.get('per_page', 20))
except (TypeError, ValueError):
if error_out:
raise Http404
per_page = 20
if error_out and page < 1:
raise Http404
items = self.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
raise Http404
# No need to count if we're on the first page and there are fewer
# items than we expected.
if page == 1 and len(items) < per_page:
total = len(items)
else:
total = self.order_by(None).count()
return Pagination(self, page, per_page, total, items)
class Pagination(object):
"""Internal helper class returned by :meth:`BaseQuery.paginate`. You
can also construct it from any other SQLAlchemy query object if you are
working with other libraries. Additionally it is possible to pass `None`
as query object in which case the :meth:`prev` and :meth:`next` will
no longer work.
"""
def __init__(self, query, page, per_page, total, items):
#: the unlimited query object that was used to create this
#: pagination object.
self.query = query
#: the current page number (1 indexed)
self.page = page
#: the number of items to be displayed on a page.
self.per_page = per_page
#: the total number of items matching the query
self.total = total
#: the items for the current page
self.items = items
@property
def pages(self):
"""The total number of pages"""
if self.per_page == 0:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
assert self.query is not None, 'a query object is required ' \
'for this method to work'
        return self.query.paginate(None, self.page - 1, self.per_page, error_out)
@property
def prev_num(self):
"""Number of the previous page."""
return self.page - 1
@property
def has_prev(self):
"""True if a previous page exists"""
return self.page > 1
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
assert self.query is not None, 'a query object is required ' \
'for this method to work'
        return self.query.paginate(None, self.page + 1, self.per_page, error_out)
@property
def has_next(self):
"""True if a next page exists."""
return self.page < self.pages
@property
def next_num(self):
"""Number of the next page"""
return self.page + 1
| {
"content_hash": "2eedcec537109f50e2daf627e05bfa8a",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 77,
"avg_line_length": 34.35766423357664,
"alnum_prop": 0.5755258126195029,
"repo_name": "tazo90/lux",
"id": "04a45d376bdf2335f114e1e2c49694c0bebba512",
"size": "4708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lux/extensions/odm/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85029"
},
{
"name": "HTML",
"bytes": "17331"
},
{
"name": "JavaScript",
"bytes": "354892"
},
{
"name": "Python",
"bytes": "543161"
}
],
"symlink_target": ""
} |
from TASSELpy.java.util.Iterator import Iterator
from TASSELpy.utils.Overloading import javaOverload,javaGenericOverload,javaConstructorOverload
from TASSELpy.utils.helper import make_sig
java_imports = {'ListIterator':'java/util/ListIterator',
'Object':'java/lang/Object'}
class ListIterator(Iterator):
_java_name = java_imports['ListIterator']
@javaConstructorOverload(java_imports['ListIterator'])
def __init__(self, *args, **kwargs):
super(ListIterator,self).__init__(*args, **kwargs)
## Inserts the specified element into the list
# @param e the element you want to add to the list
@javaGenericOverload("add",
(make_sig([java_imports['Object']],'void'),('/@1/',),None))
def add(self, *args):
"""
Inserts the specified element into the list
Signatures:
void add(E e)
Arguments:
e -- the element you want to add to the list
"""
pass
## Returns true if this list iterator has more elements when traversing the list
# in the reverse direction
# @return true if the list iterator has more elements in the reverse direction
@javaOverload("hasPrevious",
(make_sig([],'boolean'),(),None))
def hasPrevious(self, *args):
"""
Returns true if this list iterator has more elements when traversing the list
in the reverse direction
Signatures:
boolean hasPrevious()
Returns:
true if the list iterator has more elements in the reverse direction
"""
pass
## Returns the index of the element that would be returned by a subsequent call to
# next()
# @return the element that would be returned by a subsequent call to next()
@javaOverload("nextIndex",
(make_sig([],'int'),(),None))
def nextIndex(self, *args):
"""
Returns the index of the element that would be returned by a subsequent call to
next()
Signatures:
int nextIndex()
Returns:
the index of the element that would be returned by a subsequent call to next()
"""
pass
## Returns the previous element in the list and moves the cursor position backwards
# @return the previous element in the list
@javaGenericOverload("previous",
(make_sig([],java_imports['Object']),('/@1/',),None))
def previous(self, *args):
"""
Returns the previous element in the list and moves the cursor position backwards
Signatures:
E previous()
Returns:
The previous element in the list
"""
pass
## Returns the index of the element that would be returned by a subsequent call
# to previous()
# @return the index of the element that would be returned by a subsequent call to
# previous()
@javaOverload("previousIndex",
(make_sig([],'int'),(),None))
def previousIndex(self, *args):
"""
Returns the index of the element that would be returned by a subsequent call
to previous()
Signatures:
int previousIndex()
Returns:
The index of the element that would be returned by a subsequent call to previous()
"""
pass
## Replaces the last element returned by next() or previous() with the specified element
# @param e the element with which to replace the last element returned by next or previous
@javaGenericOverload("set",
(make_sig([java_imports['Object']],'void'),('/@1/',),None))
def set(self, *args):
"""
Replaces the last element returned by next() or previous() with the specified
element
Signatures:
void set(E e)
Arguments:
e -- the element with which to replace the last element returned by next or previous
"""
pass
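# Hedged usage sketch (illustrative only): `java_list` stands for a hypothetical
# TASSELpy wrapper whose listIterator() returns this ListIterator type; that method
# is an assumption about the caller, and hasNext()/next() come from the Iterator
# base class imported above.
def _walk_backwards(java_list):
    it = java_list.listIterator()
    # advance to the end, then traverse in reverse using the methods defined above
    while it.hasNext():
        it.next()
    reversed_items = []
    while it.hasPrevious():
        reversed_items.append(it.previous())
    return reversed_items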
| {
"content_hash": "bf196460fb75cb8c2c65542977d7f0c1",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 95,
"avg_line_length": 32.27049180327869,
"alnum_prop": 0.6182372364744729,
"repo_name": "er432/TASSELpy",
"id": "d9cd1dd218574e6d63b48d40012e3a06ab4ae71d",
"size": "3937",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "TASSELpy/java/util/ListIterator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "947691"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
from . import util as _util
from .engine import create_engine
from .engine import create_mock_engine
from .engine import engine_from_config
from .inspection import inspect
from .schema import BLANK_SCHEMA
from .schema import CheckConstraint
from .schema import Column
from .schema import ColumnDefault
from .schema import Computed
from .schema import Constraint
from .schema import DDL
from .schema import DefaultClause
from .schema import FetchedValue
from .schema import ForeignKey
from .schema import ForeignKeyConstraint
from .schema import Identity
from .schema import Index
from .schema import MetaData
from .schema import PrimaryKeyConstraint
from .schema import Sequence
from .schema import Table
from .schema import ThreadLocalMetaData
from .schema import UniqueConstraint
from .sql import alias
from .sql import all_
from .sql import and_
from .sql import any_
from .sql import asc
from .sql import between
from .sql import bindparam
from .sql import case
from .sql import cast
from .sql import collate
from .sql import column
from .sql import delete
from .sql import desc
from .sql import distinct
from .sql import except_
from .sql import except_all
from .sql import exists
from .sql import extract
from .sql import false
from .sql import func
from .sql import funcfilter
from .sql import insert
from .sql import intersect
from .sql import intersect_all
from .sql import join
from .sql import LABEL_STYLE_DEFAULT
from .sql import LABEL_STYLE_DISAMBIGUATE_ONLY
from .sql import LABEL_STYLE_NONE
from .sql import LABEL_STYLE_TABLENAME_PLUS_COL
from .sql import lambda_stmt
from .sql import lateral
from .sql import literal
from .sql import literal_column
from .sql import modifier
from .sql import not_
from .sql import null
from .sql import nulls_first
from .sql import nulls_last
from .sql import nullsfirst
from .sql import nullslast
from .sql import or_
from .sql import outerjoin
from .sql import outparam
from .sql import over
from .sql import select
from .sql import subquery
from .sql import table
from .sql import tablesample
from .sql import text
from .sql import true
from .sql import tuple_
from .sql import type_coerce
from .sql import union
from .sql import union_all
from .sql import update
from .sql import values
from .sql import within_group
from .types import ARRAY
from .types import BIGINT
from .types import BigInteger
from .types import BINARY
from .types import BLOB
from .types import BOOLEAN
from .types import Boolean
from .types import CHAR
from .types import CLOB
from .types import DATE
from .types import Date
from .types import DATETIME
from .types import DateTime
from .types import DECIMAL
from .types import Enum
from .types import FLOAT
from .types import Float
from .types import INT
from .types import INTEGER
from .types import Integer
from .types import Interval
from .types import JSON
from .types import LargeBinary
from .types import NCHAR
from .types import NUMERIC
from .types import Numeric
from .types import NVARCHAR
from .types import PickleType
from .types import REAL
from .types import SMALLINT
from .types import SmallInteger
from .types import String
from .types import TEXT
from .types import Text
from .types import TIME
from .types import Time
from .types import TIMESTAMP
from .types import TypeDecorator
from .types import Unicode
from .types import UnicodeText
from .types import VARBINARY
from .types import VARCHAR
__version__ = "1.4.26"
def __go(lcls):
global __all__
from . import events
from . import util as _sa_util
import inspect as _inspect
__all__ = sorted(
name
for name, obj in lcls.items()
if not (name.startswith("_") or _inspect.ismodule(obj))
)
_sa_util.preloaded.import_prefix("sqlalchemy")
from . import exc
exc._version_token = "".join(__version__.split(".")[0:2])
__go(locals())
| {
"content_hash": "2443e51bc1a6afe66bebc77e629afb2a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 63,
"avg_line_length": 25.606666666666666,
"alnum_prop": 0.7781827649049726,
"repo_name": "monetate/sqlalchemy",
"id": "232d24364cbb5f3746f8331c8ad94b441ba6f674",
"size": "4085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teacher', '0002_auto_20170621_0741'),
]
operations = [
migrations.AddField(
model_name='exam',
name='marks_per_question',
field=models.IntegerField(default=1),
),
migrations.AddField(
model_name='exam',
name='total_marks',
field=models.IntegerField(blank=True, null=True),
),
]
| {
"content_hash": "a9ec2bc77c6998c477608148f7c9d331",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 61,
"avg_line_length": 23.91304347826087,
"alnum_prop": 0.5709090909090909,
"repo_name": "sehgalayush1/labquiz",
"id": "a074a57c9438cb81e7563ac3863abcd5f339fd28",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labquiz/teacher/migrations/0003_auto_20170711_1526.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14085"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "29653"
}
],
"symlink_target": ""
} |
import SocketServer
import socket
import random
class ThermometerHandler(SocketServer.BaseRequestHandler):
    # Collection of temperatures shared by all handler instances; handle() below
    # overwrites the first entry when a client sends a new reading.
    temperatures = ["11", "18", "20", "5", "70"]
def handle(self):
self.data = self.request.recv(1024).strip()
print "{} wrote :".format(self.client_address[0])
if self.data.upper() == "GET_DATA":
temperature = str(random.randrange(0,22))
#self.request.sendall(temperature,self.Heater)
self.request.send(temperature)
else:
self.temperatures[0] = self.data
self.request.send(self.temperatures[0])
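# Hedged example client (not part of the original handler): it shows the wire
# protocol above, namely send "GET_DATA" and read back a temperature string.
# Host and port mirror the defaults in the __main__ block below.
def example_client(host="localhost", port=8000):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    sock.send("GET_DATA")
    reading = sock.recv(1024).strip()
    sock.close()
    return reading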
if __name__ == "__main__":
HOST, PORT = "localhost", 8000
server = SocketServer.TCPServer((HOST,PORT),ThermometerHandler)
server.serve_forever()
| {
"content_hash": "48bd6debd4571dc0c81b20df20616b11",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 32.916666666666664,
"alnum_prop": 0.6341772151898735,
"repo_name": "stephenoken/-SNMP-Management-Station-for-Wireless-Indoor-Network",
"id": "87a8481d7b081c26a0f8cdeabb5c2670fcef1f86",
"size": "790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sensors/thermometer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40803"
}
],
"symlink_target": ""
} |
from quick_orm.core import Database
from sqlalchemy import Column, String
class DefaultModel:
name = Column(String(70))
__metaclass__ = Database.MetaBuilder(DefaultModel)
class User:
pass
class Group:
pass
Database.register()
if __name__ == '__main__':
db = Database('sqlite://')
db.create_tables()
user = User(name = 'tylerlong')
db.session.add(user)
group = Group(name = 'python')
db.session.add_then_commit(group)
print user.name
print group.name | {
"content_hash": "577da682be41b9f0f8ca19d3588777ee",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 50,
"avg_line_length": 20.192307692307693,
"alnum_prop": 0.6304761904761905,
"repo_name": "tylerlong/quick_orm",
"id": "a1e092fb40b7f25b9a5d42443770c9f089bc9e91",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quick_orm/examples/meta_builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41788"
}
],
"symlink_target": ""
} |
"""Definition provenance collector. Handle multiple files/visitors"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import sys
import weakref
from collections import defaultdict
from future.builtins import map as cvmap
from future.utils import viewitems
import pyposast
from .slicing_visitor import SlicingVisitor
from ...persistence.models import FunctionDef, Object
from ...utils.io import print_msg
from ...utils.metaprofiler import meta_profiler
class Definition(object): # pylint: disable=too-many-instance-attributes
"""Collect definition provenance"""
def __init__(self, metascript):
self.metascript = weakref.proxy(metascript)
self.paths = []
# Map of dependencies by line
self.line_dependencies = {}
        # Map of generator dependencies by line
self.line_gen_dependencies = {}
# Map of loops by line
self.loops = {}
# Map of conditional statements (if, while) by line
self.conditions = {}
# Map of name_refs by line
self.line_usages = {}
# Map of calls by line and col
self.call_by_col = {}
# Map of calls by offset line and lasti
self.call_by_lasti = {}
# Map of with __enter__ by line and lasti
self.with_enter_by_lasti = {}
# Map of with __exit__ by line and lasti
self.with_exit_by_lasti = {}
# Set of imports
self.imports = {}
# Set of GET_ITER and FOR_ITER lasti by line
self.iters = {}
# Function definitions
self.function_globals = defaultdict(lambda: defaultdict(list))
@meta_profiler("definition")
def collect_provenance(self):
"""Collect definition provenance from scripts in metascript.paths"""
metascript = self.metascript
print_msg(" registering user-defined functions")
for path, file_definition in viewitems(metascript.paths):
visitor = self._visit_ast(file_definition)
if visitor:
if metascript.disasm:
print("--------------------------------------------------")
print(path)
print("--------------------------------------------------")
print("\n".join(cvmap(repr, visitor.disasm)))
print("--------------------------------------------------")
self._add_visitor(visitor)
def store_provenance(self):
"""Store definition provenance"""
metascript = self.metascript
tid = metascript.trial_id
# Remove after save
partial = True
FunctionDef.fast_store(tid, metascript.definitions_store, partial)
Object.fast_store(tid, metascript.objects_store, partial)
def _visit_ast(self, file_definition):
"""Return a visitor that visited the tree"""
metascript = self.metascript
try:
tree = pyposast.parse(file_definition.code, file_definition.name)
except SyntaxError:
print_msg("Syntax error on file {}. Skipping file.".format(
file_definition.name))
return None
visitor = SlicingVisitor(metascript, file_definition)
visitor.result = visitor.visit(tree)
if sys.version_info < (3, 6):
visitor.extract_disasm()
visitor.teardown()
return visitor
def _add_visitor(self, visitor):
"""Add visitor data to Definition object"""
self.paths.append(visitor.path)
self.line_dependencies[visitor.path] = visitor.dependencies
self.line_gen_dependencies[visitor.path] = visitor.gen_dependencies
self.line_usages[visitor.path] = visitor.line_usages
self.call_by_col[visitor.path] = visitor.call_by_col
self.call_by_lasti[visitor.path] = visitor.function_calls_by_lasti
self.with_enter_by_lasti[visitor.path] = visitor.with_enter_by_lasti
self.with_exit_by_lasti[visitor.path] = visitor.with_exit_by_lasti
self.imports[visitor.path] = visitor.imports
self.iters[visitor.path] = visitor.iters
self.function_globals[visitor.path] = visitor.function_globals
self.loops[visitor.path] = visitor.loops
self.conditions[visitor.path] = visitor.conditions
| {
"content_hash": "a4e8f08e5d5617ef484bddb796e7941c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 127,
"avg_line_length": 39.85454545454545,
"alnum_prop": 0.5978558394160584,
"repo_name": "gems-uff/noworkflow",
"id": "7427f8f0cf2e317fb72d7bd3033bfc601c4cee1f",
"size": "4602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capture/noworkflow/now/collection/prov_definition/definition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176047"
},
{
"name": "HTML",
"bytes": "238"
},
{
"name": "JavaScript",
"bytes": "787748"
},
{
"name": "Jupyter Notebook",
"bytes": "5241520"
},
{
"name": "Prolog",
"bytes": "18527"
},
{
"name": "Python",
"bytes": "656680"
},
{
"name": "TypeScript",
"bytes": "122003"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(name='pylgnetcast',
version='0.3.1',
description='Client for the LG Smart TV running NetCast 3 or 4.',
url='https://github.com/wokar/pylgnetcast',
license='MIT',
packages=['pylgnetcast'],
install_requires=[],
zip_safe=False) | {
"content_hash": "f64137a4281360a721d758a66f6bd538",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 34.4,
"alnum_prop": 0.563953488372093,
"repo_name": "wokar/pylgnetcast",
"id": "5dcdb450389db39cd33c3c9e61afc0c79c939fae",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9162"
}
],
"symlink_target": ""
} |
import os
def mk_reports_dir(report_dir):
"""create directory structure for a new report"""
# approximation of `mkdir -p`
# https://mail.python.org/pipermail/python-dev/2010-July/102092.html
try:
os.makedirs(report_dir)
except OSError, e:
if e.errno == 17:
mk_reports_dir(report_dir.rsplit(os.path.sep, 1)[0])
def existing_reports(report_dir):
"""read reports dir and return list of reports avail there (a list of dates)
The heirarchy of the reports directory is:
{app.config['DITM_OUTPUT_DIR']}
/{section}
/{report_id}
/YYYY-MM-DD
...stuff...
"""
try:
return os.listdir(report_dir)
except OSError:
mk_reports_dir(report_dir)
return []
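# Hedged usage sketch: builds a report_dir following the hierarchy described in
# the docstring above; the section/report_id values are illustrative only.
def example_listing(output_dir, section='some_section', report_id='some_report'):
    report_dir = os.path.join(output_dir, section, report_id)
    return existing_reports(report_dir)  # e.g. ['2014-01-06', '2014-01-13']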
| {
"content_hash": "581b53f1e882cdbb789168c1e6ae4d24",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 27.20689655172414,
"alnum_prop": 0.5893536121673004,
"repo_name": "kennethd/data-in-the-mines",
"id": "fb21d4e3600fb133a13f2ec41cc6afb855bc9b5e",
"size": "790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data-in-the-mines/ditm/reports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "668"
},
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Python",
"bytes": "11318"
},
{
"name": "Shell",
"bytes": "1546"
}
],
"symlink_target": ""
} |
import cPickle as pickle
import datetime
import uuid
from twext.python.log import Logger
from twext.who.expression import MatchType, MatchFlags, Operand
from twisted.application import service
from twisted.application.strports import service as strPortsService
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.protocol import Factory
from twisted.plugin import IPlugin
from twisted.protocols import amp
from twisted.python.constants import Names, NamedConstant
from twisted.python.usage import Options, UsageError
from twistedcaldav.config import config
from twistedcaldav.stdconfig import DEFAULT_CONFIG, DEFAULT_CONFIG_FILE
from txdav.dps.commands import (
RecordWithShortNameCommand, RecordWithUIDCommand, RecordWithGUIDCommand,
RecordsWithRecordTypeCommand, RecordsWithEmailAddressCommand,
RecordsMatchingTokensCommand, RecordsMatchingFieldsCommand,
MembersCommand, GroupsCommand, SetMembersCommand,
VerifyPlaintextPasswordCommand, VerifyHTTPDigestCommand,
WikiAccessForUIDCommand, ContinuationCommand
# UpdateRecordsCommand, RemoveRecordsCommand
)
from txdav.who.util import directoryFromConfig
from txdav.who.wiki import WikiAccessLevel
from zope.interface import implementer
log = Logger()
##
## Server implementation of Directory Proxy Service
##
class DirectoryProxyAMPProtocol(amp.AMP):
"""
Server side of directory proxy
"""
def __init__(self, directory):
"""
"""
amp.AMP.__init__(self)
self._directory = directory
# How to large we let an AMP response get before breaking it up
self._maxSize = 60000
# The cache of results we have not fully responded with. A dictionary
# whose keys are "continuation tokens" and whose values are tuples of
# (timestamp, list-of-records). When a response does not fit within
# AMP size limits, the remaining records are stored in this dictionary
# keyed by an opaque token we generate to return to the client so that
# it can ask for the remaining results later.
self._continuations = {}
def _storeContinuation(self, records):
"""
Store an iterable of records and generate an opaque token we can
give back to the client so they can later retrieve these remaining
results that did not fit in the previous AMP response.
@param records: an iterable of records
@return: a C{str} token
"""
token = str(uuid.uuid4())
# FIXME: I included a timestamp just in case we want to have code that
# looks for stale continuations to expire them.
self._continuations[token] = (datetime.datetime.now(), records)
return token
def _retrieveContinuation(self, token):
"""
Retrieve the previously stored iterable of records associated with
the token, and remove the token.
@param token: a C{str} token previously returned by _storeContinuation
@return: an iterable of records, or None if the token does not exist
"""
if token in self._continuations:
_ignore_timestamp, records = self._continuations[token]
del self._continuations[token]
else:
records = None
return records
@ContinuationCommand.responder
def continuation(self, continuation):
"""
The client calls this command in order to retrieve records that did
not fit into an earlier response.
@param continuation: the token returned via the "continuation" key
in the previous response.
"""
log.debug("Continuation: {c}", c=continuation)
records = self._retrieveContinuation(continuation)
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
return response
def _recordsToResponse(self, records):
"""
Craft an AMP response containing as many records as will fit within
the size limit. Remaining records are stored as a "continuation",
identified by a token that is returned to the client to fetch later
via the ContinuationCommand.
@param records: an iterable of records
@return: the response dictionary, with a list of pickled records
stored in the "fieldsList" key, and if there are leftover
records that did not fit, there will be a "continuation" key
containing the token the client must send via ContinuationCommand.
"""
fieldsList = []
count = 0
if records:
size = 0
while size < self._maxSize:
try:
record = records.pop()
except (KeyError, IndexError):
# We're done.
# Note: because records is an iterable (list or set)
# we're catching both KeyError and IndexError.
break
pickled = pickle.dumps(self.recordToDict(record))
size = size + len(pickled)
fieldsList.append(pickled)
count += 1
response = {"fieldsList": fieldsList}
if records:
response["continuation"] = self._storeContinuation(records)
return response
def recordToDict(self, record):
"""
Turn a record in a dictionary of fields which can be reconstituted
within the client
"""
fields = {}
if record is not None:
for field, value in record.fields.iteritems():
valueType = record.service.fieldName.valueType(field)
# print("%s: %s (%s)" % (field.name, value, valueType))
if valueType in (unicode, bool):
fields[field.name] = value
elif valueType is uuid.UUID:
fields[field.name] = str(value)
elif issubclass(valueType, (Names, NamedConstant)):
fields[field.name] = value.name if value else None
# print("Server side fields", fields)
return fields
@RecordWithShortNameCommand.responder
@inlineCallbacks
def recordWithShortName(self, recordType, shortName):
recordType = recordType # keep as bytes
shortName = shortName.decode("utf-8")
log.debug("RecordWithShortName: {r} {n}", r=recordType, n=shortName)
record = (yield self._directory.recordWithShortName(
self._directory.recordType.lookupByName(recordType), shortName)
)
fields = self.recordToDict(record)
response = {
"fields": pickle.dumps(fields),
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
@RecordWithUIDCommand.responder
@inlineCallbacks
def recordWithUID(self, uid):
uid = uid.decode("utf-8")
log.debug("RecordWithUID: {u}", u=uid)
try:
record = (yield self._directory.recordWithUID(uid))
except Exception as e:
log.error("Failed in recordWithUID", error=e)
record = None
fields = self.recordToDict(record)
response = {
"fields": pickle.dumps(fields),
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
@RecordWithGUIDCommand.responder
@inlineCallbacks
def recordWithGUID(self, guid):
guid = uuid.UUID(guid)
log.debug("RecordWithGUID: {g}", g=guid)
record = (yield self._directory.recordWithGUID(guid))
fields = self.recordToDict(record)
response = {
"fields": pickle.dumps(fields),
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
@RecordsWithRecordTypeCommand.responder
@inlineCallbacks
def recordsWithRecordType(self, recordType):
recordType = recordType # as bytes
log.debug("RecordsWithRecordType: {r}", r=recordType)
records = (yield self._directory.recordsWithRecordType(
self._directory.recordType.lookupByName(recordType))
)
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
returnValue(response)
@RecordsWithEmailAddressCommand.responder
@inlineCallbacks
def recordsWithEmailAddress(self, emailAddress):
emailAddress = emailAddress.decode("utf-8")
log.debug("RecordsWithEmailAddress: {e}", e=emailAddress)
records = (yield self._directory.recordsWithEmailAddress(emailAddress))
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
returnValue(response)
@RecordsMatchingTokensCommand.responder
@inlineCallbacks
def recordsMatchingTokens(self, tokens, context=None):
tokens = [t.decode("utf-8") for t in tokens]
log.debug("RecordsMatchingTokens: {t}", t=(", ".join(tokens)))
records = yield self._directory.recordsMatchingTokens(
tokens, context=context
)
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
returnValue(response)
@RecordsMatchingFieldsCommand.responder
@inlineCallbacks
def recordsMatchingFields(self, fields, operand="OR", recordType=None):
log.debug("RecordsMatchingFields")
newFields = []
for fieldName, searchTerm, matchFlags, matchType in fields:
fieldName = fieldName.decode("utf-8")
searchTerm = searchTerm.decode("utf-8")
try:
field = self._directory.fieldName.lookupByName(fieldName)
except ValueError:
field = None
if field:
valueType = self._directory.fieldName.valueType(field)
if valueType is uuid.UUID:
searchTerm = uuid.UUID(searchTerm)
matchFlags = matchFlags.decode("utf-8")
if matchFlags.startswith("{") and matchFlags.endswith("}"):
flags = MatchFlags.none
for flag in matchFlags[1:-1].split(","):
flags |= MatchFlags.lookupByName(flag)
matchFlags = flags
else:
matchFlags = MatchFlags.lookupByName(matchFlags)
matchType = MatchType.lookupByName(matchType.decode("utf-8"))
newFields.append((fieldName, searchTerm, matchFlags, matchType))
operand = Operand.lookupByName(operand)
if recordType:
recordType = self._directory.recordType.lookupByName(recordType)
records = yield self._directory.recordsMatchingFields(
newFields, operand=operand, recordType=recordType
)
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
returnValue(response)
@MembersCommand.responder
@inlineCallbacks
def members(self, uid):
uid = uid.decode("utf-8")
log.debug("Members: {u}", u=uid)
try:
record = (yield self._directory.recordWithUID(uid))
except Exception as e:
log.error("Failed in members", error=e)
record = None
records = []
if record is not None:
for member in (yield record.members()):
records.append(member)
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
returnValue(response)
@SetMembersCommand.responder
@inlineCallbacks
def setMembers(self, uid, memberUIDs):
uid = uid.decode("utf-8")
memberUIDs = [m.decode("utf-8") for m in memberUIDs]
log.debug("Set Members: {u} -> {m}", u=uid, m=memberUIDs)
try:
record = (yield self._directory.recordWithUID(uid))
except Exception as e:
log.error("Failed in setMembers", error=e)
record = None
if record is not None:
memberRecords = []
for memberUID in memberUIDs:
memberRecord = yield self._directory.recordWithUID(memberUID)
if memberRecord is not None:
memberRecords.append(memberRecord)
yield record.setMembers(memberRecords)
success = True
else:
success = False
response = {
"success": success,
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
@GroupsCommand.responder
@inlineCallbacks
def groups(self, uid):
uid = uid.decode("utf-8")
log.debug("Groups: {u}", u=uid)
try:
record = (yield self._directory.recordWithUID(uid))
except Exception as e:
log.error("Failed in groups", error=e)
record = None
records = []
for group in (yield record.groups()):
records.append(group)
response = self._recordsToResponse(records)
log.debug("Responding with: {response}", response=response)
returnValue(response)
@VerifyPlaintextPasswordCommand.responder
@inlineCallbacks
def verifyPlaintextPassword(self, uid, password):
uid = uid.decode("utf-8")
log.debug("VerifyPlaintextPassword: {u}", u=uid)
record = (yield self._directory.recordWithUID(uid))
authenticated = False
if record is not None:
authenticated = (yield record.verifyPlaintextPassword(password))
response = {
"authenticated": authenticated,
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
@VerifyHTTPDigestCommand.responder
@inlineCallbacks
def verifyHTTPDigest(
self, uid, username, realm, uri, nonce, cnonce,
algorithm, nc, qop, response, method,
):
uid = uid.decode("utf-8")
username = username.decode("utf-8")
realm = realm.decode("utf-8")
uri = uri.decode("utf-8")
nonce = nonce.decode("utf-8")
cnonce = cnonce.decode("utf-8")
algorithm = algorithm.decode("utf-8")
nc = nc.decode("utf-8")
qop = qop.decode("utf-8")
response = response.decode("utf-8")
method = method.decode("utf-8")
log.debug("VerifyHTTPDigest: {u}", u=username)
record = (yield self._directory.recordWithUID(uid))
authenticated = False
if record is not None:
authenticated = (
yield record.verifyHTTPDigest(
username, realm, uri, nonce, cnonce,
algorithm, nc, qop, response, method,
)
)
response = {
"authenticated": authenticated,
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
@WikiAccessForUIDCommand.responder
@inlineCallbacks
def wikiAccessForUID(self, wikiUID, uid):
wikiUID = wikiUID.decode("utf-8")
uid = uid.decode("utf-8")
log.debug("WikiAccessForUID: {w} {u}", w=wikiUID, u=uid)
access = WikiAccessLevel.none
wikiRecord = (yield self._directory.recordWithUID(wikiUID))
userRecord = (yield self._directory.recordWithUID(uid))
if wikiRecord is not None and userRecord is not None:
access = (yield wikiRecord.accessForRecord(userRecord))
response = {
"access": access.name.encode("utf-8"),
}
log.debug("Responding with: {response}", response=response)
returnValue(response)
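# Hedged sketch of the client side of the continuation scheme implemented above.
# `ampClient` is assumed to be a connected twisted.protocols.amp.AMP instance
# talking to this protocol, and the commands are assumed to declare
# "continuation" as an optional response argument; transport setup is omitted.
@inlineCallbacks
def _exampleFetchAllRecords(ampClient, recordType):
    pickledFields = []
    response = yield ampClient.callRemote(
        RecordsWithRecordTypeCommand, recordType=recordType
    )
    pickledFields.extend(response["fieldsList"])
    while response.get("continuation"):
        response = yield ampClient.callRemote(
            ContinuationCommand, continuation=response["continuation"]
        )
        pickledFields.extend(response["fieldsList"])
    # each entry unpickles to a dictionary of record fields, see recordToDict()
    returnValue([pickle.loads(p) for p in pickledFields])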
class DirectoryProxyAMPFactory(Factory):
"""
"""
protocol = DirectoryProxyAMPProtocol
def __init__(self, directory):
self._directory = directory
def buildProtocol(self, addr):
return DirectoryProxyAMPProtocol(self._directory)
class DirectoryProxyOptions(Options):
optParameters = [[
"config", "f", DEFAULT_CONFIG_FILE, "Path to configuration file."
]]
def __init__(self, *args, **kwargs):
super(DirectoryProxyOptions, self).__init__(*args, **kwargs)
self.overrides = {}
def _coerceOption(self, configDict, key, value):
"""
Coerce the given C{val} to type of C{configDict[key]}
"""
if key in configDict:
if isinstance(configDict[key], bool):
value = value == "True"
elif isinstance(configDict[key], (int, float, long)):
value = type(configDict[key])(value)
elif isinstance(configDict[key], (list, tuple)):
value = value.split(',')
elif isinstance(configDict[key], dict):
raise UsageError(
"Dict options not supported on the command line"
)
elif value == 'None':
value = None
return value
def _setOverride(self, configDict, path, value, overrideDict):
"""
Set the value at path in configDict
"""
key = path[0]
if len(path) == 1:
overrideDict[key] = self._coerceOption(configDict, key, value)
return
if key in configDict:
if not isinstance(configDict[key], dict):
raise UsageError(
"Found intermediate path element that is not a dictionary"
)
if key not in overrideDict:
overrideDict[key] = {}
self._setOverride(
configDict[key], path[1:],
value, overrideDict[key]
)
def opt_option(self, option):
"""
Set an option to override a value in the config file. True, False, int,
and float options are supported, as well as comma separated lists. Only
one option may be given for each --option flag, however multiple
--option flags may be specified.
"""
if "=" in option:
path, value = option.split('=')
self._setOverride(
DEFAULT_CONFIG,
path.split('/'),
value,
self.overrides
)
else:
self.opt_option('%s=True' % (option,))
opt_o = opt_option
def postOptions(self):
config.load(self['config'])
config.updateDefaults(self.overrides)
self.parent['pidfile'] = None
@implementer(IPlugin, service.IServiceMaker)
class DirectoryProxyServiceMaker(object):
tapname = "caldav_directoryproxy"
description = "Directory Proxy Service"
options = DirectoryProxyOptions
def makeService(self, options):
"""
Return a service
"""
try:
from setproctitle import setproctitle
except ImportError:
pass
else:
setproctitle("CalendarServer Directory Proxy Service")
try:
directory = directoryFromConfig(config)
except Exception as e:
log.error("Failed to create directory service", error=e)
raise
log.info("Created directory service")
return strPortsService(
"unix:{path}:mode=660".format(
path=config.DirectoryProxy.SocketPath
),
DirectoryProxyAMPFactory(directory)
)
| {
"content_hash": "704f9824b88de8bb3af130b19f288957",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 79,
"avg_line_length": 34.16783216783217,
"alnum_prop": 0.61123618501842,
"repo_name": "trevor/calendarserver",
"id": "3fcd4c242222ff1b35ac933aac0170eb74c91a1e",
"size": "20146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txdav/dps/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "D",
"bytes": "13143"
},
{
"name": "JavaScript",
"bytes": "76566"
},
{
"name": "Python",
"bytes": "9260291"
},
{
"name": "Shell",
"bytes": "78964"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 885ec8d01cfc
Revises: 82edf10df5bc
Create Date: 2017-02-05 00:06:21.245761
"""
# revision identifiers, used by Alembic.
revision = '885ec8d01cfc'
down_revision = '82edf10df5bc'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('integration', sa.Column('slug', sa.String(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('integration', 'slug')
### end Alembic commands ###
| {
"content_hash": "237855c98add9203d6e3b12f3fa98866",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.6901639344262295,
"repo_name": "whittlbc/jarvis",
"id": "2a5f6f0f199adbb9bf78afb1aaffabb1908d7562",
"size": "610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/885ec8d01cfc_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3356"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "273630"
},
{
"name": "Ruby",
"bytes": "2963"
}
],
"symlink_target": ""
} |
'''
This is the DiffWord class, used to help evaluate
which words are different and which are not.
'''
class DiffWord:
# all the words should be DiffWords
def __init__(self, OWord="", isDiff=True, idex=[-1, -1]):
# default constructor for a word for some reason.
self.word = OWord # what the word is
self.isDifferent = isDiff # if the word is different or nah
self.index = idex # the first position
# of the list is the position of the
# word in the original file, the second position of it is in the
# derived file.
def __str__(self):
return self.word
# Observers
def getWord(self): # get the word
ans = self.word
return ans
def isDiff(self):
return self.isDifferent is True
def getIndex(self):
return self.index
def get_pos_in_original(self):
return self.index[0]
def get_pos_in_derived(self):
return self.index[1]
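# Hedged usage sketch: a word sitting at index 3 in the original file and index
# 5 in the derived file, flagged as unchanged.
def _diffword_example():
    w = DiffWord("hello", isDiff=False, idex=[3, 5])
    return (w.getWord(), w.isDiff(),
            w.get_pos_in_original(), w.get_pos_in_derived())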
| {
"content_hash": "f5b3b7ef4ff9f84d6215ac576dcf978b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 25.743589743589745,
"alnum_prop": 0.6125498007968128,
"repo_name": "faroos3/QuoteR",
"id": "14da66001f24ef850df5ca90e3f08272a42b8c7d",
"size": "1004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "client/GUI/DiffWord.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32106"
}
],
"symlink_target": ""
} |
"""
inlining/optimization test (the print statements should all print constant values and there shouldn't be any loads, though the stores might not be eliminated. also all refcounting should be elided.)
"""
l = []
l.append(123)
l.append(321)
print l[0], len(l)
l[1] = 666
print l[1]
| {
"content_hash": "19b1134f6b9694e252787f9a09040b7d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 198,
"avg_line_length": 28.4,
"alnum_prop": 0.7288732394366197,
"repo_name": "kmod/icbd",
"id": "5d3d104dad0d929c057a8c913bc0b707c2d135fd",
"size": "284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icbd/compiler/tests/9.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33042"
},
{
"name": "C++",
"bytes": "35981"
},
{
"name": "CSS",
"bytes": "8888"
},
{
"name": "JavaScript",
"bytes": "3602"
},
{
"name": "Makefile",
"bytes": "48655"
},
{
"name": "Objective-C",
"bytes": "88"
},
{
"name": "Python",
"bytes": "10340340"
},
{
"name": "Shell",
"bytes": "18865"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.http import HttpResponse
from django.utils.translation import pgettext as _
def test_view(request):
return HttpResponse(_('view context', 'test view string'))
| {
"content_hash": "3c1a56c90d721f30c3912d82668c5934",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 62,
"avg_line_length": 29.5,
"alnum_prop": 0.7627118644067796,
"repo_name": "kmichel/po-localization",
"id": "10081ee5540383e00b6a93e10669564e0e19da40",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "po_localization/tests/test_app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91595"
}
],
"symlink_target": ""
} |
"""Fichier contenant le type instrument."""
from .base import BaseType
class Instrument(BaseType):
"""Type d'objet: instrument.
"""
nom_type = "instrument"
| {
"content_hash": "189acc179e43c878730eac9b6dfa6516",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 43,
"avg_line_length": 16.818181818181817,
"alnum_prop": 0.6162162162162163,
"repo_name": "vlegoff/tsunami",
"id": "783af518ada29d12c3e8f9e9a52a3984e7b18dce",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/objet/types/instrument.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
return mb_sig.raw[:sig_size0.value]
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
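# Hedged self-test sketch (illustrative only): derive a key from a fixed 32-byte
# secret, sign a message hash, and check the signature round-trips through
# CPubKey. Never reuse a constant secret outside of tests.
def _example_sign_verify():
    secret = hashlib.sha256(b'test framework example secret').digest()
    key = CECKey()
    key.set_secretbytes(secret)
    msg_hash = hashlib.sha256(b'example message').digest()
    sig = key.sign(msg_hash)
    pub = CPubKey(key.get_pubkey())
    return pub.is_fullyvalid and pub.verify(msg_hash, sig)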
| {
"content_hash": "d9e2f23c33b122ebcc22548d53521de9",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 130,
"avg_line_length": 34.86057692307692,
"alnum_prop": 0.6400496483243691,
"repo_name": "hideoussquid/aureus-12-bitcore",
"id": "ec7fb690292cc4eccde1ac91536e937ac3393188",
"size": "7365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/test_framework/key.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "619477"
},
{
"name": "C++",
"bytes": "4294663"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2099"
},
{
"name": "M4",
"bytes": "141054"
},
{
"name": "Makefile",
"bytes": "90935"
},
{
"name": "Objective-C",
"bytes": "2785"
},
{
"name": "Objective-C++",
"bytes": "7236"
},
{
"name": "Python",
"bytes": "703055"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "3753"
},
{
"name": "Shell",
"bytes": "35693"
}
],
"symlink_target": ""
} |
from ..math import vectorops,so3,se3
class SimLogger:
"""A CSV logger for a simulation. """
def __init__(self,sim,state_fn,contact_fn=None,colliding='all',saveheader=True):
"""
Logs a simulation to a CSV file.
Arguments:
sim (Simulator): the klampt.Simulator object you wish to use
state_fn (str): the file that you want to save state to
contact_fn (str, optional): the file that you want to save contacts to
(or None if you don't want them)
colliding (list, optional): either 'all' (default) or a list of all objects
and object ids that you want to check self collisions between
saveheader (bool, optional): true if you want a CSV header giving the name of
each value. Default = True
"""
self.saveSensors = False
self.sim = sim
self.fn = state_fn
self.f = None
if state_fn != None:
print "SimLogger: Saving state to",state_fn
self.f = open(state_fn,'w')
self.f_contact = None
if contact_fn != None:
print "SimLogger: Saving contacts to",contact_fn
self.f_contact = open(contact_fn,'w')
self.colliding = []
if colliding=='all':
self.sim.enableContactFeedbackAll()
n = self.sim.world.numIDs()
self.colliding = range(n)
else:
for i in colliding:
if isinstance(i,int):
self.colliding.append(i)
elif hasattr(i,'getID'):
self.colliding.append(i.getID())
elif isinstance(i,str):
raise NotImplementedError("Lookup id from entity name")
else:
raise ValueError("Invalid object given in the colliding list")
if saveheader:
#need to call simulate to get proper sensor readings...
self.sim.simulate(0)
self.saveHeader()
self.saveContactHeader()
return
def saveHeader(self,extra=[]):
if self.f is None:
print "SimLogger: No state file specified"
return
world = self.sim.world
elements = ['time']
for i in xrange(world.numRobots()):
n = world.robot(i).getName()
elements.append(n+'_cmx')
elements.append(n+'_cmy')
elements.append(n+'_cmz')
for j in xrange(world.robot(i).numLinks()):
elements.append(n+'_qcmd['+world.robot(i).link(j).getName()+']')
for j in xrange(world.robot(i).numLinks()):
elements.append(n+'_dqcmd['+world.robot(i).link(j).getName()+']')
for j in xrange(world.robot(i).numLinks()):
elements.append(n+'_q['+world.robot(i).link(j).getName()+']')
for j in xrange(world.robot(i).numLinks()):
elements.append(n+'_dq['+world.robot(i).link(j).getName()+']')
for j in xrange(world.robot(i).numDrivers()):
elements.append(n+'_t['+str(j)+']')
if self.saveSensors:
j = 0
while True:
s = self.sim.controller(i).sensor(j)
if len(s.name())==0:
break
names = s.measurementNames()
for sn in range(len(names)):
elements.append(n+'_'+s.name()+'['+names[sn]+']')
j += 1
for i in xrange(world.numRigidObjects()):
n = world.rigidObject(i).getName()
elements += [n+'_'+suffix for suffix in ['comx','comy','comz','x','y','z','rx','ry','rz','dx','dy','dz','wx','wy','wz']]
if extra:
elements += extra
self.f.write(','.join(elements))
self.f.write('\n')
return
def saveContactHeader(self):
if self.f_contact is None:
print "SimLogger: No contact file specified"
return
elements = ['time','body1','body2']
elements += ['numContacts']
elements += ['cpx_avg','cpy_avg','cpz_avg','cnx_avg','cny_avg','cnz_avg','fx_avg','fy_avg','fz_avg','mx_avg','my_avg','mz_avg']
self.f_contact.write(','.join(elements))
self.f_contact.write('\n')
def saveStep(self,extra=[]):
sim = self.sim
world = sim.world
sim.updateWorld()
values = []
values.append(sim.getTime())
for i in xrange(world.numRobots()):
robot = world.robot(i)
values += robot.getCom()
controller = sim.controller(i)
try:
values += controller.getCommandedConfig()
values += controller.getCommandedVelocity()
except Exception:
values += [0.0]*robot.numLinks()
values += [0.0]*robot.numLinks()
values += sim.getActualConfig(i)
values += sim.getActualVelocity(i)
assert len(sim.getActualTorques(i)) == world.robot(i).numDrivers()
values += sim.getActualTorques(i)
if self.saveSensors:
j = 0
while True:
s = self.sim.controller(i).sensor(j)
if len(s.name())==0:
break
meas = s.getMeasurements()
assert len(meas) == len(s.measurementNames())
values += meas
j += 1
for i in xrange(world.numRigidObjects()):
obj = world.rigidObject(i)
T = obj.getTransform()
values += se3.apply(T,obj.getMass().getCom())
values += T[1]
values += so3.moment(T[0])
values += sim.body(obj).getVelocity()[1]
values += sim.body(obj).getVelocity()[0]
if self.f_contact:
for i,id in enumerate(self.colliding):
for j in range(i+1,len(self.colliding)):
id2 = self.colliding[j]
if sim.hadContact(id,id2):
clist = sim.getContacts(id,id2);
f = sim.contactForce(id,id2)
m = sim.contactTorque(id,id2)
pavg = [0.0]*3
navg = [0.0]*3
for c in clist:
pavg = vectorops.add(pavg,c[0:3])
navg = vectorops.add(navg,c[3:6])
if len(clist) > 0:
pavg = vectorops.div(pavg,len(clist))
navg = vectorops.div(navg,len(clist))
body1 = world.getName(id)
body2 = world.getName(id2)
cvalues = [sim.getTime(),body1,body2,len(clist)]
cvalues += pavg
cvalues += navg
cvalues += f
cvalues += m
self.f_contact.write(','.join(str(v) for v in cvalues))
self.f_contact.write('\n')
if extra:
values += extra
if not (self.f is None):
self.f.write(','.join([str(v) for v in values]))
self.f.write('\n')
def close(self):
if not (self.f is None):
self.f.close()
if not (self.f_contact is None):
self.f_contact.close()
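# Hedged usage sketch: drives a short simulation while logging each step with the
# SimLogger above. It assumes a valid Klamp't world file path and the standard
# klampt.WorldModel / klampt.Simulator API.
def _example_logging_run(world_file, duration=1.0, dt=0.01):
    from klampt import WorldModel, Simulator
    world = WorldModel()
    if not world.readFile(world_file):
        raise IOError("Unable to load world file " + world_file)
    sim = Simulator(world)
    logger = SimLogger(sim, "state.csv", contact_fn="contacts.csv")
    t = 0.0
    while t < duration:
        sim.simulate(dt)
        logger.saveStep()
        t += dt
    logger.close()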
class SimLogPlayback:
"""A replay class for simulation traces from SimLogger or the SimTest app. """
def __init__(self,sim,state_fn,contact_fn=None):
"""
Loads from a CSV file.
Arguments:
sim (Simulator): the klampt.Simulator object you wish to use. This should be
instantiated with all objects that you recorded from.
state_fn (str): the state file that you want to load
contact_fn (str, optional): the contact file that you want to load
"""
import csv
self.sim = sim
self.state_header = []
self.state_array = []
self.contact_header = []
self.contact_array = []
self.state_to_index = {}
self.contact_to_index = {}
if state_fn != None:
print "SimLogPlayback: Loading state from",state_fn
f = open(state_fn,'r')
reader = csv.reader(f)
rowno = 0
for row in reader:
if rowno == 0:
self.state_header = row
self.state_to_index = dict((v,i) for (i,v) in enumerate(self.state_header))
else:
try:
self.state_array.append([float(v) for v in row])
except ValueError:
print "Error in CSV file",state_fn,"on line",rowno+1,"value is not a float"
raise
rowno += 1
f.close()
if contact_fn != None:
print "SimLogPlayback: Loading contacts from",contact_fn
            f = open(contact_fn,'r')
reader = csv.reader(f)
rowno = 0
for row in reader:
if rowno == 0:
self.contact_header = row
self.contact_to_index = dict((v,i) for (i,v) in enumerate(self.contact_header))
else:
try:
self.contact_array.append([float(v) for v in row])
except ValueError:
print "Error in CSV file",contact_fn,"on line",rowno+1,"value is not a float"
raise
rowno += 1
f.close()
#check that the simulation matches the log
warned = False
self.robot_indices = []
self.rigid_object_indices = []
sim = self.sim
world = sim.world
if "time" not in self.state_to_index:
print "SimLogPlayback: Warning, 'time' column is not present in log file"
robot_patterns = {'q':'%s_q[%s]','dq':'%s_dq[%s]'}
for i in xrange(world.numRobots()):
indices = {}
found = True
robot = world.robot(i)
for name,p in robot_patterns.iteritems():
nindices = []
for j in xrange(robot.numLinks()):
item = p % (robot.getName(),robot.link(j).getName())
if item not in self.state_to_index:
found=False
break
nindices.append(self.state_to_index[item])
if not found:
break
indices[name] = nindices
if not found:
print "SimLogPlayback: Warning, not all elements of robot",robot.getName(),"present in log file"
warned = True
self.robot_indices.append(None)
continue
#TODO: load sensor measurements
self.robot_indices.append(indices)
rigid_object_items = ['x','y','z','rx','ry','rz','dx','dy','dz','wx','wy','wz']
for i in xrange(world.numRigidObjects()):
indices = {}
found = True
obj = world.rigidObject(i)
for name in rigid_object_items:
item = obj.getName()+'_'+name
if item not in self.state_to_index:
print "Missing item",item
found=False
break
indices[name] = self.state_to_index[item]
if not found:
print "SimLogPlayback: Warning, not all elements of rigid object",obj.getName(),"present in log file"
warned = True
self.rigid_object_indices.append(None)
continue
#TODO: load sensor measurements
self.rigid_object_indices.append(indices)
if warned:
raw_input("Press enter to continue")
return
def updateSim(self,time=-1,timestep=-1):
sim = self.sim
world = sim.world
if time >= 0:
try:
timeindex = self.state_to_index['time']
except IndexError:
raise ValueError("'time' column is not present in playback file, can't update by time")
timelist = [v[timeindex] for v in self.state_array]
for i in xrange(len(timelist)-1):
if time < timelist[i]:
break
#print "Time",time,"Time step",timestep
self.updateSim(timestep = i)
return
if timestep >= len(self.state_array):
timestep = len(self.state_array)-1
state = self.state_array[timestep]
try:
timeindex = self.state_to_index['time']
#sim.fakeSimulate(state[timeindex] - sim.getTime())
#TODO: change the simulation time
except IndexError:
pass
for i in xrange(world.numRobots()):
indices = self.robot_indices[i]
if indices == None:
continue
robot = world.robot(i)
robot.setConfig([state[ind] for ind in indices['q']])
robot.setVelocity([state[ind] for ind in indices['dq']])
for j in xrange(robot.numLinks()):
link = robot.link(j)
sim.body(link).setTransform(*link.getTransform())
sim.body(link).setVelocity(link.getAngularVelocity(),link.getVelocity())
#TODO: update sensors
for i in xrange(world.numRigidObjects()):
obj = world.rigidObject(i)
indices = self.rigid_object_indices[i]
if indices == None:
continue
t = (state[indices['x']],state[indices['y']],state[indices['z']])
R = so3.from_moment((state[indices['rx']],state[indices['ry']],state[indices['rz']]))
v = (state[indices['dx']],state[indices['dy']],state[indices['dz']])
w = (state[indices['wx']],state[indices['wy']],state[indices['wz']])
obj.setTransform(R,t)
obj.setVelocity(w,v)
sim.body(obj).setTransform(R,t)
sim.body(obj).setVelocity(w,v)
| {
"content_hash": "1e72dfc2b7d3edd17f53a90889ebfef2",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 135,
"avg_line_length": 42.319526627218934,
"alnum_prop": 0.4951062639821029,
"repo_name": "krishauser/Klampt",
"id": "12551dac49b630a63df3d55edfa7f3eb1e1766e0",
"size": "14304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/python2_version/klampt/sim/simlog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1578"
},
{
"name": "C",
"bytes": "114047"
},
{
"name": "C++",
"bytes": "9529503"
},
{
"name": "CMake",
"bytes": "53132"
},
{
"name": "CSS",
"bytes": "365"
},
{
"name": "Dockerfile",
"bytes": "514"
},
{
"name": "HTML",
"bytes": "39706"
},
{
"name": "JavaScript",
"bytes": "62209"
},
{
"name": "Makefile",
"bytes": "6997"
},
{
"name": "Python",
"bytes": "6060576"
},
{
"name": "QMake",
"bytes": "3587"
},
{
"name": "SWIG",
"bytes": "327289"
},
{
"name": "Shell",
"bytes": "279"
}
],
"symlink_target": ""
} |
__author__ = "Raffaele Mazzitelli"
__credits__ = ["Raffaele Mazzitelli"]
__maintainer__ = "Raffaele Mazzitelli"
__email__ = "[email protected]"
__status__ = "Test"
from lib.cloud_lib.cloud_spooler import CloudSpooler
from pprint import pprint
import sys
import json
import readline
import os
import magic
import time
def get_params(argv):
if(len(argv)<2):
print "You must specify the configuration file"
print "usage:"
print "%s /path/to/conf_file"%argv[0]
exit(1)
else:
try:
conf_file=argv[1]
print "loading conf file %s ..."%conf_file
params=json.load(open(conf_file))
params["conf_file"]=conf_file
return params
except:
print "your conf file : %s"
print "is not valid json or doesn't exist"
exit(1)
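
# Illustrative sketch only: the conf file consumed by get_params() is plain JSON.
# The keys below are the ones read in main(); the values are placeholders, not
# real credentials, and "printer" may be omitted (a picker is shown instead).
EXAMPLE_CONF = """
{
    "client_email": "[email protected]",
    "private_key": "-----BEGIN PRIVATE KEY----- ... -----END PRIVATE KEY-----",
    "printer": "office-printer"
}
"""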
def getFileToPrint():
readline.parse_and_bind('tab: complete')
while True:
line = raw_input('File to Print: ')
if os.path.isfile(line):
mime_type=magic.from_file(line,mime=True)
print mime_type
if mime_type == 'application/pdf' :
return line
else:
print "only pdf file are accepted"
else:
print "%s is not a file, please choose a valid file" % line
def choosePrinter(cloudSpooler):
printers=cloudSpooler.getPrinters()
possible_choise=[]
for printer in printers:
possible_choise.append(printers[printer])
print "\nAvailable printer : "
for i in range(len(possible_choise)):
display_name=possible_choise[i]['displayName'].lower()
print "%d) %s"%(i,display_name)
print ""
while True:
line = raw_input('Choose a number from the list above : ')
if line.isdigit():
selected=int(line)
if selected in range(len(possible_choise)):
return possible_choise[selected]
else:
last=len(possible_choise)-1
print "please select a valid number between 0 and %d"%last
else:
print "please select a number"
def getPrinter(cloudSpooler,printer_in):
printers=cloudSpooler.getPrinters()
if printer_in:
printer_in=printer_in.lower()
for printer in printers:
display_name=printers[printer]['displayName'].lower()
if printer_in == display_name:
#found
return printers[printer]
print "The printer you in your conf file '%s' is not available on google cloud print"%printer_in
return choosePrinter(cloudSpooler)
def checkSpooler(cloudSpooler,job_id=None):
alljobs=cloudSpooler.getJobs()
#pprint(alljobs)
if job_id in alljobs.keys():
statej=alljobs[job_id]['uiState']['summary']
file_name=alljobs[job_id]['title'].split("/")[-1]
createTime=int(alljobs[job_id]['createTime'])/1000
createTime=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(createTime))
return {'file':file_name,'state':statej,'time':createTime}
print "File non present in spooler status"
return None
def main(argv) :
params=get_params(argv)
cloudSpooler=CloudSpooler(params['client_email'],params['private_key'])
if "printer" not in params:
params['printer']=None
while True:
printer=getPrinter(cloudSpooler,params['printer'])
printer_name=printer['displayName']
printer_id=printer['id']
print "checking status of printer %s"%printer_name
status=cloudSpooler.getPrinterStatus(printer_id)
print status
if status != "ONLINE":
print "the printer you choosed is not ONLINE, please choose another printer"
else:
file_to_print=getFileToPrint()
print "Printing %s ..." %file_to_print
            job=cloudSpooler.submitPdf(printer_id,file_to_print)
            if job and job['success']:
                job_id=job['job']['id']
                print "your file has been uploaded correctly, checking status ..."
while True:
status=checkSpooler(cloudSpooler,job_id)
if status:
print "%s %s %s"%(status['time'],status['file'],status['state'])
if status['state']=="DONE":
exit(0);
#sleep 10 seconds
time.sleep(10)
else:
print "sorry was not possble to upload your file for printing"
if __name__ == '__main__' :
sys.exit(main(sys.argv))
| {
"content_hash": "b6319e4f682b864e4845b9fecf7911ba",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 104,
"avg_line_length": 30.95,
"alnum_prop": 0.5454361873990307,
"repo_name": "escube/GoogleCloudSpooler",
"id": "55deb46f5f1a400c3c0b2af81243ba5ed447f28a",
"size": "4971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "standalone.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18399"
}
],
"symlink_target": ""
} |
"""
Pocket Class
for lap_joint script
Pocket:
.type string name of pocket type
.joint Joint corresponding Joint object
.post Post parent Post object
.index Integer currently 0 or 1. index of pocket in list self.joint.pockets
.origin Point3D center of pocket face
.orientation Plane at origin, normal pointing towards other post,
x axis aligned to Post's axis
.profilePlane Plane orientation rotated parallel to Post's profile
.normal Vector3D normal of orientation plane
.rotation float rotation of joint off of vertical on oriented post in degrees
.profileBounds BoundingBox Post's end face in pocket's orientation
.face NurbsSurface sheared millable pocket face
.holes list list of bolt hole center Lines
.toolpath Toolpath group of operations for milling this pocket
"""
import Rhino
import scriptcontext as sc
import rhinoscriptsyntax as rs
from copy import copy
import math
import common
from toolpath import *
class Pocket(object):
"""One half of a joint, cut into a post."""
def __init__(self, post, index, joint):
"""Gather information about this Pocket."""
#index: Pocket/post number in joint
self.joint = joint
self.post = post
self.post.isConnected = True
self.type = 'default'
post.pockets.append(self)
#set pocket index so we can find other pockets in this joint
self.index = index
#joint axis is from p0 to p1
self.normal = self.joint.axis
if self.index == 1:
#flip normal line if pocket is on p1
self.normal = -self.normal
#origin is same as joint's
self.origin = self.joint.origin
self.orientation = rs.PlaneFromNormal(self.origin, self.normal, post.axis.Direction)
self.profilePlane = rs.RotatePlane(self.orientation, 90, self.orientation.YAxis)
self.profilePlane.Origin = self.post.origin
#find extents of this post at joint
self.profileBounds = self.post.profile.GetBoundingBox(self.orientation)
#find rotation of pocket on post in degrees
self.rotation = common.vectorAngle(self.post.orientation.XAxis,
self.normal, self.post.orientation) + 180
#transform from global to rotated post local
self.globalToMill = self.post.globalToSelf * Rhino.Geometry.Transform.Rotation(-self.rotation * math.pi/180,
self.post.axis.UnitTangent, self.post.origin)
#transform from rotated post local to global
t = self.globalToMill.TryGetInverse()
if t[0]:
self.millToGlobal = t[1]
else:
raise NameError("Couldn't find inverse of rotated post -> global transform")
###########
#Pocket Class Functions
def info(self):
"""Displays a text summary of this Pocket."""
print "Pocket: <{0}> on {1}\nOrigin: {2}\n----".format(
self.type, self.post.printId(), common.printPoint3d(self.origin))
def display(self, objects=None):
"""Create objects in viewport to display information about this Joint
Creates: postLabel id of other post
jointLabel id of joint
orientation orientation plane
bounds bounding box of post's end face
center "center" of pocket face
face millable pocket face
farthest plane on post's farthest edge into joint
holes center lines for drill holes
toolpath milling path for pocket
axis normal of pocket face, at center
Returns: list of guids of added objects
"""
guids = []
if objects == None:
objects = ['holes', 'toolpath']
if 'postLabel' in objects:
guids.append(rs.AddTextDot(self.joint.posts[not self.index].printId(), self.origin))
if 'jointLabel' in objects:
guids.append(rs.AddTextDot(self.joint.printId(), self.origin))
if 'orientation' in objects:
#display orientation plane
guids.append(common.displayPlane(self.orientation))
if 'bounds' in objects:
#display post profile bounding box
guids.append(common.displayBoundingBox(self.profileBounds, self.orientation, self.profilePlane))
if 'center' in objects:
guids.append(sc.doc.Objects.AddPoint(self.orientation.Origin))
if 'face' in objects:
#display pocket face
guids.append(sc.doc.Objects.AddSurface(self.face))
if 'holes' in objects:
#display any drill holes
for h in self.holes:
guids.append(sc.doc.Objects.AddLine(h))
if 'toolpath' in objects:
#display milling paths
newguids = self.toolpath.display(transform=self.post.selfToGlobal)
#print newguids
guids.extend(newguids)
if 'axis' in objects:
#display pocket face normal
g = sc.doc.Objects.AddLine(self.origin, self.origin + self.normal)
guids.append(g)
return guids
def UVToPost(self, p, A=0):
"""change from UV coordinates on skewed pocket face to post's coordinates
A: additional rotation relative to pocket's rotation
"""
if A:
globalToMill = self.post.globalToSelf * \
Rhino.Geometry.Transform.Rotation(-(self.rotation + A) * math.pi/180,
self.post.axis.UnitTangent, self.post.origin)
else:
globalToMill = self.globalToMill
point = (self.face.PointAt(p.X, p.Y) + self.normal * p.Z)
point.Transform(globalToMill)
return point
def getBounds(self):
"""Find the extents of this post at pocket
Returns: bounding box of post profile oriented to pocket orientation
"""
return self.post.profile.GetBoundingBox(self.orientation)
def create(self):
"""Finish creating this pocket, once common joint info has been determined"""
#find pocket face boundary
self.face = self.createPocketFace()
#find bolt hole
self.holes = self.createHoles()
#create milling path
self.toolpath = self.makeToolpath()
def createPocketFace(self):
"""Find pocket face boundary
Returns: millable pocket face
"""
surface = copy(self.joint.face)
#swap U and V for second Post
if self.index == 1:
surface = surface.Transpose()
#extend U edges to allow for endmill to clear post
surface = surface.Extend(Rhino.Geometry.IsoStatus.West, common.settings.gcode['millDiameter'] * 3, False)
surface = surface.Extend(Rhino.Geometry.IsoStatus.East, common.settings.gcode['millDiameter'] * 3, False)
#extend V edges for reveal
if 'reveal' in common.settings.pocket:
surface = surface.Extend(Rhino.Geometry.IsoStatus.North, common.settings.pocket['reveal'], False)
surface = surface.Extend(Rhino.Geometry.IsoStatus.South, common.settings.pocket['reveal'], False)
return surface
def createHoles(self):
"""find center of bolt hole(s)
This version creates a single hole at the center of the pocket face
Returns: list of center lines starting at pocket face
"""
#get u,v center of pocket face
center = [self.joint.face.Domain(a).Mid for a in [0,1]]
#translate to global
cPoint = self.joint.face.PointAt(*center)
#return center line
return [Rhino.Geometry.Line(cPoint, -self.orientation.Normal, 2)]
def getSection(self, d):
"""Find bounding box of post geometry above given distance from pocket face
Returns: bounding box containing relevant section of profile
"""
#construct plane with which to slice profile
plane = copy(self.orientation)
#move joint plane
plane.Translate(plane.ZAxis * d)
#intersect with Post profile
intersections = Rhino.Geometry.Intersect.Intersection.CurvePlane(self.post.profile, plane, 0)
if intersections:
#split profile at intersection points
pieces = self.post.profile.Split([i.ParameterA for i in intersections])
else:
pieces = [self.post.profile]
#initialize bounding box
bounds = Rhino.Geometry.BoundingBox.Empty
for p in pieces:
#get bounding box for each piece of profile curve
box = p.GetBoundingBox(self.orientation)
#keep this bounding box if its center is above the plane
center = box.Center
#bounding box coordinates are local to the plane where the box was created - convert to global
center.Transform(Rhino.Geometry.Transform.ChangeBasis(self.orientation,
Rhino.Geometry.Plane.WorldXY))
if plane.DistanceTo(center) > 0:
#add this bounding box to running total
bounds.Union(box)
#common.displayBoundingBox(bounds, self.orientation, self.profilePlane)
return bounds
def getFaceBounds(self, d, uEnds=False, vEnds=False):
"""Calculate bounds for a face, using default ranges if inputs are False
Returns: (uRange, vRange)
"""
#zigs
#find post width at this height, adding room for endmill diameter
bounds = self.getSection(d)
uPost = Rhino.Geometry.Interval(
*[self.face.ClosestPoint(self.orientation.PointAt(0,y,0))[1] for y in
[bounds.Min.Y - 1.5*common.settings.gcode['millDiameter'],
bounds.Max.Y + 1.5*common.settings.gcode['millDiameter']]])
if uEnds:
uRange = list(uEnds)
if uRange[0] is False:
uRange[0] = uPost.Min
if uRange[1] is False:
uRange[1] = uPost.Max
uRange = Rhino.Geometry.Interval(*uRange)
else:
uRange = uPost
#zags
if vEnds:
vRange = Rhino.Geometry.Interval(*vEnds)
else:
vRange = self.face.Domain(1)
#shrink ranges to account for endmill radius
UVmillR = common.settings.gcode['millDiameter'] * self.joint.skewFactor / 2
uRange = Rhino.Geometry.Interval(uRange.Min + UVmillR, uRange.Max - UVmillR)
vRange = Rhino.Geometry.Interval(vRange.Min + UVmillR, vRange.Max - UVmillR)
return [uRange, vRange]
def facePath(self, d, start=[False, False], uEnds=False, vEnds=False, dir=1):
"""create minimal zig-zag facing path given distance from pocket face
start in corner specified by `start`:
            [0,0] - min U, min V
[1,1] - max U, max V
dir: 0 = long moves along same V, 1 = long moves along same U
Returns: [end corner, list of global points]
"""
UVmillD = common.settings.gcode['millDiameter'] * self.joint.skewFactor
#get bounds on pocket face for this level
bounds = self.getFaceBounds(d, uEnds, vEnds)
uRange = bounds[0]
vRange = bounds[1]
#start in correct corner
path = [Rhino.Geometry.Point3d(uRange.Max if start[0] else uRange.Min,
vRange.Max if start[1] else vRange.Min, d)]
#switchback in U or V. more concise way to do this?
if dir:
#long moves along same U
#direction keeps track of alternating switchback directions (U)
uDirection = not start[0]
vDirection = -1 if start[1] else 1
#switchback until we've gone just over the edge in either direction
while vRange.Min <= path[-1].Y <= vRange.Max:
path.append(copy(path[-1]))
#move to other side of pocket
path[-1].X = uRange.Max if uDirection else uRange.Min
path.append(copy(path[-1]))
#step over to next zig
path[-1].Y += vDirection * common.settings.gcode['stepOver'] * UVmillD
#reverse direction
uDirection = not uDirection
#shorten the final zag to be at the edge of the pocket
path[-1].Y = vRange.Min if start[1] else vRange.Max
#add a final zig to get the exact pocket width
path.append(copy(path[-1]))
path[-1].X = uRange.Max if uDirection else uRange.Min
#change to rotated post's coordinates
path = [self.UVToPost(p) for p in path]
return [[uDirection, vDirection > 0], path]
else:
#long moves along same V
#direction keeps track of alternating switchback directions (U)
uDirection = -1 if start[0] else 1
vDirection = not start[1]
#switchback until we've gone just over the edge in either direction
while uRange.Min <= path[-1].X <= uRange.Max:
path.append(copy(path[-1]))
#move to other side of pocket
path[-1].Y = vRange.Max if vDirection else vRange.Min
path.append(copy(path[-1]))
#step over to next zig
path[-1].X += uDirection * common.settings.gcode['stepOver'] * UVmillD
#reverse direction
vDirection = not vDirection
#shorten the final zag to be at the edge of the pocket
path[-1].X = uRange.Min if start[0] else uRange.Max
#add a final zig to get the exact pocket width
path.append(copy(path[-1]))
path[-1].Y = vRange.Max if vDirection else vRange.Min
#change to rotated post's coordinates
path = [self.UVToPost(p) for p in path]
return [[uDirection > 0, vDirection], path]
def blockPath(self, startZ, endZ, uEnds=False, vEnds=False, finish=False, dir=1):
"""Create path to mill a face from startZ to endZ
uEnds, vEnds: list of endpoints for pocket range in this direction.
False uses default value on that end of that direction
Returns: toolpath object
"""
#start at first cut layer for block, or bottom of pocket if closer
currentZ = max([startZ - \
(common.settings.gcode['stepDown'] * common.settings.gcode['millDiameter']),
endZ])
#create toolpath object
toolpath = Toolpath()
result = [[False, False], False]
while True:
#mill pocket face
result = self.facePath(currentZ, result[0], uEnds, vEnds, dir=dir)
toolpath.operations.append(Mill(result[1]))
#insert rapid move before this mill
rapid = copy(toolpath.operations[-1].path[0])
toolpath.operations[-1].path.pop(0)
toolpath.operations.insert(-1, Rapid(rapid, A=None, clear=False))
if currentZ > endZ:
#at least one more pass - move to next level
currentZ -= (common.settings.gcode['stepDown'] * common.settings.gcode['millDiameter'])
if currentZ < endZ:
#within one layer of pocket face - finish at pocket face
currentZ = endZ
else:
#done!
break
#clean up pocket edge if needed
if finish:
#get bounds on pocket face for this level
bounds = self.getFaceBounds(currentZ, uEnds, vEnds)
uRange = bounds[0]
vRange = bounds[1]
#corner vertices around edge of pocket
edge = [Rhino.Geometry.Point3d(p[0],p[1],currentZ) for p in [[uRange.Min,
vRange.Min],[uRange.Max,vRange.Min],[uRange.Max,vRange.Max],[uRange.Min,vRange.Max]]]
#sorry. incomprehensible. This finds the index of the starting vertex in edge
start = int((1 if result[0][1] else -1) * (result[0][0] + .5) + 1.5)
#rotate edge so that start is at the front
rotated = edge[start:] + edge[:start]
#cut along the two sides with ridges from the zigzags
path = rotated[0:3]
#change to post's coordinates
path = [self.UVToPost(p) for p in path]
toolpath.operations.append(Mill(path))
return toolpath
def makeToolpath(self):
"""Create path to mill this pocket
Returns: Toolpath object
"""
#start at top of post
startZ = self.profileBounds.Max.Z
#mill down to pocket face
toolpath = self.blockPath(startZ, 0)
#add initial rotation
toolpath.operations[0].A = self.rotation
#clear post at beginning of pocket
toolpath.operations[0].clear = True
return toolpath
def makeGcode(self, gcode=False):
"""Make gcode for this pocket
Returns: gcode string
"""
gcode.text += "\n(Starting Pocket {0})\n".format(self.joint.printId())
#generate gcode from toolpath
self.toolpath.makeGcode(gcode=gcode)
return gcode
# End Pocket Class #
###########
#Pocket Variants
class Pocket_mfBar(Pocket):
"""One half of a joint, cut into a post.
Creates a pocket with sliding bar.
index 0: female - groove perpendicular to post axis
index 1: male - bar parallel to post axis
"""
def __init__(self, post, index, joint):
"""Gather information about this Pocket."""
super(Pocket_mfBar, self).__init__(post, index, joint)
self.type = 'Male/Female Bar'
###########
#Pocket_mfBar Class Functions
def display(self, objects=None):
"""Change defaults from base Pocket class"""
if objects == None:
objects = ['toolpath']
return super(Pocket_mfBar, self).display(objects)
def create(self):
"""Finish creating this pocket, once common joint info has been determined"""
#find pocket face boundary
self.face = self.createPocketFace()
#find pocket holes
self.holes = self.createHoles()
#create milling path
self.toolpath = self.makeToolpath()
def makeToolpath(self):
"""Create path to mill this pocket
index 0: female - groove perpendicular to post axis
index 1: male - bar parallel to post axis
Returns: Toolpath object
"""
#start at top of post
startZ = self.profileBounds.Max.Z
localOrigin = self.face.ClosestPoint(self.origin)
if self.index == 0:
#female - groove perpendicular to post axis
#mill down to top of groove
toolpath = self.blockPath(startZ, 0)
#find half of groove width
offset = (float(common.settings.pocket['barWidth'])/2 + common.settings.pocket['gap']) \
* round(self.joint.skewFactor,5)
#create V interval for groove
grooveRange = [localOrigin[2] - offset, localOrigin[2] + offset]
#mill groove
newPath = self.blockPath(0,
-common.settings.pocket['barHeight'], vEnds=grooveRange)
#newPath.operations[0].clear = True
toolpath.extend(newPath)
else:
#male - bar parallel to post axis
#mill down to top of bar
toolpath = self.blockPath(startZ, common.settings.pocket['barHeight'])
#find half of bar width
offset = float(common.settings.pocket['barWidth'])/2 * round(self.joint.skewFactor,5)
#create U interval for bar
barRange = [localOrigin[1] - offset, localOrigin[1] + offset]
#mill sides of bar
newPath = self.blockPath(common.settings.pocket['barHeight'], 0,
uEnds=[False, barRange[0]], finish=True, dir=0)
newPath.operations[0].clear = True
toolpath.extend(newPath)
newPath = self.blockPath(common.settings.pocket['barHeight'], 0,
uEnds=[barRange[1], False], finish=True, dir=0)
newPath.operations[0].clear = True
toolpath.extend(newPath)
#mark correct location for female post
"""
#construct plane with which to slice profile
plane = copy(self.orientation)
#move joint plane
plane.Translate(plane.ZAxis * common.settings.pocket['barHeight'])
#intersect with Post profile
intersections = Rhino.Geometry.Intersect.Intersection.CurvePlane(
self.joint.posts[not self.index].profile, plane, 0)
if len(intersections) == 2:
guides = []
toLocal = Rhino.Geometry.Transform.ChangeBasis(
Rhino.Geometry.Plane.WorldXY, self.profilePlane)
for i in intersections:
tempPoint = copy(i.PointA)
tempPoint.Transform(toLocal)
guides.append(Rhino.Geometry.Point3D(-offset - common.settings.gcode['millDiameter'], tempPoint.Y, common.settings.pocket['barHeight'] - common.settings.pocket['guideDepth']))
guides.append(Rhino.Geometry.Point3D(offset + common.settings.gcode['millDiameter'], tempPoint.Y, common.settings.pocket['barHeight'] - common.settings.pocket['guideDepth']))
if len(guides) == 2:
#flip direction of first guide to avoid need for clearance
guides.reverse()
#change to rotated post's coordinates
guides = [self.UVToPost(p) for p in guides]
toolpath.operations.extend(guides)
else:
raise NameError("locating mark didn't find two intersections with other post's profile!")
"""
#add initial rotation
toolpath.operations[0].A = self.rotation
toolpath.operations[0].clear = True
#add hole mark if wanted
if common.settings.pocket['markDatum'] > 0:
toolpath.extend(self.makeHoleToolpath())
return toolpath
def createHoles(self):
"""find center of bolt hole
Returns: list of center lines from back of post to post axis
"""
toLocal = Rhino.Geometry.Transform.ChangeBasis(Rhino.Geometry.Plane.WorldXY, self.profilePlane)
toGlobal = toLocal.TryGetInverse()
if toGlobal[0]:
toGlobal = toGlobal[1]
else:
raise NameError("Can't invert toLocal transform!")
return False
#axis line, from post axis, directed away from other post (local)
#axis = Rhino.Geometry.Line(0,0,0, 0,-12,0)
#axis line, from pocket face center, directed away from other post (local)
originLocal = copy(self.origin)
originLocal.Transform(toLocal)
start = Rhino.Geometry.Point3d(originLocal.X, 0, 0)
axis = Rhino.Geometry.Line(start, Rhino.Geometry.Vector3d.XAxis, 12)
#post profile rectangle in profilePlane coordinates
rectangle = copy(self.post.profile)
rectangle.Transform(toLocal)
#print axis, rectangle.TryGetPolyline()[1].ToArray()
intersections = Rhino.Geometry.Intersect.Intersection.CurveCurve(axis.ToNurbsCurve(),
rectangle, 0, 0)
if intersections.Count == 1:
#get intersection distance along axis
length = intersections[0].PointA.X - originLocal.X
else:
raise NameError("Found {0} intersections of axis with post profile".
format(intersections.Count))
#return center line
#return [Rhino.Geometry.Line(self.joint.intersection[self.index], -self.orientation.Normal, end)]
startGlobal = copy(start)
startGlobal.Transform(toGlobal)
return [Rhino.Geometry.Line(startGlobal, -self.orientation.Normal, length)]
def makeHoleToolpathV(self):
"""Create path to mill mark of drill hole in back of post (V shape)
Returns: toolpath
"""
#Z height of mark on rotated post
mark = self.holes[0].Length - common.settings.pocket['markDepth']
toolpath = Toolpath()
localOrigin = self.post.globalToSelf * self.origin
path = [Rhino.Geometry.Point3d(localOrigin.X,
common.settings.gcode['clearance'] - mark,
common.settings.gcode['clearance']) for x in range(3)]
#move to center, bottom of mark
path[1].Y = 0; path[1].Z = mark
#back up to other side
path[2].Y = -path[2].Y
#rapid to start
toolpath.operations.append(Rapid(path[0], A=self.rotation - 180, clear=True))
#rest of path
toolpath.operations.append(Mill([path[1], path[2]]))
return toolpath
def makeHoleToolpathC(self):
"""Create path to mill countersink for screw
Returns: toolpath
"""
#Z height of mark on rotated post
mark = self.holes[0].Length - common.settings.pocket['markDepth']
radius = common.settings.pocket['csRadius'] - \
common.settings.gcode['millDiameter'] / 2
toolpath = Toolpath()
localOrigin = self.post.globalToSelf * self.origin
start = Rhino.Geometry.Point3d(localOrigin.X, radius,
common.settings.gcode['clearance'])
center = Rhino.Geometry.Point3d(localOrigin.X, 0, mark)
#rapid to start
toolpath.operations.append(Rapid(start, A=self.rotation - 180, clear=True))
#circle
toolpath.operations.append(Arc(center, radius, 0, 360))
return toolpath
def makeHoleToolpath(self):
"""Create path to mill mark for screw
Returns: toolpath
"""
#Z height of mark on rotated post
if common.settings.pocket['markDatum'] == 1:
#distance from post surface
mark = self.holes[0].Length - common.settings.pocket['markDepth']
elif common.settings.pocket['markDatum'] == 2:
#distance from pocket center
if self.holes[0].Length > common.settings.pocket['markDepth']:
mark = common.settings.pocket['markDepth']
else:
mark = self.holes[0].Length - 0.25
else:
raise NameError("Screw mark requested, but unknown datum option specified!")
return '';
toolpath = Toolpath()
#start of screw axis in skewed pocket face coordinates
localOrigin = self.face.ClosestPoint(self.origin)
localOrigin = Rhino.Geometry.Point3d(localOrigin[1],localOrigin[2],0)
if self.index == 0:
#female - negative offset along male's axis
localOrigin.X -= common.settings.pocket['holeOffset']
else:
#male - offset screw along post's axis
localOrigin.Y += common.settings.pocket['holeOffset']
localCenter = self.UVToPost(localOrigin, A=180)
top = Rhino.Geometry.Point3d(localCenter.X, localCenter.Y,
common.settings.gcode['clearance'])
bottom = Rhino.Geometry.Point3d(localCenter.X, localCenter.Y, localCenter.Z + mark)
#rapid to bottom (moves slowly down to z) wouldn't need top, except for non-empty preview line in Rhino
toolpath.operations.append(Rapid(top, A=self.rotation - 180, clear=True))
toolpath.operations.append(Mill([top]))
toolpath.operations.append(Rapid(bottom))
return toolpath
# End Pocket_mfBar Class # | {
"content_hash": "e0a3c675fa6e6a85aeca72b21ed41356",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 180,
"avg_line_length": 32.4177897574124,
"alnum_prop": 0.6954352706410576,
"repo_name": "twastvedt/FRAMEWORK",
"id": "a6feb8dbbe1e23410b135c1fac6621679db544b3",
"size": "24054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/python/pocket.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import fcntl
import socket
from .poller import POLL_EVENT_TYPE
class PipeNotifier(object):
def __init__(self, poller, callback = None):
self.__callback = callback
self.__pipeR, self.__pipeW = os.pipe()
        # read the status flags with F_GETFL so that O_NONBLOCK actually takes effect
        flag = fcntl.fcntl(self.__pipeR, fcntl.F_GETFL)
        fcntl.fcntl(self.__pipeR, fcntl.F_SETFL, flag | os.O_NONBLOCK)
        flag = fcntl.fcntl(self.__pipeW, fcntl.F_GETFL)
        fcntl.fcntl(self.__pipeW, fcntl.F_SETFL, flag | os.O_NONBLOCK)
poller.subscribe(self.__pipeR, self.__onNewNotification, POLL_EVENT_TYPE.READ)
def notify(self):
os.write(self.__pipeW, b'o')
def __onNewNotification(self, descr, eventMask):
try:
while os.read(self.__pipeR, 1024):
pass
except OSError as e:
if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK):
raise
if self.__callback is not None:
self.__callback()
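
# Hypothetical usage sketch, not part of pysyncobj: PipeNotifier registers the
# read end of its pipe with a poller, and notify() can then be called from any
# thread to wake the poll loop. StubPoller is an assumed stand-in that simply
# records the subscription so the handler can be invoked by hand here.
if __name__ == '__main__':
    class StubPoller(object):
        def __init__(self):
            self.handlers = {}

        def subscribe(self, descr, callback, eventMask):
            self.handlers[descr] = callback

    def onWake():
        print('poll loop woken up')

    poller = StubPoller()
    notifier = PipeNotifier(poller, callback=onWake)
    notifier.notify()  # writes one byte into the pipe
    # A real poller would now see the read descriptor become readable and call
    # the registered handler; simulate that step manually:
    for descr, handler in poller.handlers.items():
        handler(descr, POLL_EVENT_TYPE.READ)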
| {
"content_hash": "f5e3c8e3be64013e15dd0e5fbaf26087",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 86,
"avg_line_length": 30.4375,
"alnum_prop": 0.5995893223819302,
"repo_name": "bakwc/PySyncObj",
"id": "652ab4cfdb3e54b560d0610f0859d03aa2c3bfbe",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysyncobj/pipe_notifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "250256"
}
],
"symlink_target": ""
} |
"""
test_models
----------------------------------
Tests for bpz models
"""
from django.test import TestCase
from bpz.models import Case, HomeOwnersAssociation
class CaseTestCase(TestCase):
def test_str(self):
case = Case(case_id='BOA-21745')
self.assertEqual(str(case), 'BOA-21745')
class HomeOwnersAssociationTestCase(TestCase):
def test_str(self):
hoa = HomeOwnersAssociation()
self.assertEqual(str(hoa), '<unnamed>')
hoa.name = 'Home Owners United'
self.assertEqual(str(hoa), 'Home Owners United')
| {
"content_hash": "544e6f587d45182a6eca65186b0446da",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 56,
"avg_line_length": 21.807692307692307,
"alnum_prop": 0.6313932980599647,
"repo_name": "codefortulsa/BPZAround.me",
"id": "f5c505556c0ddd62f8736f86ffb40e5cadda5f81",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1371"
},
{
"name": "JavaScript",
"bytes": "6176"
},
{
"name": "Python",
"bytes": "37105"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
import math
orientations = [(1, 0), (0, 1), (-1, 0), (0, -1)]
def turn_heading(heading, inc, headings=orientations):
return headings[(headings.index(heading) + inc) % len(headings)]
def turn_right(heading):
return turn_heading(heading, -1)
def turn_left(heading):
return turn_heading(heading, +1)
def distance(a, b):
"""The distance between two (x, y) points.
>>> distance((1,2),(5,5))
5.0
"""
return math.hypot((a[0] - b[0]), (a[1] - b[1]))
def distance_squared(a, b):
"""The square of the distance between two (x, y) points.
>>> distance_squared((1,2),(5,5))
25.0
"""
return (a[0] - b[0])**2 + (a[1] - b[1])**2
def distance2(a, b):
"The square of the distance between two (x, y) points."
return distance_squared(a, b)
def clip(x, lowest, highest):
"""Return x clipped to the range [lowest..highest].
>>> [clip(x, 0, 1) for x in [-1, 0.5, 10]]
[0, 0.5, 1]
"""
return max(lowest, min(x, highest))
def vector_clip(vector, lowest, highest):
"""Return vector, except if any element is less than the corresponding
value of lowest or more than the corresponding value of highest, clip to
those values.
>>> vector_clip((-1, 10), (0, 0), (9, 9))
(0, 9)
"""
return type(vector)(list(map(clip, vector, lowest, highest)))
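
# Small self-check sketch, not part of the original module: exercises the
# heading helpers above on the default `orientations` list, plus the documented
# doctest examples for distance() and clip().
if __name__ == "__main__":
    east = (1, 0)
    assert turn_right(east) == (0, -1)
    assert turn_left(east) == (0, 1)
    assert turn_heading(east, 2) == (-1, 0)  # two left turns face west
    assert distance((1, 2), (5, 5)) == 5.0
    assert [clip(x, 0, 1) for x in [-1, 0.5, 10]] == [0, 0.5, 1]
    print("grid helper checks passed")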
| {
"content_hash": "6889f206b4a23ac60c4f4ceef3ce5c45",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 24.78181818181818,
"alnum_prop": 0.5810711665443874,
"repo_name": "gokul-uf/aima-python",
"id": "45bce40f3067f2cfed93fa1784ac228b47164f35",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aimaPy/grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "237974"
}
],
"symlink_target": ""
} |
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
from google.maps.routing_v2.types import routes_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-maps-routing",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class RoutesTransport(abc.ABC):
"""Abstract transport class for Routes."""
AUTH_SCOPES = ()
DEFAULT_HOST: str = "routes.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Don't apply audience if the credentials file passed from user.
if hasattr(credentials, "with_gdch_audience"):
credentials = credentials.with_gdch_audience(
api_audience if api_audience else host
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.compute_routes: gapic_v1.method.wrap_method(
self.compute_routes,
default_timeout=None,
client_info=client_info,
),
self.compute_route_matrix: gapic_v1.method.wrap_method(
self.compute_route_matrix,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def compute_routes(
self,
) -> Callable[
[routes_service.ComputeRoutesRequest],
Union[
routes_service.ComputeRoutesResponse,
Awaitable[routes_service.ComputeRoutesResponse],
],
]:
raise NotImplementedError()
@property
def compute_route_matrix(
self,
) -> Callable[
[routes_service.ComputeRouteMatrixRequest],
Union[
routes_service.RouteMatrixElement,
Awaitable[routes_service.RouteMatrixElement],
],
]:
raise NotImplementedError()
@property
def kind(self) -> str:
raise NotImplementedError()
__all__ = ("RoutesTransport",)
| {
"content_hash": "59f842245d5d13e395c0d1c47c061569",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 101,
"avg_line_length": 36.551515151515154,
"alnum_prop": 0.6131653125518156,
"repo_name": "googleapis/google-cloud-python",
"id": "b2121f5c1760c08f6da1365fade0ec9a7112c2c5",
"size": "6631",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-maps-routing/google/maps/routing_v2/services/routes/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2895"
},
{
"name": "Python",
"bytes": "5620713"
},
{
"name": "Shell",
"bytes": "51704"
}
],
"symlink_target": ""
} |
import unittest
from . import issues
class InboundMessageStub(object):
"""
Stub InboundEmailMessage class.
(google.appengine.api.mail.InboundEmailMessage)
"""
def __init__(self):
self.subject = u'It doesn\'t work'
def bodies(self, content_type):
return iter([(u'plain/text', EncodedPayloadStub())])
class EncodedPayloadStub(object):
"""
Stub EncodedPayload class.
(google.appengine.api.mail.EncodedPayload)
"""
def decode(self):
return u'I clicked on the button but nothing happened.'
class ExtractIssueTitleTestCase(unittest.TestCase):
def setUp(self):
self.inbound_message = InboundMessageStub()
def test_extracts_issue_title(self):
expected = u'It doesn\'t work'
result = issues.extract_issue_title(self.inbound_message)
self.assertEqual(expected, result)
class ExtractIssueBodyTestCase(unittest.TestCase):
def setUp(self):
self.inbound_message = InboundMessageStub()
def test_extracts_issue_body(self):
expected = u'I clicked on the button but nothing happened.'
result = issues.extract_issue_body(self.inbound_message)
self.assertEqual(expected, result)
def test_decodes_issue_body(self):
# A real InboundEmailMessage returns an object which returns
# a string when decode() is called on it.
result = issues.extract_issue_body(self.inbound_message)
self.assertTrue(
isinstance(result, unicode),
'Expected unicode, got {}'.format(type(result)))
class CreatePayloadTestCase(unittest.TestCase):
def test_creates_payload(self):
expected = {'title': 'issue title', 'body': 'issue body'}
result = issues.create_payload('issue title', 'issue body')
self.assertEqual(expected, result)
class CreateURLTestCase(unittest.TestCase):
def test_creates_url(self):
expected = 'https://api.github.com/repos/kdwyer/issue-mailer/issues'
config = {
'repo_owner': 'kdwyer',
'repo_name': 'issue-mailer',
'base_url': 'https://api.github.com'
}
result = issues.create_url(config)
self.assertEqual(expected, result)
class CreateHeadersTestCase(unittest.TestCase):
def test_creates_headers(self):
expected = {
'Accept': 'application/vnd.github.v3+json',
'Authorization': 'token abcdef',
'Content-Type': 'application/json',
'User-Agent': 'kdwyer-issue-mailer'
}
config = {
'auth_token': 'abcdef',
'user_agent_string': 'kdwyer-issue-mailer'
}
result = issues.create_headers(config)
self.assertEqual(expected, result)
| {
"content_hash": "644c211f177d4f0ef259ebf87b2341aa",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 28.14,
"alnum_prop": 0.6254442075337597,
"repo_name": "kdwyer/issue-forwarder",
"id": "768c08ce72b7b5f5a255b87154a967da7dadf189",
"size": "2814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "issueforwarder/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5358"
}
],
"symlink_target": ""
} |
import datetime
from news_website.extensions import db
from news_website.models.relationships import topics_newses
from news_website.database import (
Model,
SurrogatePK,
)
class News(SurrogatePK, Model):
__tablename__ = 'newses'
title = db.Column(db.Text, default='')
abstract = db.Column(db.Text, default='')
content = db.Column(db.Text, default='')
source_url = db.Column(db.Text, default='')
topics = db.relationship('Topic', secondary=topics_newses, backref='newses_br', lazy='dynamic')
    # pass the callables (no parentheses) so the defaults are evaluated per row, not once at import time
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    report_time = db.Column(db.DateTime, default=datetime.datetime.now)
status = db.Column(db.Integer, default=1)
# tags = db.relationship('Tag', secondary=tags_posts, backref=db.backref('posts_br', lazy='dynamic'))
def __init__(self, **kwargs):
db.Model.__init__(self, **kwargs) | {
"content_hash": "6bf9507611cb5173546eb11588b96c64",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 105,
"avg_line_length": 38.25,
"alnum_prop": 0.6873638344226579,
"repo_name": "binking/News_website",
"id": "ab7c6dfd5b848e862222e4519cd277f416e90e1d",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news_website/models/news.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "46071"
},
{
"name": "JavaScript",
"bytes": "306251"
},
{
"name": "Python",
"bytes": "85517"
}
],
"symlink_target": ""
} |
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 41:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original[82:123,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| {
"content_hash": "c6cf5fe2acabf1567920f885127e7df9",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 664,
"avg_line_length": 34.6241134751773,
"alnum_prop": 0.6224907824662024,
"repo_name": "tapomayukh/projects_in_python",
"id": "50c180908735373455cd9d9879d87e04646f9dd6",
"size": "4923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/single_feature/best_kNN_PCA/test11_cross_validate_objects_1200ms_scaled_method_v_motion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "4903"
},
{
"name": "Python",
"bytes": "4451912"
}
],
"symlink_target": ""
} |
from django import forms
class AddToCartForm(forms.Form):
quantity = forms.DecimalField(initial=1)
def __init__(self, data=None, *args, **kwargs):
typ = kwargs.pop('typ')
# validate only when correct button was pressed
if data and not typ in data:
data = None
self.cart = kwargs.pop('cart', None)
super(AddToCartForm, self).__init__(data=data, *args, **kwargs)
def clean(self):
data = super(AddToCartForm, self).clean()
if 'quantity' in data:
qty = data['quantity']
add_result = self.cart.add_item(self.get_variant(), qty, dry_run=True)
if add_result.quantity_delta < qty:
raise forms.ValidationError(add_result.reason)
return data
def save(self):
return self.cart.add_item(self.get_variant(), self.cleaned_data['quantity'])
class AddToWishlistForm(AddToCartForm):
def save(self):
return self.cart.add_item(self.get_variant(), 1)
| {
"content_hash": "7cac2080efd52595d5db7d2e8953f97e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 84,
"avg_line_length": 34.48275862068966,
"alnum_prop": 0.615,
"repo_name": "fusionbox/satchless",
"id": "60f2863c9d714af7ca2d4ccf465d256d02adaa89",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/demo/carts/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47040"
},
{
"name": "HTML",
"bytes": "87824"
},
{
"name": "JavaScript",
"bytes": "23123"
},
{
"name": "Python",
"bytes": "376774"
}
],
"symlink_target": ""
} |
from setuptools import setup
from setuptools.command.test import test as TestCommand # noqa
import os
import re
import sys
name = 'djes'
package = 'djes'
description = "Connecting Django and elasticsearch-dsl"
url = "https://github.com/theonion/djes"
author = "Chris Sinchok"
author_email = '[email protected]'
license = 'MIT'
setup_requires = []
dev_requires = [
"flake8>=2.0,<2.1",
"pytest==2.8.7",
"pytest-django==2.8.0",
"pytest-cov==1.8.1",
"model_mommy==1.2.4",
"coveralls==0.5",
"mkdocs==0.12.2",
"six==1.10.0"
]
install_requires = [
"elasticsearch-dsl==0.0.9",
"django>=1.8,<1.9"
]
server_requires = []
if 'test' in sys.argv:
setup_requires.extend(dev_requires)
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, "__init__.py")).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name=name,
version=get_version(package),
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
install_requires=install_requires,
tests_require=dev_requires,
extras_require={
'dev': dev_requires,
},
cmdclass={'test': PyTest}
)
| {
"content_hash": "a10cfd93ac1973dfa458f20bddf255de",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 90,
"avg_line_length": 24.603960396039604,
"alnum_prop": 0.6169014084507042,
"repo_name": "theonion/djes",
"id": "3a97b12e44a3c70c71716853f8ee04ced6b9ea6e",
"size": "2532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74133"
}
],
"symlink_target": ""
} |
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def _test_combinations_with_mode_v1(mode):
def new_map_fn(dataset, *args, **kwargs):
return dataset.map(*args, **kwargs)
def legacy_map_fn(dataset, *args, **kwargs):
return dataset.map_with_legacy_function(*args, **kwargs)
new_map_combinations = combinations.combine(
tf_api_version=1,
mode=mode,
apply_map=combinations.NamedObject("map_fn", new_map_fn))
legacy_map_combinations = combinations.combine(
tf_api_version=1,
mode=mode,
apply_map=combinations.NamedObject("legacy_map_fn", legacy_map_fn))
return new_map_combinations + legacy_map_combinations
def _test_combinations_with_mode_v2(mode):
def new_map_fn(dataset, *args, **kwargs):
return dataset.map(*args, **kwargs)
return combinations.combine(
tf_api_version=2,
mode=mode,
apply_map=combinations.NamedObject("map_fn", new_map_fn))
def _test_combinations_with_mode(mode):
return _test_combinations_with_mode_v1(
mode) + _test_combinations_with_mode_v2(mode)
def _test_combinations():
return _test_combinations_with_mode("eager") + _test_combinations_with_mode(
"graph")
def _short_circuit_test_cases():
cases = [
("Identity", None, lambda x: x),
("Replicate", None, lambda x: (x, x)),
("Swap", (None, None), lambda x, y: (y, x)),
("Project", (None, None), lambda x, y: x)
]
def reduce_fn(x, y):
name, structure, fn = y
return x + combinations.combine(
structure=structure, fn=combinations.NamedObject(name, fn))
return functools.reduce(reduce_fn, cases, [])
class Foo(object):
"""Dummy class used for invalid return value tests."""
def __init__(self):
pass
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _map_dataset_factory(self, components, apply_map, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(_test_combinations())
def testMapDataset(self, apply_map):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(_test_combinations_with_mode("graph"))
def testMapDatasetMultiThreaded(self, apply_map):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _parallel_map_dataset_factory(self, components, apply_map, count,
num_parallel_calls, buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn, num_parallel_calls=num_parallel_calls)
dataset = dataset.prefetch(buffer_size).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_parallel_calls=1, buffer_size=1) +
combinations.combine(num_parallel_calls=1, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=4) +
combinations.combine(num_parallel_calls=8, buffer_size=8) +
combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDataset(self, apply_map, num_parallel_calls, buffer_size):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._parallel_map_dataset_factory(components, apply_map, 14,
num_parallel_calls, buffer_size))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=1, buffer_size=1) +
combinations.combine(num_parallel_calls=1, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=4) +
combinations.combine(num_parallel_calls=8, buffer_size=8) +
combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDatasetMultiThreaded(self, apply_map, num_parallel_calls,
buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._parallel_map_dataset_factory(components, apply_map, 18,
num_parallel_calls, buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
    # `results` will contain the same elements (components**2),
    # repeated 18 times but in a non-deterministic order. Sort the
    # results, and assert that each element of components**2 is
    # produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
@combinations.generate(_test_combinations())
def testImplicitDisposeParallelMapDataset(self, apply_map):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._parallel_map_dataset_factory(components, apply_map, 1000,
100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapUnspecifiedOutputSize(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset,
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapError(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset,
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testPrefetchError(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset, lambda x: array_ops.check_numerics(x, "message")).prefetch(2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureIterator(self, apply_map):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return apply_map(dataset_ops.Dataset.range(10), _map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureHashTable(self, apply_map):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = apply_map(input_sentences,
lambda x: string_ops.string_split([x]).values)
dataset = apply_map(dataset, table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureQueue(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureSameResourceMultipleTimes(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testSeededStatefulOperatorIsProperlyStateful(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
    # Randomness is repeatable given the same seed.
self.assertAllClose(random_values, random_values_2)
@combinations.generate(_test_combinations())
def testStatefulMapKeepsStateAcrossIterators(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
@combinations.generate(_test_combinations())
def testStatefulOperationInShortCircuit(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations())
def testMapDict(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: {"foo": x * 2, "bar": x**2})
dataset = apply_map(dataset, lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
@combinations.generate(_test_combinations())
def testMapNamedtuple(self, apply_map):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(10)
images = apply_map(labels, lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = collections.namedtuple("Example", ["label", "image"])
dataset_namedtuple = apply_map(dataset_tuple, example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = apply_map(dataset_tuple, preprocess_tuple)
dataset_namedtuple = apply_map(dataset_namedtuple, preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(10):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
@combinations.generate(_test_combinations())
def testMapAttrs(self, apply_map):
if attr is None:
self.skipTest("attr module is not available.")
# construct dataset of tuples
labels = dataset_ops.Dataset.range(10)
images = apply_map(labels, lambda l: -l)
dataset = dataset_ops.Dataset.zip((labels, images))
@attr.s(cmp=True)
class Example(object):
label = attr.ib()
image = attr.ib()
dataset = apply_map(dataset, Example)
def preprocess(example):
example.image = 2 * example.image
return example
dataset = apply_map(dataset, preprocess)
get_next = self.getNext(dataset)
for i in range(10):
data = self.evaluate(get_next())
self.assertEqual(data, Example(i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testUseStepContainerInMap(self, apply_map):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(dataset,
lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
@combinations.generate(_test_combinations())
def testCaseAndCondInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(row)
return apply_map(dataset, lambda x: control_map_fn(x, num))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), divide),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensors(row)
return apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseAndCondInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testNestedListMapDataset(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2]).repeat(10)
dataset = apply_map(dataset, lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(buffer_size=[1, 2, 3, 4])))
def testPrefetch(self, apply_map, buffer_size):
# We will use this event to test that `_map_py_func()` has been invoked a
# certain number of times (6 times, to be exact) after consuming fewer
# elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
# We can indirectly observe that varying the buffer size has the intended
# effect by observing when `ev` is set (on the 6th invocation of
# `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least one element
# to start the prefetching.
dataset = dataset_ops.Dataset.range(100)
dataset = apply_map(dataset, _map_fn).prefetch(buffer_size)
get_next = self.getNext(dataset)
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testReturnList(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testMultiOutputPyFunc(self, apply_map):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparse(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparseChain(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInference(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=True)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual((32, 3), dataset.element_spec.shape)
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInferencePartial(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=False)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual([None, 3], dataset.element_spec.shape.as_list())
@combinations.generate(_test_combinations())
def testTensorArray(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testTensorArrayChain(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testRagged(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5)
dataset = apply_map(dataset, _ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
@combinations.generate(_test_combinations())
def testRaggedChain(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _ragged)
dataset = apply_map(dataset, _concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testParallelMapOutOfRangeError(self, apply_map):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105)
dataset = apply_map(
dataset,
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testConstantOutput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
@combinations.generate(test_base.graph_only_combinations())
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
def _check_warning(caught_warnings, expected_result):
found_warning = False
for warning in caught_warnings:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertEqual(found_warning, expected_result)
      # The map function doesn't use a seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
_check_warning(w, False)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
_check_warning(w, True)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
_check_warning(w, False)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(
lambda x: random_ops.random_shuffle(x, seed=37))
_check_warning(w, False)
@combinations.generate(_test_combinations())
def testNestedDatasetMap(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
dataset = apply_map(dataset, dataset_ops.Dataset.from_tensor_slices)
dataset = apply_map(dataset, lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
@combinations.generate(_test_combinations())
def testReturnValueError(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegex(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\)"):
_ = apply_map(dataset, lambda x: Foo)
@combinations.generate(test_base.default_test_combinations())
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "Type mismatch"))
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testNoInterOpParallelism(self, apply_map, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = apply_map(dataset, _map_fn)
dataset._variant_tensor.op._set_attr("use_inter_op_parallelism",
attr_value_pb2.AttrValue(b=False))
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
@combinations.generate(
combinations.times(_test_combinations(), _short_circuit_test_cases(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuit(self, apply_map, structure, fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat()
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuitCapturedInput(self, apply_map, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat()
dataset = apply_map(
dataset, lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager", "graph"],
num_parallel_calls=[None, 12]))
def testPreserveCardinality(self, num_parallel_calls):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.Dataset.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
@combinations.generate(_test_combinations_with_mode("graph"))
def testCollectionCopy(self, apply_map):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
_ = apply_map(dataset, func)
@combinations.generate(
combinations.times(
_test_combinations_with_mode_v1("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testMapCancellation(self, apply_map, num_parallel_calls):
    # Checks that a cancellation is threaded through to the map
    # transformation.
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
def fn(_):
return queue.dequeue()
dataset = dataset_ops.Dataset.range(1)
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset, requires_initialization=True)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
  # TODO(b/126553094): map doesn't work with a variable defined inside a
  # function in eager mode; Graph tensors possibly leak out of the function
  # building context from the function graph in eager mode, as variables are
  # created in init_scope.
@combinations.generate(test_base.graph_only_combinations())
def testCreateVariableInsideFunctionWithGetter(self):
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return counter_var.assign_add(1)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
if hasattr(dataset, "map_with_legacy_function"):
      # NOTE: In the legacy function, the resource is captured by value.
with self.assertRaisesWithPredicateMatch(
AttributeError, "'Tensor' object has no attribute 'assign_add'"):
dataset.map_with_legacy_function(func)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertEqual(i + 1, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureVariable(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureUninitializedVariableError(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureConstantsWithConflictingDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testReferenceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variables.VariableV1(5.0)
return math_ops.add(a, b)
      # NOTE: Use the legacy function implementation, as the eager function
      # will convert RefVariables to ResourceVariables.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testResourceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
with ops.device("/device:CPU:0"):
a_var = variable_scope.get_variable(
"a", (), dtypes.int32, use_resource=True)
a_var = math_ops.add(a_var, 1)
with ops.device("/device:CPU:1"):
b_var = variable_scope.get_variable(
"b", (), dtypes.int32, use_resource=True)
return math_ops.add(a_var, b_var)
g = ops.Graph()
with self.session(config=config, graph=g):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(
local_determinism=[None, True, False],
global_determinism=[True, False])))
def testDeterminismConfiguration(self, apply_map, local_determinism,
global_determinism):
expect_determinism = local_determinism or (local_determinism is None and
global_determinism)
elements = list(range(1000))
def dataset_fn(delay_ms):
def sleep(x):
time.sleep(delay_ms / 1000)
return x
def map_function(x):
if math_ops.equal(x, 0):
return script_ops.py_func(sleep, [x], x.dtype)
else:
return x
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = apply_map(
dataset,
map_function,
num_parallel_calls=2,
deterministic=local_determinism)
opts = options_lib.Options()
opts.deterministic = global_determinism
dataset = dataset.with_options(opts)
return dataset
self.checkDeterminism(
dataset_fn, expect_determinism, expected_elements=elements)
@combinations.generate(_test_combinations())
def testNoneComponent(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors((42, None))
def map_function(x, y):
if y is None:
return x / 2
return x
dataset = apply_map(dataset, map_function)
self.assertDatasetProduces(dataset, expected_output=[21])
@combinations.generate(test_base.eager_only_combinations())
def testCheckpointLargeBuffer(self):
if pywrap_sanitizers.is_tsan_enabled():
self.skipTest("Creating a large buffer causes OOM when using tsan.")
# Tensor of size 512M
dataset = dataset_ops.Dataset.from_tensors(
array_ops.ones((128, 1024, 1024), dtype=dtypes.float32))
dataset = dataset.repeat()
# Set parallelism to 5 to exceed the 2GB protobuf limit
dataset = dataset.map(lambda x: x * 2, num_parallel_calls=5)
iterator = iter(dataset)
next(iterator) # request an element to fill the parallel map buffer
ckpt = trackable_utils.Checkpoint(iterator=iterator)
manager = checkpoint_management.CheckpointManager(
ckpt, self.get_temp_dir(), max_to_keep=1)
manager.save()
class MapCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testCore(self, verify_fn, num_parallel_calls):
tensor_slice_len = 7
num_epochs = 2
multiplier = 37.0
def _build_ds():
components = (np.arange(tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=num_parallel_calls).repeat(num_epochs))
verify_fn(self, _build_ds, tensor_slice_len * num_epochs)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testSaveStatefulFunction(self, num_parallel_calls):
def _build_ds():
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(
_map_fn, num_parallel_calls=num_parallel_calls)
self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testCaptureVariableInMapFn(self, num_parallel_calls):
def _build_ds():
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: counter_var.assign_add(1),
num_parallel_calls=num_parallel_calls))
self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testCaptureConstantInMapFn(self, verify_fn, num_parallel_calls):
num_outputs = 10
def _build_ds():
constant_var = constant_op.constant(5)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda x: x + constant_var, num_parallel_calls=num_parallel_calls))
verify_fn(self, _build_ds, num_outputs)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testCaptureDefunInMapFn(self, verify_fn, num_parallel_calls):
num_outputs = 10
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(num_outputs).map(
defun_fn, num_parallel_calls=num_parallel_calls)
verify_fn(self, _build_ds, num_outputs)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testBuildDefunInMapFn(self, verify_fn, num_parallel_calls):
num_outputs = 10
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return constant_op.constant(11000) + defun_fn_deep(
math_ops.cast(x, dtypes.int32))
return dataset_ops.Dataset.range(num_outputs).map(
defun_fn, num_parallel_calls=num_parallel_calls)
verify_fn(self, _build_ds, num_outputs)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2])))
def testSparse(self, verify_fn, num_parallel_calls):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _build_ds(num_outputs):
return dataset_ops.Dataset.range(num_outputs).map(
_sparse, num_parallel_calls=num_parallel_calls)
num_outputs = 10
verify_fn(self, lambda: _build_ds(num_outputs), num_outputs=num_outputs)
if __name__ == "__main__":
test.main()
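# A minimal standalone sketch (not part of this test suite) of the
# `Dataset.map()` transformation exercised above; the element count and the
# parallelism level are arbitrary:
#
#     import tensorflow as tf
#
#     ds = tf.data.Dataset.range(10)
#     ds = ds.map(lambda x: x * x, num_parallel_calls=2)
#     for value in ds:
#         print(value.numpy())  # 0, 1, 4, 9, ...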
| {
"content_hash": "21161130c1d5e1c79f9b517fb131fa3f",
"timestamp": "",
"source": "github",
"line_count": 1452,
"max_line_length": 80,
"avg_line_length": 37.180440771349865,
"alnum_prop": 0.6559478383284555,
"repo_name": "frreiss/tensorflow-fred",
"id": "c01e3b15170da0b3379e52afddbad5fd815cacf9",
"size": "54675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/map_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""
WSGI config for {{ cookiecutter.project_name }} project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
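# As a sketch of such middleware (illustrative only, not part of the
# template; the class and header names are placeholders), a plain WSGI
# wrapper around `application` could look like:
#
# class HeaderMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             # Append an extra response header before delegating.
#             headers.append(("X-Served-By", "wsgi"))
#             return start_response(status, headers, exc_info)
#         return self.app(environ, _start_response)
#
# application = HeaderMiddleware(application)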
| {
"content_hash": "26af5da4571a6b24f162b6f537b2542d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 46.0625,
"alnum_prop": 0.7910447761194029,
"repo_name": "savioabuga/cookiecutter-django-1.7.7",
"id": "d362a44c44085e883bab5231c37efc72b5d80c37",
"size": "1474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.repo_name}}/config/wsgi.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5140"
},
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "23944"
},
{
"name": "JavaScript",
"bytes": "2375"
},
{
"name": "Makefile",
"bytes": "5692"
},
{
"name": "Python",
"bytes": "37793"
},
{
"name": "Ruby",
"bytes": "751"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
"""Module containing the element class."""
import pytac
from pytac.data_source import DataSourceManager
from pytac.exceptions import DataSourceException, FieldException
class Element(object):
"""Class representing one physical element in an accelerator lattice.
An element has zero or more devices (e.g. quadrupole magnet) associated
with each of its fields (e.g. 'b1' for a quadrupole).
**Attributes:**
Attributes:
name (str): The name identifying the element.
type_ (str): The type of the element.
length (float): The length of the element in metres.
families (set): The families this element is a member of.
.. Private Attributes:
_lattice (Lattice): The lattice to which the element belongs.
_data_source_manager (DataSourceManager): A class that manages the
data sources associated
with this element.
"""
def __init__(self, length, element_type, name=None, lattice=None):
"""
Args:
length (float): The length of the element.
element_type (str): The type of the element.
name (str): The unique identifier for the element in the ring.
lattice (Lattice): The lattice to which the element belongs.
**Methods:**
"""
self.name = name
self.type_ = element_type
self.length = length
self.families = set()
self._lattice = lattice
self._data_source_manager = DataSourceManager()
@property
def index(self):
"""int: The element's index within the ring, starting at 1.
"""
if self._lattice is None:
return None
else:
return self._lattice._elements.index(self) + 1
@property
def s(self):
"""float: The element's start position within the lattice in metres.
"""
if self._lattice is None:
return None
else:
return sum([el.length for el in self._lattice[: self.index - 1]])
@property
def cell(self):
"""int: The lattice cell this element is within.
N.B. If the element spans multiple cells then the cell it begins in is
returned (lowest cell number).
"""
if self._lattice is None:
return None
elif self._lattice.cell_length is None:
return None
else:
return int(self.s / self._lattice.cell_length) + 1
def __str__(self):
"""Return a representation of an element, as a string.
Returns:
str: A representation of an element.
"""
repn = "<Element "
if self.name is not None:
repn += "'{0}', ".format(self.name)
if self.index is not None:
repn += "index {0}, ".format(self.index)
repn += "length {0} m, ".format(self.length)
if self.cell is not None:
repn += "cell {0}, ".format(self.cell)
repn += "families {0}>".format(", ".join(f for f in self.families))
return repn
__repr__ = __str__
def set_data_source(self, data_source, data_source_type):
"""Add a data source to the element.
Args:
data_source (DataSource): the data source to be set.
data_source_type (str): the type of the data source being set
pytac.LIVE or pytac.SIM.
"""
self._data_source_manager.set_data_source(data_source, data_source_type)
def get_fields(self):
"""Get the all fields defined on an element.
Includes all fields defined by all data sources.
Returns:
dict: A dictionary of all the fields defined on an element,
separated by data source(key).
"""
return self._data_source_manager.get_fields()
def add_device(self, field, device, uc):
"""Add device and unit conversion objects to a given field.
A DeviceDataSource must be set before calling this method, this
defaults to pytac.LIVE as that is the only data source that currently
uses devices.
Args:
field (str): The key to store the unit conversion and device
objects.
device (Device): The device object used for this field.
uc (UnitConv): The unit conversion object used for this field.
Raises:
DataSourceException: if no DeviceDataSource is set.
"""
try:
self._data_source_manager.add_device(field, device, uc)
except DataSourceException:
raise DataSourceException(
"No device data source for field {0} on "
"element {1}.".format(field, self)
)
def get_device(self, field):
"""Get the device for the given field.
A DeviceDataSource must be set before calling this method, this
defaults to pytac.LIVE as that is the only data source that currently
uses devices.
Args:
field (str): The lookup key to find the device on an element.
Returns:
Device: The device on the given field.
Raises:
DataSourceException: if no DeviceDataSource is set.
"""
try:
return self._data_source_manager.get_device(field)
except DataSourceException:
raise DataSourceException(
"No device data source for field {0} on "
"element {1}.".format(field, self)
)
def get_unitconv(self, field):
"""Get the unit conversion option for the specified field.
Args:
field (str): The field associated with this conversion.
Returns:
UnitConv: The object associated with the specified field.
Raises:
FieldException: if no unit conversion object is present.
"""
try:
return self._data_source_manager.get_unitconv(field)
except FieldException:
raise FieldException(
"No unit conversion option for field {0} on "
"element {1}.".format(field, self)
)
def set_unitconv(self, field, uc):
"""Set the unit conversion option for the specified field.
Args:
field (str): The field associated with this conversion.
uc (UnitConv): The unit conversion object to be set.
"""
self._data_source_manager.set_unitconv(field, uc)
def add_to_family(self, family):
"""Add the element to the specified family.
Args:
family (str): Represents the name of the family.
"""
self.families.add(family)
def get_value(
self,
field,
handle=pytac.RB,
units=pytac.DEFAULT,
data_source=pytac.DEFAULT,
throw=True,
):
"""Get the value for a field.
Returns the value of a field on the element. This value is uniquely
identified by a field and a handle. The returned value is either
in engineering or physics units. The data_source flag returns either
real or simulated values.
Args:
field (str): The requested field.
handle (str): pytac.SP or pytac.RB.
units (str): pytac.ENG or pytac.PHYS returned.
data_source (str): pytac.LIVE or pytac.SIM.
throw (bool): On failure: if True, raise ControlSystemException; if
False, return None and log a warning.
Returns:
float: The value of the requested field
Raises:
DataSourceException: if there is no data source on the given field.
FieldException: if the element does not have the specified field.
"""
try:
return self._data_source_manager.get_value(
field, handle, units, data_source, throw
)
except DataSourceException:
raise DataSourceException(
"No data source {0} on element {1}.".format(data_source, self)
)
except FieldException:
raise FieldException(
"Element {0} does not have field {1}.".format(self, field)
)
def set_value(
self,
field,
value,
handle=pytac.SP,
units=pytac.DEFAULT,
data_source=pytac.DEFAULT,
throw=True,
):
"""Set the value for a field.
This value can be set on the machine or the simulation.
Args:
field (str): The requested field.
value (float): The value to set.
handle (str): pytac.SP or pytac.RB.
units (str): pytac.ENG or pytac.PHYS.
data_source (str): pytac.LIVE or pytac.SIM.
            throw (bool): On failure: if True, raise ControlSystemException; if
False, log a warning.
Raises:
DataSourceException: if arguments are incorrect.
FieldException: if the element does not have the specified field.
"""
try:
self._data_source_manager.set_value(
field, value, handle, units, data_source, throw
)
except DataSourceException:
raise DataSourceException(
"No data source {0} on element {1}.".format(data_source, self)
)
except FieldException:
raise FieldException(
"Element {0} does not have field {1}.".format(self, field)
)
def set_lattice(self, lattice):
"""Set the stored lattice reference for this element to the passed
lattice object.
Args:
lattice (Lattice): lattice object to store a reference to.
"""
self._lattice = lattice
class EpicsElement(Element):
"""EPICS-aware element.
Adds get_pv_name() method.
**Methods:**
"""
def get_pv_name(self, field, handle):
"""Get PV name for the specified field and handle.
Args:
field (str): The requested field.
handle (str): pytac.RB or pytac.SP.
Returns:
str: The readback or setpoint PV for the specified field.
Raises:
DataSourceException: if there is no data source for this field.
FieldException: if the specified field doesn't exist.
"""
try:
return (
self._data_source_manager._data_sources[pytac.LIVE]
.get_device(field)
.get_pv_name(handle)
)
except KeyError:
raise DataSourceException(
"No data source for field {0} on " "element {1}.".format(field, self)
)
except AttributeError:
raise DataSourceException(
"Cannot get PV for field {0} on element "
"{1}, as basic devices do not have "
"associated PV's.".format(field, self)
)
except FieldException:
raise FieldException("No field {0} on element {1}.".format(field, self))
| {
"content_hash": "5060d3770564e07e44d1f09ee5270db0",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 85,
"avg_line_length": 33.75975975975976,
"alnum_prop": 0.5653798256537983,
"repo_name": "willrogers/pytac",
"id": "591f39ea9089f08fad8f592577f07eaf23c6c065",
"size": "11242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytac/element.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "30810"
},
{
"name": "Matlab",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "63104"
}
],
"symlink_target": ""
} |
__author__ = 'Joe Linn'
import pylastica.aggregation.abstractaggregation as abstract
class Global(abstract.AbstractAggregation):
pass | {
"content_hash": "e15111ac7ea71ae0d589d137e7640aed",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 20,
"alnum_prop": 0.7857142857142857,
"repo_name": "jlinn/pylastica",
"id": "3547f70d6e1381e7552f08d6b2927ef265bf7b99",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylastica/aggregation/globalagg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6774"
},
{
"name": "Python",
"bytes": "547260"
},
{
"name": "Shell",
"bytes": "1771"
}
],
"symlink_target": ""
} |
from . import AWSObject, AWSProperty
from .validators import boolean, double, integer_range, positive_integer
class GrokClassifier(AWSProperty):
props = {
'Classification': (basestring, True),
'CustomPatterns': (basestring, False),
'GrokPattern': (basestring, True),
'Name': (basestring, False),
}
class JsonClassifier(AWSProperty):
props = {
'JsonPath': (basestring, True),
'Name': (basestring, False),
}
class XMLClassifier(AWSProperty):
props = {
'Classification': (basestring, True),
'Name': (basestring, False),
'RowTag': (basestring, True),
}
class Classifier(AWSObject):
resource_type = "AWS::Glue::Classifier"
props = {
'GrokClassifier': (GrokClassifier, False),
'JsonClassifier': (JsonClassifier, False),
'XMLClassifier': (XMLClassifier, False),
}
class PhysicalConnectionRequirements(AWSProperty):
props = {
'AvailabilityZone': (basestring, True),
'SecurityGroupIdList': ([basestring], True),
'SubnetId': (basestring, True),
}
def connection_type_validator(type):
valid_types = [
'JDBC',
'SFTP',
]
if type not in valid_types:
        raise ValueError('%s is not a valid value for ConnectionType' % type)
return type
class ConnectionInput(AWSProperty):
props = {
'ConnectionProperties': (dict, True),
'ConnectionType': (connection_type_validator, True),
'Description': (basestring, False),
'MatchCriteria': ([basestring], True),
'Name': (basestring, False),
'PhysicalConnectionRequirements':
(PhysicalConnectionRequirements, True),
}
class Connection(AWSObject):
resource_type = "AWS::Glue::Connection"
props = {
'CatalogId': (basestring, True),
'ConnectionInput': (ConnectionInput, True),
}
class Schedule(AWSProperty):
props = {
'ScheduleExpression': (basestring, False),
}
def delete_behavior_validator(value):
valid_values = [
'LOG',
'DELETE_FROM_DATABASE',
'DEPRECATE_IN_DATABASE',
]
if value not in valid_values:
        raise ValueError('%s is not a valid value for DeleteBehavior' % value)
return value
def update_behavior_validator(value):
valid_values = [
'LOG',
'UPDATE_IN_DATABASE',
]
if value not in valid_values:
        raise ValueError('%s is not a valid value for UpdateBehavior' % value)
return value
class SchemaChangePolicy(AWSProperty):
props = {
'DeleteBehavior': (delete_behavior_validator, False),
'UpdateBehavior': (update_behavior_validator, False),
}
class JdbcTarget(AWSProperty):
props = {
'ConnectionName': (basestring, False),
'Exclusions': ([basestring], False),
'Path': (basestring, False),
}
class S3Target(AWSProperty):
props = {
'Exclusions': ([basestring], False),
'Path': (basestring, False),
}
class Targets(AWSProperty):
props = {
'JdbcTargets': ([JdbcTarget], False),
'S3Targets': ([S3Target], False),
}
class Crawler(AWSObject):
resource_type = "AWS::Glue::Crawler"
props = {
'Classifiers': ([basestring], False),
'Configuration': (basestring, False),
'DatabaseName': (basestring, True),
'Description': (basestring, False),
'Name': (basestring, False),
'Role': (basestring, True),
'Schedule': (Schedule, False),
'SchemaChangePolicy': (SchemaChangePolicy, False),
'TablePrefix': (basestring, False),
'Targets': (Targets, True),
}
class DatabaseInput(AWSProperty):
props = {
'Description': (basestring, False),
'LocationUri': (basestring, False),
'Name': (basestring, False),
'Parameters': (dict, False),
}
class Database(AWSObject):
resource_type = "AWS::Glue::Database"
props = {
'CatalogId': (basestring, True),
'DatabaseInput': (DatabaseInput, True),
}
class DevEndpoint(AWSObject):
resource_type = "AWS::Glue::DevEndpoint"
props = {
'EndpointName': (basestring, False),
'ExtraJarsS3Path': (basestring, False),
'ExtraPythonLibsS3Path': (basestring, False),
'NumberOfNodes': (positive_integer, False),
'PublicKey': (basestring, True),
'RoleArn': (basestring, True),
'SecurityGroupIds': ([basestring], False),
'SubnetId': (basestring, False),
}
class ConnectionsList(AWSProperty):
props = {
'Connections': ([basestring], False),
}
class ExecutionProperty(AWSProperty):
props = {
'MaxConcurrentRuns': (positive_integer, False),
}
class JobCommand(AWSProperty):
props = {
'Name': (basestring, False),
'ScriptLocation': (basestring, False),
}
class Job(AWSObject):
resource_type = "AWS::Glue::Job"
props = {
'AllocatedCapacity': (double, False),
'Command': (JobCommand, True),
'Connections': (ConnectionsList, False),
'DefaultArguments': (dict, False),
'Description': (basestring, False),
'ExecutionProperty': (ExecutionProperty, False),
'LogUri': (basestring, False),
'MaxRetries': (double, False),
'Name': (basestring, False),
'Role': (basestring, True),
}
class Column(AWSProperty):
props = {
'Comment': (basestring, False),
'Name': (basestring, True),
'Type': (basestring, False),
}
class Order(AWSProperty):
props = {
'Column': (basestring, True),
'SortOrder': (integer_range(0, 1), False),
}
class SerdeInfo(AWSProperty):
props = {
'Name': (basestring, False),
'Parameters': (dict, False),
'SerializationLibrary': (basestring, False),
}
class SkewedInfo(AWSProperty):
props = {
'SkewedColumnNames': ([basestring], False),
'SkewedColumnValues': ([basestring], False),
'SkewedColumnValueLocationMaps': (dict, False),
}
class StorageDescriptor(AWSProperty):
props = {
'BucketColumns': ([basestring], False),
'Columns': ([Column], False),
'Compressed': (boolean, False),
'InputFormat': (basestring, False),
'Location': (basestring, False),
        'NumberOfBuckets': (positive_integer, False),
'OutputFormat': (basestring, False),
'Parameters': (dict, False),
'SerdeInfo': (SerdeInfo, False),
'SkewedInfo': (SkewedInfo, False),
'SortColumns': ([Order], False),
'StoredAsSubDirectories': (boolean, False),
}
class PartitionInput(AWSProperty):
props = {
'Parameters': (dict, False),
'StorageDescriptor': (StorageDescriptor, False),
'Values': ([basestring], True),
}
class Partition(AWSObject):
resource_type = "AWS::Glue::Partition"
props = {
'CatalogId': (basestring, True),
'DatabaseName': (basestring, True),
'PartitionInput': (PartitionInput, True),
'TableName': (basestring, True),
}
def table_type_validator(type):
valid_types = [
'EXTERNAL_TABLE',
'VIRTUAL_VIEW',
]
if type not in valid_types:
        raise ValueError('%s is not a valid value for TableType' % type)
return type
class TableInput(AWSProperty):
props = {
'Description': (basestring, False),
'Name': (basestring, True),
'Owner': (basestring, False),
'Parameters': (dict, False),
'PartitionKeys': ([Column], False),
'Retention': (positive_integer, False),
'StorageDescriptor': (StorageDescriptor, False),
'TableType': (table_type_validator, False),
'ViewExpandedText': (basestring, False),
'ViewOriginalText': (basestring, False),
}
class Table(AWSObject):
resource_type = "AWS::Glue::Table"
props = {
'CatalogId': (basestring, True),
'DatabaseName': (basestring, True),
'TableInput': (TableInput, True),
}
class Action(AWSProperty):
props = {
'Arguments': (dict, False),
'JobName': (basestring, False),
}
class Condition(AWSProperty):
props = {
'JobName': (basestring, False),
'LogicalOperator': (basestring, False),
'State': (basestring, False),
}
class Predicate(AWSProperty):
props = {
'Conditions': ([Condition], False),
'Logical': (basestring, False),
}
def trigger_type_validator(type):
valid_types = [
'SCHEDULED',
'CONDITIONAL',
'ON_DEMAND',
]
if type not in valid_types:
        raise ValueError('%s is not a valid value for Type' % type)
return type
class Trigger(AWSObject):
resource_type = "AWS::Glue::Trigger"
props = {
'Actions': ([Action], True),
'Description': (basestring, False),
'Name': (basestring, False),
'Predicate': (Predicate, False),
'Schedule': (basestring, False),
'Type': (trigger_type_validator, True),
}
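# Illustrative usage sketch: a minimal template combining the classes above.
# The catalog id, role ARN and bucket path below are placeholders, not values
# taken from any real account.
if __name__ == '__main__':
    from troposphere import Template

    template = Template()
    template.add_resource(Database(
        'MyGlueDatabase',
        CatalogId='123456789012',
        DatabaseInput=DatabaseInput(Name='my-glue-db'),
    ))
    template.add_resource(Crawler(
        'MyGlueCrawler',
        DatabaseName='my-glue-db',
        Role='arn:aws:iam::123456789012:role/MyGlueCrawlerRole',
        Targets=Targets(S3Targets=[S3Target(Path='s3://my-bucket/data/')]),
    ))
    print(template.to_json())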
| {
"content_hash": "c7aa285e3a5b3451e6cb215d1acbc661",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 77,
"avg_line_length": 25.013698630136986,
"alnum_prop": 0.5913472070098577,
"repo_name": "pas256/troposphere",
"id": "381ba4005aa7ac016207bfb598217bc8747851cc",
"size": "9246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troposphere/glue.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "579"
},
{
"name": "Python",
"bytes": "521885"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
} |
"""Implement rdf post-processors for running data through a chain of parsers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import re
import stat
from future.builtins import str
from future.utils import with_metaclass
from grr_response_core.lib import objectfilter
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib.parsers import config_file
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import structs as rdf_structs
class Error(Exception):
"""Base error class."""
class DefinitionError(Error):
"""A filter was defined badly."""
class ProcessingError(Error):
"""A filter encountered errors processing results."""
def GetHandler(mode=""):
if mode == "SERIAL":
return SerialHandler
elif mode == "PARALLEL":
return ParallelHandler
else:
return NoOpHandler
class BaseHandler(object):
"""Abstract tests for filtering host data through a parser chain."""
def __init__(self, artifact=None, filters=None):
if not artifact:
raise DefinitionError("Filter needs some data to process!")
self.artifact = artifact
self.raw_data = [] # Collected data to analyze.
self.filters = [] # Filters used to process data.
self.cmp_data = [] # Data that results will be compared against.
self.results = [] # Residual data following filtering.
if isinstance(filters, rdf_structs.RepeatedFieldHelper):
self.filters = filters
self.Validate()
def Validate(self):
"""Verifies this filter set can process the result data."""
# All filters can handle the input type.
bad_filters = []
for f in self.filters:
try:
f.Validate()
except DefinitionError as e:
bad_filters.append("%s: %s" % (f.expression, e))
if bad_filters:
raise DefinitionError(
"Filters with invalid expressions: %s" % ", ".join(bad_filters))
def Parse(self, results):
"""Take the results and yield results that passed through the filters."""
raise NotImplementedError()
class NoOpHandler(BaseHandler):
"""Abstract parser to pass results through parsers serially."""
def Parse(self, data):
"""Take the results and yield results that passed through the filters."""
return data
class ParallelHandler(BaseHandler):
"""Abstract parser to pass results through parsers in parallel."""
def Parse(self, raw_data):
"""Take the data and yield results that passed through the filters.
The output of each filter is added to a result set. So long as the filter
selects, but does not modify, raw data, the result count will remain
accurate.
Args:
raw_data: An iterable series of rdf values.
Returns:
A list of rdf values that matched at least one filter.
"""
self.results = list()
if not self.filters:
self.results.extend(raw_data)
else:
for f in self.filters:
for result in f.Parse(raw_data):
# This used a set() previously when RDFStruct supported hash(). Since
# this code is highly generic and can handle all types of data, we
# cannot optimize this below O(n^2).
if result not in self.results:
self.results.append(result)
return self.results
class SerialHandler(BaseHandler):
"""Abstract parser to pass results through parsers serially."""
def Parse(self, raw_data):
"""Take the results and yield results that passed through the filters.
The output of each filter is used as the input for successive filters.
Args:
raw_data: An iterable series of rdf values.
Returns:
A list of rdf values that matched all filters.
"""
self.results = raw_data
for f in self.filters:
self.results = f.Parse(self.results)
return self.results
class Filter(with_metaclass(registry.MetaclassRegistry, object)):
"""A class for looking up filters.
Filters may be in other libraries or third party code. This class keeps
references to each of them so they can be called by name by checks.
"""
filters = {}
@classmethod
def GetFilter(cls, filter_name):
"""Return an initialized filter. Only initialize filters once.
Args:
filter_name: The name of the filter, as a string.
Returns:
an initialized instance of the filter.
Raises:
DefinitionError if the type of filter has not been defined.
"""
# Check if the filter is defined in the registry.
try:
filt_cls = cls.GetPlugin(filter_name)
except KeyError:
raise DefinitionError("Filter %s does not exist." % filter_name)
return filt_cls()
def ParseObjs(self, *args):
raise NotImplementedError("Filter needs to have a ParseObjs method.")
def Parse(self, objs, expression):
# Filters should process collections of rdfvalues. Require lists or sets of
# rdfvalues so that we don't pass in one (iterable) rdfvalue and get back a
    # list of its attributes.
if not isinstance(objs, (list, set)):
raise ProcessingError("Filter '%s' requires a list or set, got %s" %
(expression, type(objs)))
return list(self.ParseObjs(objs, expression))
def Validate(self, _):
raise NotImplementedError("Filter needs to have a Validate method.")
class AttrFilter(Filter):
"""A filter that extracts target attributes into key/value fields.
Accepts one or more attributes to collect. Optionally accepts an objectfilter
expression to select objects from which attributes are collected.
This filter is a convenient way to normalize the names of collected items to
use with a generic hint.
Args:
    expression: One or more attributes to fetch, as space separated items.
Yields:
AttributedDict RDF values. key is the attribute name, value is the attribute
value.
"""
def _Attrs(self, expression):
attrs = [a.strip() for a in expression.strip().split() if a]
if not attrs:
raise DefinitionError("AttrFilter sets no attributes: %s" % expression)
return attrs
def _GetVal(self, obj, key):
"""Recurse down an attribute chain to the actual result data."""
if "." in key:
lhs, rhs = key.split(".", 1)
obj2 = getattr(obj, lhs, None)
if obj2 is None:
return None
return self._GetVal(obj2, rhs)
else:
return getattr(obj, key, None)
def ParseObjs(self, objs, expression):
for key in self._Attrs(expression):
# Key needs to be a string for rdfvalue.KeyValue
key = str(key)
for obj in objs:
val = self._GetVal(obj, key)
if val:
# Dict won't accept rdfvalue.RepeatedFieldHelper
if isinstance(val, rdf_structs.RepeatedFieldHelper):
val = list(val)
yield rdf_protodict.AttributedDict({"key": key, "value": val})
def Validate(self, expression):
self._Attrs(expression)
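# Illustrative sketch of AttrFilter in use: "results" is assumed to be a list
# of StatEntry values collected elsewhere; the attribute names are examples.
#   attr_filter = Filter.GetFilter("AttrFilter")
#   for item in attr_filter.Parse(results, "st_uid st_mode pathspec.path"):
#     print(item.key, item.value)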
class ObjectFilter(Filter):
"""An objectfilter result processor that accepts runtime parameters."""
def _Compile(self, expression):
try:
of = objectfilter.Parser(expression).Parse()
return of.Compile(objectfilter.LowercaseAttributeFilterImplementation)
except objectfilter.Error as e:
raise DefinitionError(e)
def ParseObjs(self, objs, expression):
"""Parse one or more objects using an objectfilter expression."""
filt = self._Compile(expression)
for result in filt.Filter(objs):
yield result
def Validate(self, expression):
self._Compile(expression)
class ForEach(ObjectFilter):
"""A filter that extracts values from a repeated field.
This filter is a convenient way to extract repeated items from an object
for individual processing.
Args:
objs: One or more objects.
expression: An expression specifying what attribute to expand.
Yields:
The RDF values of elements in the repeated fields.
"""
def Validate(self, expression):
attrs = [x.strip() for x in expression.strip().split() if x]
if attrs:
if len(attrs) == 1:
return
raise DefinitionError("ForEach has multiple attributes: %s" % expression)
raise DefinitionError("ForEach sets no attribute: %s" % expression)
def ParseObjs(self, objs, expression):
for obj in objs:
repeated_vals = getattr(obj, expression)
for val in repeated_vals:
yield rdf_protodict.AttributedDict({"item": val})
class ItemFilter(ObjectFilter):
"""A filter that extracts the first match item from a objectfilter expression.
Applies an objectfilter expression to an object. The first attribute named in
  the expression is returned as a key/value item.
This filter is a convenient way to cherry pick selected items from an object
for reporting or further filters.
Args:
objs: One or more objects.
    expression: An objectfilter expression.
Yields:
AttributedDict RDF values for matching items, where key is the attribute
name, and value is the attribute value.
"""
def ParseObjs(self, objs, expression):
filt = self._Compile(expression)
key = expression.split(None, 1)[0]
for result in filt.Filter(objs):
val = getattr(result, key)
yield rdf_protodict.AttributedDict({"key": key, "value": val})
class StatFilter(Filter):
"""Filters StatResult RDF Values based on file attributes.
Filters are added as expressions that include one or more key:value inputs
  separated by spaces.
StatResult RDF values can be filtered on several fields:
- path_re: A regex search on the pathname attribute.
- file_re: A regex search on the filename attribute.
- file_type: One of BLOCK,CHARACTER,DIRECTORY,FIFO,REGULAR,SOCKET,SYMLINK
  - gid: A numeric comparison of gid values: (!|>|>=|<=|<|=)gid
- uid: A numeric comparison of uid values: (!|>|>=|<=|<|=)uid
- mask: The permissions bits that should be checked. Defaults to 7777.
- mode: The permissions bits the StatResult should have after the mask is
applied.
Args:
expression: A statfilter expression
Yields:
StatResult objects that match the filter term.
"""
_KEYS = {"path_re", "file_re", "file_type", "uid", "gid", "mode", "mask"}
_UID_GID_RE = re.compile(r"\A(!|>|>=|<=|<|=)([0-9]+)\Z")
_PERM_RE = re.compile(r"\A[0-7]{4}\Z")
_TYPES = {
"BLOCK": stat.S_ISBLK,
"CHARACTER": stat.S_ISCHR,
"DIRECTORY": stat.S_ISDIR,
"FIFO": stat.S_ISFIFO,
"REGULAR": stat.S_ISREG,
"SOCKET": stat.S_ISSOCK,
"SYMLINK": stat.S_ISLNK
}
def _MatchFile(self, stat_entry):
filename = os.path.basename(stat_entry.pathspec.path)
return self.file_re.search(filename)
def _MatchGid(self, stat_entry):
for matcher, value in self.gid_matchers:
if not matcher(stat_entry.st_gid, value):
return False
return True
def _MatchMode(self, stat_entry):
return stat_entry.st_mode & self.mask == self.mode
def _MatchPath(self, stat_entry):
return self.path_re.search(stat_entry.pathspec.path)
def _MatchType(self, stat_entry):
return self.file_type(int(stat_entry.st_mode))
def _MatchUid(self, stat_entry):
for matcher, value in self.uid_matchers:
if not matcher(stat_entry.st_uid, value):
return False
return True
def _Comparator(self, operator):
"""Generate lambdas for uid and gid comparison."""
if operator == "=":
return lambda x, y: x == y
elif operator == ">=":
return lambda x, y: x >= y
elif operator == ">":
return lambda x, y: x > y
elif operator == "<=":
return lambda x, y: x <= y
elif operator == "<":
return lambda x, y: x < y
elif operator == "!":
return lambda x, y: x != y
raise DefinitionError("Invalid comparison operator %s" % operator)
def _Flush(self):
self.cfg = {}
self.matchers = []
self.mask = 0
self.mode = 0
self.uid_matchers = []
self.gid_matchers = []
self.file_type = ""
self.file_re = ""
self.path_re = ""
def _Load(self, expression):
self._Flush()
parser = config_file.KeyValueParser(
kv_sep=":", sep=",", term=(r"\s+", r"\n"))
parsed = {}
for entry in parser.ParseEntries(expression):
parsed.update(entry)
self.cfg = rdf_protodict.AttributedDict(parsed)
return parsed
def _Initialize(self):
"""Initialize the filter configuration from a validated configuration.
The configuration is read. Active filters are added to the matcher list,
which is used to process the Stat values.
"""
if self.cfg.mask:
self.mask = int(self.cfg.mask[0], 8)
else:
self.mask = 0o7777
if self.cfg.mode:
self.mode = int(self.cfg.mode[0], 8)
self.matchers.append(self._MatchMode)
if self.cfg.gid:
for gid in self.cfg.gid:
matched = self._UID_GID_RE.match(gid)
if matched:
o, v = matched.groups()
self.gid_matchers.append((self._Comparator(o), int(v)))
self.matchers.append(self._MatchGid)
if self.cfg.uid:
for uid in self.cfg.uid:
matched = self._UID_GID_RE.match(uid)
if matched:
o, v = matched.groups()
self.uid_matchers.append((self._Comparator(o), int(v)))
self.matchers.append(self._MatchUid)
if self.cfg.file_re:
self.file_re = re.compile(self.cfg.file_re[0])
self.matchers.append(self._MatchFile)
if self.cfg.path_re:
self.path_re = re.compile(self.cfg.path_re[0])
self.matchers.append(self._MatchPath)
if self.cfg.file_type:
self.file_type = self._TYPES.get(self.cfg.file_type[0].upper())
self.matchers.append(self._MatchType)
def ParseObjs(self, objs, expression):
"""Parse one or more objects by testing if it has matching stat results.
Args:
objs: An iterable of objects that should be checked.
expression: A StatFilter expression, e.g.:
"uid:>0 gid:=0 file_type:link"
Yields:
matching objects.
"""
self.Validate(expression)
for obj in objs:
if not isinstance(obj, rdf_client_fs.StatEntry):
continue
# If all match conditions pass, yield the object.
for match in self.matchers:
if not match(obj):
break
else:
yield obj
def Validate(self, expression):
"""Validates that a parsed rule entry is valid for fschecker.
Args:
expression: A rule expression.
Raises:
DefinitionError: If the filter definition could not be validated.
Returns:
True if the expression validated OK.
"""
parsed = self._Load(expression)
if not parsed:
raise DefinitionError("Empty StatFilter expression.")
bad_keys = set(parsed) - self._KEYS
if bad_keys:
raise DefinitionError("Invalid parameters: %s" % ",".join(bad_keys))
if self.cfg.mask and not self.cfg.mode:
raise DefinitionError("mode can only be set when mask is also defined.")
if self.cfg.mask:
if len(self.cfg.mask) > 1:
raise DefinitionError("Too many mask values defined.")
if not self._PERM_RE.match(self.cfg.mask[0]):
raise DefinitionError("mask=%s is not octal, e.g. 0600" % self.cfg.mask)
if self.cfg.mode:
if len(self.cfg.mode) > 1:
raise DefinitionError("Too many mode values defined.")
if not self._PERM_RE.match(self.cfg.mode[0]):
raise DefinitionError("mode=%s is not octal, e.g. 0600" % self.cfg.mode)
if self.cfg.gid:
for gid in self.cfg.gid:
matched = self._UID_GID_RE.match(gid)
if not matched:
raise DefinitionError("gid: %s is not an integer preceded by "
"!, >, < or =." % gid)
if self.cfg.uid:
for uid in self.cfg.uid:
matched = self._UID_GID_RE.match(uid)
if not matched:
raise DefinitionError("uid: %s is not an integer preceded by "
"!, >, < or =." % uid)
if self.cfg.file_re:
if len(self.cfg.file_re) > 1:
raise DefinitionError("Too many regexes defined: %s" % self.cfg.file_re)
try:
self.file_re = re.compile(self.cfg.file_re[0])
except (re.error, TypeError) as e:
raise DefinitionError("Invalid file regex: %s" % e)
if self.cfg.path_re:
if len(self.cfg.path_re) > 1:
raise DefinitionError("Too many regexes defined: %s" % self.cfg.path_re)
try:
self.path_re = re.compile(self.cfg.path_re[0])
except (re.error, TypeError) as e:
raise DefinitionError("Invalid path regex: %s" % e)
if self.cfg.file_type:
if len(self.cfg.file_type) > 1:
raise DefinitionError(
"Too many file types defined: %s" % self.cfg.file_type)
file_type = self.cfg.file_type[0].upper()
if file_type not in self._TYPES:
raise DefinitionError("Unsupported file type %s" % file_type)
self._Initialize()
if not self.matchers:
raise DefinitionError("StatFilter has no actions: %s" % expression)
return True
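# Illustrative sketch of a StatFilter expression: keep only root-owned regular
# files from previously collected StatEntry values ("stat_entries" is assumed
# to exist in the calling check).
#   stat_filter = Filter.GetFilter("StatFilter")
#   matches = stat_filter.Parse(stat_entries, "uid:=0 file_type:REGULAR")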
class RDFFilter(Filter):
"""Filter results to specified rdf types."""
def _RDFTypes(self, names):
for type_name in names.split(","):
yield type_name
def _GetClass(self, type_name):
return rdfvalue.RDFValue.classes.get(type_name)
def ParseObjs(self, objs, type_names):
"""Parse one or more objects by testing if it is a known RDF class."""
for obj in objs:
for type_name in self._RDFTypes(type_names):
if isinstance(obj, self._GetClass(type_name)):
yield obj
def Validate(self, type_names):
"""Filtered types need to be RDFValues."""
errs = [n for n in self._RDFTypes(type_names) if not self._GetClass(n)]
if errs:
raise DefinitionError("Undefined RDF Types: %s" % ",".join(errs))
| {
"content_hash": "c630d4bdb272b88eb672599e26066e2f",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 80,
"avg_line_length": 31.680212014134277,
"alnum_prop": 0.6576878032457755,
"repo_name": "dunkhong/grr",
"id": "232b04021fb5b61feca6188a4278a5ae8f18446f",
"size": "17953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/check_lib/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ark Database'
copyright = u'2015, Liu Dong'
author = u'Liu Dong'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ArkDatabasedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ArkDatabase.tex', u'Ark Database Documentation',
u'Liu Dong', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'arkdatabase', u'Ark Database Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ArkDatabase', u'Ark Database Documentation',
author, 'ArkDatabase', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "3a7968cec2d18359545e7c5ef7f9a0e3",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 98,
"avg_line_length": 33.068345323741006,
"alnum_prop": 0.7067333840965953,
"repo_name": "arkphp/database",
"id": "509cc57123da15eec36a5481a917f3713be5112e",
"size": "9618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "374"
},
{
"name": "PHP",
"bytes": "68875"
}
],
"symlink_target": ""
} |
from chronicle import Messenger
from chronicle.responders.scribes import TextScribe
class TestClass(object):
zero = 0
def successor(self, number):
return number + 1
test_object = TestClass()
scribe = TextScribe('TextLogging.txt')
messenger = Messenger(test_object, scribe)
messenger.zero
messenger.successor(6)
messenger.successor(-5.37)
| {
"content_hash": "e5faa6aebc56b4f2c27f02f5926b72d4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 51,
"avg_line_length": 22.625,
"alnum_prop": 0.7458563535911602,
"repo_name": "nkashy1/chronicle",
"id": "b1fcb1362ca4f73f9aab4d86e02ca1a7fad48a95",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/TextLogging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11823"
}
],
"symlink_target": ""
} |
"""
:platform: Unix, Windows
:synopsis: This module implements a genetic algorithm to create the best villain.
.. module:: mejorVillano
.. module author::
Garrido Valencia Alan
Sánchez Baños Margarito
Torres Ortiz Luis Miguel
Zuñiga Hernandez Jonatan
"""
from random import randint
class Villain:
"""
    Villain class.
    Attributes:
        - abilities: A list with the values of the villain's abilities
    It can be used as follows:
    >>> darthVader=Villain()
    >>> darthVader.abilities
    [2,3,0,3,7,1,2,4,3,8]
    #A list can also be passed:
>>> kidBu=Villain([4,4,1,2,1,1,5,4,3,2])
>>> kidBu.abilities
[4,4,1,2,1,1,5,4,3,2]
"""
def __init__(self,*abilities):
'''
        Class constructor; it can instantiate in two different ways:
            -Without parameters, the abilities are created randomly
            -A list with the values for the abilities is passed
        :param abilities: List with the values of the abilities.
:type abilities: list.
'''
if abilities:
self.abilities=abilities[0]
else:
abilities=[]
for i in range(0,10):
abilities.append(randint(0,9))
self.abilities=abilities
def populate(max):
"""
    Creates a population of villains equal to the value passed as a parameter.
    :param max: Maximum number of villains required.
    :type max: int.
    :returns: list population-- List with the villains and their values.
"""
population=[]
for i in range(0,max):
population.append(Villain())
return population
def createGeneration(population):
"""
    Creates a new-generation villain with values selected at random from the population.
    :param population: List of villains from which the new villain is obtained.
    :type population: list.
    :returns: Obj Villain-- Villain with values selected from the population.
"""
abilities=[]
for i in range(0,10):
abilities.append(population[randint(0,len(population)-1)].abilities[i])
return Villain(abilities)
def createCrossbreed(Villain1,Villain2):
"""
    Creates a new villain from the combination of two others.
    :param Villain1: Villain from which a random number of abilities is taken.
    :type Villain1: Villain.
    :param Villain2: Villain from which the rest of the abilities are taken.
    :type Villain2: Villain.
    :returns: Obj Villain-- Villain with values selected from the other two.
"""
abilities=[]
split=randint(0,9)
abilities=Villain1.abilities[:split]+Villain2.abilities[split:]
return Villain(abilities)
def createNewPopulation(Villain1,Villain2):
"""
    Creates a new generation of villains built from two others.
    :param Villain1: Villain from which a random number of abilities is taken.
    :type Villain1: Villain.
    :param Villain2: Villain from which the rest of the abilities are taken.
    :type Villain2: Villain.
    :returns: list-- List with the new generation of villains.
"""
newPopulation=[]
for newVillain in range(0,10):
        newPopulation.append(createCrossbreed(Villain1,Villain2))
return newPopulation
if __name__ == '__main__':
    population=populate(100)
    for villain in population:
        print(villain.abilities)
    print("Population size: ",len(population))
    megamente=createGeneration(population)
    print("Megamente's abilities are: %s" %(megamente.abilities))
    sedusa=createGeneration(population)
    print("Sedusa's abilities are: %s" %(sedusa.abilities))
    sedumente=createCrossbreed(megamente,sedusa)
    print("Sedumente's abilities are: %s" %(sedumente.abilities))
    newPopulation=createNewPopulation(megamente,sedusa)
    for seduvillain in newPopulation:
        print(seduvillain.abilities)
    sedumente1=createGeneration(newPopulation)
    print("Sedumente1's abilities are: %s" %(sedumente1.abilities))
    sedumente2=createGeneration(population)
    print("Sedumente2's abilities are: %s" %(sedumente2.abilities))
    drDoofenshmirtz=createCrossbreed(sedumente1,sedumente2)
    print("The best villain's abilities are: %s" %(drDoofenshmirtz.abilities))
| {
"content_hash": "e4773ef22f1056dbf2e960efdf1f3925",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 106,
"avg_line_length": 36.46341463414634,
"alnum_prop": 0.6644370122630993,
"repo_name": "allanstone/InteligenciaArtificial",
"id": "641f8f7d515b9eb2dcab9f6ea469bbf025714022",
"size": "4556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tarea 3/AlgoritmoGenetico/mejorVillano.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132481"
},
{
"name": "HTML",
"bytes": "256646"
},
{
"name": "JavaScript",
"bytes": "415033"
},
{
"name": "Prolog",
"bytes": "1257"
},
{
"name": "Python",
"bytes": "224135"
},
{
"name": "Racket",
"bytes": "2569"
}
],
"symlink_target": ""
} |
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

# `classes` (the list of class labels) is assumed to be defined elsewhere in
# the surrounding training script; it is referenced by the final Dense layer.
model = Sequential()

model.add(Conv2D(8, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(40,1200,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, kernel_size=(3, 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(24, kernel_size=(3, 3),
activation='sigmoid'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(len(classes), activation='softmax'))
model.compile(loss=keras.losses.mean_squared_error, optimizer="rmsprop", metrics=['accuracy']) | {
"content_hash": "29695437f4815739c2432f77949a77cc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 94,
"avg_line_length": 34.166666666666664,
"alnum_prop": 0.6959349593495935,
"repo_name": "alex-ta/Fontinator",
"id": "064fbb72f406ca8d2bc1dcf50a9aebe5172c1f95",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NeuralNet/Alex/models/model_01.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "76272"
}
],
"symlink_target": ""
} |
class Config:
draw_coverage = False
draw_textures = False
draw_grid = False
draw_overdraw = False
draw_texTypes = ['hght', 'mate', 'grass.extm', 'water.extm']
draw_texType = "hght"
draw_channels = ['all', 'r', 'g', 'b', 'a']
draw_channel = "all"
disable_alpha = False
@staticmethod
def NextTexType():
for i in range(0, len(Config.draw_texTypes)):
if Config.draw_texTypes[i] == Config.draw_texType:
Config.draw_texType = Config.draw_texTypes[(i + 1) % len(Config.draw_texTypes)]
break
@staticmethod
def NextChannel():
for i in range(0, len(Config.draw_channels)):
if Config.draw_channels[i] == Config.draw_channel:
Config.draw_channel = Config.draw_channels[(i + 1) % len(Config.draw_channels)]
break
| {
"content_hash": "d758d2e550304cc30bac6e6ffaf98abc",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 95,
"avg_line_length": 31.88888888888889,
"alnum_prop": 0.578397212543554,
"repo_name": "simply-jos/birth-of-the-toolkit",
"id": "f1188b66b244a827f3554a90dcae8acd9dccc110",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "REScripts/TSCBViewer/BoTWHeightmap/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "9939"
},
{
"name": "Python",
"bytes": "23841"
}
],
"symlink_target": ""
} |
"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import atexit
import os
import stat
import tarfile
import tempfile
import time
from . import types as t
from .config import (
IntegrationConfig,
ShellConfig,
)
from .util import (
display,
ANSIBLE_SOURCE_ROOT,
remove_tree,
is_subdir,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
# improve performance by disabling uid/gid lookups
tarfile.pwd = None
tarfile.grp = None
# this bin symlink map must exactly match the contents of the bin directory
# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
ANSIBLE_BIN_SYMLINK_MAP = {
'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
'ansible-config': 'ansible',
'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
'ansible-console': 'ansible',
'ansible-doc': 'ansible',
'ansible-galaxy': 'ansible',
'ansible-inventory': 'ansible',
'ansible-playbook': 'ansible',
'ansible-pull': 'ansible',
'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
'ansible-vault': 'ansible',
}
def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
"""Create a payload for delegation."""
if args.explain:
return
files = list(data_context().ansible_source)
filters = {}
def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
"""Make the given file executable."""
tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
return tar_info
if not ANSIBLE_SOURCE_ROOT:
# reconstruct the bin directory which is not available when running from an ansible install
files.extend(create_temporary_bin_files(args))
filters.update(dict((path[3:], make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
if not data_context().content.is_ansible:
# exclude unnecessary files when not testing ansible itself
files = [f for f in files if
is_subdir(f[1], 'bin/') or
is_subdir(f[1], 'lib/ansible/') or
(is_subdir(f[1], 'test/lib/ansible_test/') and not is_subdir(f[1], 'test/lib/ansible_test/tests/'))]
if not isinstance(args, (ShellConfig, IntegrationConfig)):
# exclude built-in ansible modules when they are not needed
files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
collection_layouts = data_context().create_collection_layouts()
for layout in collection_layouts:
# include files from each collection in the same collection root as the content being tested
files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
for callback in data_context().payload_callbacks:
callback(files)
# maintain predictable file order
files = sorted(set(files))
display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
start = time.time()
with tarfile.TarFile.gzopen(dst_path, mode='w', compresslevel=4) as tar:
for src, dst in files:
display.info('%s -> %s' % (src, dst), verbosity=4)
tar.add(src, dst, filter=filters.get(dst))
duration = time.time() - start
payload_size_bytes = os.path.getsize(dst_path)
display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
"""Create a temporary ansible bin directory populated using the symlink map."""
if args.explain:
temp_path = '/tmp/ansible-tmp-bin'
else:
temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
atexit.register(remove_tree, temp_path)
for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
path = os.path.join(temp_path, name)
os.symlink(dest, path)
return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
| {
"content_hash": "8cc38e69e54d36d7c3857b7a81b7ffa2",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 146,
"avg_line_length": 36.71544715447155,
"alnum_prop": 0.6616474756421612,
"repo_name": "thaim/ansible",
"id": "0a6754f51348e9b26acd46a52fa041de5c1deca7",
"size": "4516",
"binary": false,
"copies": "13",
"ref": "refs/heads/fix-broken-link",
"path": "test/lib/ansible_test/_internal/payload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class SlbServerStatistics(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param out_bytes: {"type": "number", "format": "number"}
:param in_bytes: {"type": "number", "format": "number"}
:param p_conn: {"type": "number", "format": "number"}
:param out_pkts: {"type": "number", "format": "number"}
:param cur_conn: {"type": "number", "format": "number"}
:param time: {"type": "number", "format": "number"}
:param in_pkts: {"type": "number", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "slb-server-statistics"
self.DeviceProxy = ""
self.out_bytes = ""
self.in_bytes = ""
self.p_conn = ""
self.out_pkts = ""
self.cur_conn = ""
self.time = ""
self.in_pkts = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param slb_server_statistics: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"out_bytes": {"type": "number", "format": "number"}, "in_bytes": {"type": "number", "format": "number"}, "p_conn": {"type": "number", "format": "number"}, "out_pkts": {"type": "number", "format": "number"}, "cur_conn": {"type": "number", "format": "number"}, "time": {"type": "number", "format": "number"}, "in_pkts": {"type": "number", "format": "number"}, "optional": true}}]}
:param slb_server_name: {"type": "string", "format": "string"}
:param end_time: {"type": "number", "format": "number"}
:param start_time: {"type": "number", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.slb_server_statistics = []
self.slb_server_name = ""
self.end_time = ""
self.start_time = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class SlbServer(A10BaseClass):
"""Class Description::
Operational Status for the object slb-server.
Class slb-server supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/rrd/slb-server/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "slb-server"
self.a10_url="/axapi/v3/rrd/slb-server/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
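# Illustrative sketch: "device_proxy" is assumed to come from the SDK's session
# handling (see common/device_proxy.py); it is not created in this module.
#   slb_server_oper = SlbServer(DeviceProxy=device_proxy)
#   print(slb_server_oper.a10_url)  # /axapi/v3/rrd/slb-server/oper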
| {
"content_hash": "b203305349ce8ceb87fe17735180c631",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 521,
"avg_line_length": 33.697916666666664,
"alnum_prop": 0.5811437403400309,
"repo_name": "a10networks/a10sdk-python",
"id": "b7564de3a76a05fe502fb6313f4b1d8c15672d3e",
"size": "3235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/rrd/rrd_slb_server_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
} |
from .jeeves import RunBot
if __name__ == "__main__":
RunBot()
| {
"content_hash": "37461dab6425f519d45e975a58dd1f85",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 26,
"avg_line_length": 17,
"alnum_prop": 0.5588235294117647,
"repo_name": "havokoc/MyManJeeves",
"id": "14808df9dbb6b7214e2b4f8daeef747ccbb7c832",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Jeeves/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "39"
},
{
"name": "Python",
"bytes": "7113"
}
],
"symlink_target": ""
} |
import numpy as np
import itertools as it
class XyzError(Exception):
pass
class Reader(object):
def __init__(self, filename, **kwargs):
self.filename = filename
self.file = open(filename, 'r')
@property
def natoms(self):
f = open(self.filename, 'r')
natoms = int(f.next())
f.close()
return natoms
@property
def nlines(self):
f = open(self.filename, 'r')
for idx, line in enumerate(f):
pass
nlines = (idx+1)/(self.natoms+2)
f.close()
return nlines
def read(self):
config = self.next()
while config is not None:
yield config
config = self.next()
def next(self):
try:
natoms = int(self.file.next())
except StopIteration:
return None
try:
self.file.next()
except StopIteration:
raise XyzError("File ended unexpectedly when reading 2nd line.")
firstline = self.file.next().split()
ndim = len(firstline)
config = np.zeros((ndim, natoms), dtype='float')
for idx in xrange(ndim):
config[idx, 0] = firstline[idx]
        for idx_atom, line in it.izip(xrange(natoms-1), self.file):
            fields = line.split()
            for idx in xrange(ndim):
                config[idx, idx_atom+1] = fields[idx]
return config
def readlines(self, *args):
coords = []
coord = self.next()
while coord is not None:
coords.append(coord)
coord = self.next()
return np.array(coords)
def close(self):
self.file.close()
def __iter__(self):
return self.read()
readline = next
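# Illustrative usage sketch, assuming an xyz-formatted trajectory 'traj.xyz':
#   reader = Reader('traj.xyz')
#   for config in reader:          # one (ndim, natoms) array per configuration
#       print(config.shape)
#   reader.close()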
| {
"content_hash": "d968e68650495251f4c121699d18ef96",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 76,
"avg_line_length": 22.443037974683545,
"alnum_prop": 0.5318668922729837,
"repo_name": "jp43/lsdmap",
"id": "9a8554c15c6cf8c7e0c58dab8a0dcd9972b8fd21",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lsdmap/rw/xyz.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2240"
},
{
"name": "Python",
"bytes": "130846"
},
{
"name": "Shell",
"bytes": "2967"
},
{
"name": "nesC",
"bytes": "242"
}
],
"symlink_target": ""
} |
import mock
from django.http import Http404
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_admin.views.common.bulkimport_users_common import AbstractTypeInUsersView
class AbstractTypeInUsersViewTestMixin(cradmin_testhelpers.TestCaseMixin):
def mock_crinstance_with_devilry_role(self, devilryrole=PermissionGroup.GROUPTYPE_DEPARTMENTADMIN):
mock_crinstance = mock.MagicMock()
mock_crinstance.get_devilryrole_for_requestuser.return_value = devilryrole
return mock_crinstance
def test_user_devilryrole_periodadmin_raises_404(self):
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role(
devilryrole=PermissionGroup.GROUPTYPE_PERIODADMIN)
)
def test_user_devilryrole_subjectadmin_raises_404(self):
with self.assertRaises(Http404):
self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role(
                    devilryrole=PermissionGroup.GROUPTYPE_SUBJECTADMIN)
)
def test_user_devilryrole_departmentadmin_does_not_raise_404(self):
self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
def test_get_render_form(self):
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
self.assertTrue(mockresponse.selector.exists('textarea#id_users_blob'))
def test_get_render_form_help_text_email_backend(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=True):
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
self.assertEqual(mockresponse.selector.one('#hint_id_users_blob').alltext_normalized,
'Type or paste in email addresses separated '
'by comma (","), space or one user on each line.')
def test_get_render_form_help_text_username_backend(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=False):
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
self.assertEqual(mockresponse.selector.one('#hint_id_users_blob').alltext_normalized,
'Type or paste in usernames separated '
'by comma (","), space or one user on each line.')
def test_get_render_form_placeholder_email_backend(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=True):
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
self.assertEqual(mockresponse.selector.one('#id_users_blob')['placeholder'],
'[email protected]\[email protected]')
def test_get_render_form_placeholder_username_backend(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=False):
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
self.assertEqual(mockresponse.selector.one('#id_users_blob')['placeholder'],
'jane\njohn')
def test_post_blank(self):
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role()
)
self.assertEqual('This field is required.',
mockresponse.selector.one('#error_1_id_users_blob').alltext_normalized)
def test_post_invalid_emails_simple(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=True):
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role(),
requestkwargs=dict(data={
'users_blob': 'test'
}))
self.assertEqual('Invalid email addresses: test',
mockresponse.selector.one('#error_1_id_users_blob').alltext_normalized)
def test_post_invalid_emails_multiple(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=True):
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role(),
requestkwargs=dict(data={
'users_blob': 'test,[email protected],test3@@example.com'
}))
self.assertEqual('Invalid email addresses: test, test3@@example.com',
mockresponse.selector.one('#error_1_id_users_blob').alltext_normalized)
def test_post_invalid_usernames_simple(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=False):
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role(),
requestkwargs=dict(data={
'users_blob': 'Test'
}))
self.assertEqual('Invalid usernames: Test',
mockresponse.selector.one('#error_1_id_users_blob').alltext_normalized)
def test_post_invalid_usernames_multiple(self):
with self.settings(CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND=False):
mockresponse = self.mock_http200_postrequest_htmls(
cradmin_instance=self.mock_crinstance_with_devilry_role(),
requestkwargs=dict(data={
'users_blob': 'test,[email protected],test3.ing,Test4,test5'
}))
self.assertEqual('Invalid usernames: Test4, [email protected], test3.ing',
mockresponse.selector.one('#error_1_id_users_blob').alltext_normalized)
class MockTypeInUsersView(AbstractTypeInUsersView):
def get_backlink_label(self):
return 'Back'
def get_backlink_url(self):
return '/back'
class TestAbstractTypeInUsersView(TestCase, AbstractTypeInUsersViewTestMixin):
viewclass = MockTypeInUsersView
def test_split_users_blob_empty(self):
self.assertEqual(
set(),
AbstractTypeInUsersView.split_users_blob(''))
def test_split_users_blob_single(self):
self.assertEqual(
{'[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected]'))
def test_split_users_blob_multiple(self):
self.assertEqual(
{'[email protected]', '[email protected]', '[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected] [email protected] [email protected]'))
def test_split_users_blob_newlines(self):
self.assertEqual(
{'[email protected]', '[email protected]', '[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected]\[email protected]\[email protected]'))
def test_split_users_blob_comma(self):
self.assertEqual(
{'[email protected]', '[email protected]', '[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected],[email protected],[email protected]'))
def test_split_users_blob_semicolon(self):
self.assertEqual(
{'[email protected]', '[email protected]', '[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected];[email protected];[email protected]'))
def test_split_users_blob_messy_middle(self):
self.assertEqual(
{'[email protected]', '[email protected]', '[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected], \[email protected]'
'\n,\n, [email protected]'))
def test_split_users_blob_messy_suffix(self):
self.assertEqual(
{'[email protected]'},
AbstractTypeInUsersView.split_users_blob('[email protected], \n;\n,'))
def test_split_users_blob_messy_prefix(self):
self.assertEqual(
{'[email protected]'},
AbstractTypeInUsersView.split_users_blob(', ;\n\n,\[email protected]'))
def test_split_users_blob_messy_everywhere(self):
self.assertEqual(
{'[email protected]', '[email protected]', '[email protected]'},
AbstractTypeInUsersView.split_users_blob(' \n,,; \n, [email protected], ,;,\[email protected]'
'\n,\n, [email protected],,,,\n, \n, '))
| {
"content_hash": "5c2d2e243f350c8e0998609eecc5349f",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 111,
"avg_line_length": 48.34782608695652,
"alnum_prop": 0.633318345323741,
"repo_name": "devilry/devilry-django",
"id": "094bc256a8ef83d95aa8d9f19ba7ca2439188455",
"size": "8896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/devilry_admin/tests/common/test_bulkimport_users_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
} |