text | meta |
---|---|
import pytest
blueprint_list = []
@pytest.fixture(scope='function', params=blueprint_list)
def blueprint_examples(**_):
pass
def test_blueprints(blueprint_examples):
assert blueprint_examples is None
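# A minimal sketch (an assumption about intent, not part of the original
# file) of how this placeholder is typically fleshed out once blueprint
# paths exist: the fixture takes `request` and hands each parametrized
# blueprint path to the test.
#
# blueprint_list = ['examples/blueprint.yaml']  # hypothetical path
#
# @pytest.fixture(scope='function', params=blueprint_list)
# def blueprint_examples(request):
#     return request.param
#
# def test_blueprints(blueprint_examples):
#     assert blueprint_examples is not None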
| {
"content_hash": "8efa4f7fe234ab76c4e646992d9948b8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 17.75,
"alnum_prop": 0.7370892018779343,
"repo_name": "cloudify-cosmo/cloudify-diamond-plugin",
"id": "c7e25259c6ce1ef8a08d27ff5b21772326fbb298",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".circleci/test_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40130"
},
{
"name": "Shell",
"bytes": "1655"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from sonLib.bioio import popenCatch
parser = ArgumentParser()
parser.add_argument('halPath')
parser.add_argument('outputTsv')
opts = parser.parse_args()
genomes = popenCatch("halStats --genomes %s" % (opts.halPath)).split()
outFile = open(opts.outputTsv, 'w')
for genome in genomes:
    # Branch length of the edge above this genome in the given hal file.
    branchLength = float(popenCatch("halStats --branchLength %s %s" % (genome, opts.halPath)))
    outFile.write("%s\t%f\n" % (genome, branchLength))
outFile.close()
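# Example invocation (hypothetical file names), assuming the HAL command-line
# tools and sonLib are installed:
#
#   python getBranchLengthsTsv.py alignment.hal branchLengths.tsv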
| {
"content_hash": "66bf70273c7df6ca0cb595ea3cc4c4ee",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.7358916478555305,
"repo_name": "joelarmstrong/analysis-purgatory",
"id": "98bf6a91ad1e3fced3c564387d4dfaf755eb155d",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "100way-comparison/getBranchLengthsTsv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82125"
},
{
"name": "R",
"bytes": "16810"
},
{
"name": "Rust",
"bytes": "17080"
},
{
"name": "Shell",
"bytes": "4890"
}
],
"symlink_target": ""
} |
class Null(object):
"""
Object is a Harsh Null.
Object can be assigned and initialised.
Will the always raise TypeError.
"""
def __init__(self, *args, **kwargs):
return
# object calling
def __call__(self, *args, **kwargs):
raise TypeError("Object is Null.")
# attribute handling
def __getattr__(self, name):
raise TypeError("Object is Null.")
def __setattr__(self, name, value):
raise TypeError("Object is Null.")
def __delattr__(self, name):
raise TypeError("Object is Null.")
# misc.
def __repr__(self):
raise TypeError("Object is Null.")
def __str__(self):
raise TypeError("Object is Null.")
def __dir__(self):
raise TypeError("Object is Null.")
# math
def __add__(self, other):
raise TypeError("Object is Null.")
def __sub__(self, other):
raise TypeError("Object is Null.")
def __radd__(self, other):
raise TypeError("Object is Null.")
def __rsub__(self, other):
raise TypeError("Object is Null.")
def __cmp__(self, other):
raise TypeError("Object is Null.")
def __nonzero__(self):
raise TypeError("Object is Null.")
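# Minimal usage sketch (illustration only, not part of the original module):
# construction and name binding are the only safe operations; everything
# else raises TypeError.
if __name__ == '__main__':
    placeholder = Null()       # creating and assigning works
    try:
        placeholder.some_attr  # any attribute access raises
    except TypeError:
        pass                   # "Object is Null."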
| {
"content_hash": "2b535a77c2cd8416ee821c3284018d55",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 43,
"avg_line_length": 24.830188679245282,
"alnum_prop": 0.5303951367781155,
"repo_name": "AugustusLongeye/Technic-Script-2",
"id": "a7336771a302f73cdb3764932326ccbc639291e6",
"size": "1316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "harsh_null.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28105"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
class Secret(ndb.Model):
value = ndb.StringProperty()
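# Minimal usage sketch (an assumption, not part of the original file): the
# model typically holds a single row that is read or lazily created like this.
def get_or_create_secret(default_value=''):
    secret = Secret.query().get()
    if secret is None:
        secret = Secret(value=default_value)
        secret.put()
    return secret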
| {
"content_hash": "61bf3c08664e8356a32a4db2976ea434",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 36,
"avg_line_length": 19.4,
"alnum_prop": 0.7422680412371134,
"repo_name": "Yelp/love",
"id": "cc6b7fa06f65dad33b11b9f027867cb9aa221765",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/secret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5902"
},
{
"name": "HTML",
"bytes": "43114"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Makefile",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "118945"
}
],
"symlink_target": ""
} |
from workshops.models import Tag
from workshops.tests.base import TestBase
class TestTagManager(TestBase):
def setUp(self):
super()._setUpTags()
def test_main_tags(self):
# Arrange
expected = [
Tag.objects.get(name="DC"),
Tag.objects.get(name="ITT"),
Tag.objects.get(name="LC"),
Tag.objects.get(name="SWC"),
Tag.objects.get(name="TTT"),
Tag.objects.get(name="WiSE"),
]
# Act
tags = Tag.objects.main_tags().order_by("name")
# Assert
self.assertEqual(list(tags), expected)
def test_carpentries_tags(self):
# Arrange
expected = [
Tag.objects.get(name="DC"),
Tag.objects.get(name="LC"),
Tag.objects.get(name="SWC"),
]
# Act
tags = Tag.objects.carpentries().order_by("name")
# Assert
self.assertEqual(list(tags), expected)
def test_strings(self):
# Arrange
expected = ["DC", "LC", "SWC"]
# Act
tags = Tag.objects.carpentries().order_by("name").strings()
# Assert
self.assertEqual(list(tags), expected)
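# For context only (hypothetical sketch; the real manager lives in
# workshops.models and may differ): a QuerySet exposing the three methods
# exercised above could look roughly like this.
#
# class TagQuerySet(models.QuerySet):
#     CARPENTRIES_TAG_NAMES = ["SWC", "DC", "LC"]
#     MAIN_TAG_NAMES = CARPENTRIES_TAG_NAMES + ["TTT", "ITT", "WiSE"]
#
#     def main_tags(self):
#         return self.filter(name__in=self.MAIN_TAG_NAMES)
#
#     def carpentries(self):
#         return self.filter(name__in=self.CARPENTRIES_TAG_NAMES)
#
#     def strings(self):
#         return self.values_list("name", flat=True)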
| {
"content_hash": "bf45a6b187e090fcfe491c0066585f0b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 67,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.5368509212730318,
"repo_name": "pbanaszkiewicz/amy",
"id": "5d3d77c7f64a19e4b08a9bfbf8616c4de71ef1ce",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "amy/workshops/tests/test_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
} |
import mock
from rally.plugins.openstack.scenarios.murano import environments
from tests.unit import test
MURANO_SCENARIO = ("rally.plugins.openstack.scenarios.murano."
"environments")
class MuranoEnvironmentsTestCase(test.ScenarioTestCase):
def _get_context(self):
self.context.update({
"tenant": {
"packages": [mock.MagicMock(fully_qualified_name="fake")]
},
"user": {
"tenant_id": "fake_tenant_id"
},
"config": {
"murano_packages": {
"app_package": (
"rally-jobs/extra/murano/"
"applications/HelloReporter/"
"io.murano.apps.HelloReporter.zip")
}
}
})
return self.context
def test_list_environments(self):
TEST_TARGET = "ListEnvironments"
list_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_list_environments")
scenario = environments.ListEnvironments(self.context)
with mock.patch(list_env_module) as mock_list_env:
scenario.run()
mock_list_env.assert_called_once_with()
def test_create_and_delete_environment(self):
TEST_TARGET = "CreateAndDeleteEnvironment"
generate_random_name_module = ("{}.{}.{}").format(
MURANO_SCENARIO, TEST_TARGET, "generate_random_name")
create_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_environment")
create_session_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_session")
delete_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_delete_environment")
scenario = environments.CreateAndDeleteEnvironment(self.context)
with mock.patch(generate_random_name_module) as mock_random_name:
with mock.patch(create_env_module) as mock_create_env:
with mock.patch(create_session_module) as mock_create_session:
with mock.patch(delete_env_module) as mock_delete_env:
fake_env = mock.Mock(id="fake_id")
mock_create_env.return_value = fake_env
mock_random_name.return_value = "foo"
scenario.run()
mock_create_env.assert_called_once_with()
mock_create_session.assert_called_once_with(
fake_env.id)
mock_delete_env.assert_called_once_with(
fake_env)
def test_create_and_deploy_environment(self):
TEST_TARGET = "CreateAndDeployEnvironment"
create_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_environment")
create_session_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_session")
create_service_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_service")
deploy_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_deploy_environment")
scenario = environments.CreateAndDeployEnvironment(self.context)
with mock.patch(create_env_module) as mock_create_env:
with mock.patch(create_session_module) as mock_create_session:
with mock.patch(create_service_module) as mock_create_service:
with mock.patch(deploy_env_module) as mock_deploy_env:
fake_env = mock.MagicMock(id="fake_env_id")
mock_create_env.return_value = fake_env
fake_session = mock.Mock(id="fake_session_id")
mock_create_session.return_value = fake_session
scenario.context = self._get_context()
scenario.context["tenants"] = {
"fake_tenant_id": {
"packages": [mock.MagicMock()]
}
}
scenario.run(1)
mock_create_env.assert_called_once_with()
mock_create_session.assert_called_once_with(
fake_env.id)
mock_create_service.assert_called_once_with(
fake_env,
fake_session,
"fake",
atomic_action=False)
mock_deploy_env.assert_called_once_with(
fake_env, fake_session)
self._test_atomic_action_timer(
scenario.atomic_actions(),
"murano.create_services")
| {
"content_hash": "fba89c2e43a7868fbc5f4bf7c1a9e3cd",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 48.608695652173914,
"alnum_prop": 0.4525939177101968,
"repo_name": "vganapath/rally",
"id": "5e432ed925f3b7dddce3829b2fb6ff0f80a1d127",
"size": "6220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/murano/test_environments.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "52008"
},
{
"name": "JavaScript",
"bytes": "8550"
},
{
"name": "Mako",
"bytes": "18645"
},
{
"name": "Python",
"bytes": "3621510"
},
{
"name": "Shell",
"bytes": "43808"
}
],
"symlink_target": ""
} |
from datetime import datetime
import logging
import random
import time
from google.appengine.api import memcache
_MEMCACHE_MASTER_DOWNLOAD_LOCK = 'master-download-lock-%s'
_MEMCACHE_MASTER_DOWNLOAD_EXPIRATION_SECONDS = 60 * 60
_DOWNLOAD_INTERVAL_SECONDS = 10
def WaitUntilDownloadAllowed(
master_name, timeout_seconds=90): # pragma: no cover
"""Waits until next download from the specified master is allowed.
Returns:
True if download is allowed to proceed.
False if download is still not allowed when the given timeout expires.
"""
client = memcache.Client()
key = _MEMCACHE_MASTER_DOWNLOAD_LOCK % master_name
deadline = time.time() + timeout_seconds
while True:
info = client.gets(key)
if not info or time.time() - info['time'] >= _DOWNLOAD_INTERVAL_SECONDS:
new_info = {
'time': time.time()
}
if not info:
success = client.add(
key, new_info, time=_MEMCACHE_MASTER_DOWNLOAD_EXPIRATION_SECONDS)
else:
success = client.cas(
key, new_info, time=_MEMCACHE_MASTER_DOWNLOAD_EXPIRATION_SECONDS)
if success:
logging.info('Download from %s is allowed. Waited %s seconds.',
master_name, (time.time() + timeout_seconds - deadline))
return True
if time.time() > deadline:
logging.info('Download from %s is not allowed. Waited %s seconds.',
master_name, timeout_seconds)
return False
logging.info('Waiting to download from %s', master_name)
time.sleep(_DOWNLOAD_INTERVAL_SECONDS + random.random())
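# Minimal usage sketch (an assumption, not part of the original module):
# callers gate each fetch against a master behind the shared download lock
# and back off when the slot cannot be acquired in time.
def _RateLimitedFetch(master_name, fetch_fn):  # hypothetical helper
  if not WaitUntilDownloadAllowed(master_name):
    return None  # Timed out waiting for the download slot; try again later.
  return fetch_fn()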
| {
"content_hash": "24f496bc7e59e72086dc15f7869e6de8",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 31.84,
"alnum_prop": 0.6633165829145728,
"repo_name": "nicko96/Chrome-Infra",
"id": "be4cbd4b55cb7d657c5c5d4be9a2d4ee4ba14642",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine/findit/waterfall/lock_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
} |
import requests
from BeautifulSoup import BeautifulSoup
import random,time,re
from Queue import Queue
from threading import Thread,Lock
import config
from basic import save
class Linkedin (Thread):
def __init__(self,account='',password='',session = None,ip = '',queue=None, thread_name = '',thread_id = None,term='',out=None):
Thread.__init__(self)
if account == "" or password == "" : raise ValueError("please enter your account")
self._account = account
self._password = password
self._session = requests.session()
self._ip = ip
self.queue = queue
self.thread_name = thread_name
self.thread_id = thread_id
self.term = term
self.EXIT_FLAG = False
self.out = out
@property
def account(self):
return self._account
@account.setter
def account(self,value):
if value == "":
raise ValueError("account could not be empty")
else:
self._account = value
@property
def password(self):
return self._password
@password.setter
def password(self,value):
if value == "":
print "error"
raise ValueError("password could not be empty")
self._password = value
@property
def session(self):
return self._session
# LinkedIn embeds per-session CSRF tokens in the login page; they have to be
# submitted along with the login request.
def get_credential(self):
r = self.session.get('https://www.linkedin.com/uas/login?goback=&trk=hb_signin')
soup = BeautifulSoup(r.text)
loginCsrfParam = soup.find('input', id = 'loginCsrfParam-login')['value']
csrfToken = soup.find('input', id = 'csrfToken-login')['value']
sourceAlias = soup.find('input', id = 'sourceAlias-login')['value']
payload = {
'session_key': self._account,
'session_password': self._password,
'loginCsrfParam' : loginCsrfParam,
'csrfToken' : csrfToken,
'sourceAlias' : sourceAlias
}
return payload
# Log in to LinkedIn with the CSRF payload; the requests Session keeps the cookies.
def login(self):
self.session.post('https://www.linkedin.com/uas/login-submit', data=self.get_credential())
def crawl(self,page):
term = self.term
self.login()
head = "www.linkedin.com"
List = []
format1 = re.compile(r'link_viewJob_2"\:"[\w\d\s\;\:\/\=\?\.\$\&\%]*')
page = "https://www.linkedin.com/vsearch/j?type=jobs&keywords="+term+"&orig=GLHD&rsid=2060029261441227217895&pageKey=voltron_job_search_internal_jsp&search=Search&locationType=I&countryCode=us&openFacets=L,C&page_num="+str(page)+"&pt=jobs"
soup1 = BeautifulSoup(self.session.get(page).text.encode("utf-8"))
list1 = str(soup1.find('div',{"id":"srp_main_"}))
time.sleep(random.randint(5,8))
if "link_viewJob_2" in list1:
contacts = format1.findall(list1)
for j in contacts:
url = head+j.split('\"')[2].replace(";",'').replace("&","&")
List.append(url)
save(list(set(List)),self.out)
def run(self):
while not self.EXIT_FLAG:
queueLock.acquire()
if self.queue.empty():
queueLock.release()
else:
page_num = self.queue.get()
queueLock.release()
self.crawl(page_num)
# print "%s processing %s" % (self.thread_id, page_num)
def stop(self):
self.EXIT_FLAG = True
queueLock = Lock()
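# Minimal wiring sketch (an assumption, not part of the original module):
# fill the shared queue with result-page numbers, let a few Linkedin threads
# drain it, then ask them to stop.
def run_crawlers(account, password, term, out, pages=10, workers=3):
    page_queue = Queue()
    for page_num in range(1, pages + 1):
        page_queue.put(page_num)
    threads = [Linkedin(account=account, password=password, queue=page_queue,
                        thread_id=i, term=term, out=out)
               for i in range(workers)]
    for t in threads:
        t.start()
    while not page_queue.empty():
        time.sleep(1)
    for t in threads:
        t.stop()
        t.join()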
| {
"content_hash": "202f7137ac261c38b2b455153b62d385",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 241,
"avg_line_length": 32.50515463917526,
"alnum_prop": 0.6806216301934666,
"repo_name": "hanhaohh/cvhelper",
"id": "fa42dd7b2b61c079648baa5290cf57f89ecfc1b0",
"size": "3153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/Crawler/mult_proc_linkedin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1270"
},
{
"name": "OpenEdge ABL",
"bytes": "17864315"
},
{
"name": "Python",
"bytes": "34910"
}
],
"symlink_target": ""
} |
import sys
if sys.platform == "win32":
import psutil
print("psutil", psutil.Process().memory_info().rss)
else:
# Note: if you execute Python from cygwin,
# the sys.platform is "cygwin"
# the grading system's sys.platform is "linux2"
import resource
print("ram usage in MB :", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
| {
"content_hash": "9ed5d5889f6efc2c3681812ef65eeacb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 89,
"avg_line_length": 36.3,
"alnum_prop": 0.6831955922865014,
"repo_name": "pk-python/basics",
"id": "388a03c03b1d30572f78ce257e557e1188e1a91a",
"size": "363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basics/ram_usage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37499"
}
],
"symlink_target": ""
} |
from typing import Dict, Optional, Text
import ujson
from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.models import Message
class GithubV1HookTests(WebhookTestCase):
STREAM_NAME = None # type: Optional[Text]
URL_TEMPLATE = u"/api/v1/external/github"
FIXTURE_DIR_NAME = 'github'
SEND_STREAM = False
BRANCHES = None # type: Optional[Text]
push_content = u"""zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 3 commits to branch master.
* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))
* Baz needs to be longer ([06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72))
* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))"""
def test_spam_branch_is_ignored(self) -> None:
self.SEND_STREAM = True
self.STREAM_NAME = 'commits'
self.BRANCHES = 'dev,staging'
data = self.get_body('push')
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe(self.test_user, self.STREAM_NAME)
prior_count = Message.objects.count()
result = self.client_post(self.URL_TEMPLATE, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def get_body(self, fixture_name: Text) -> Dict[str, Text]:
api_key = self.test_user.api_key
data = ujson.loads(self.fixture_data(self.FIXTURE_DIR_NAME, 'v1_' + fixture_name))
data.update({'email': self.TEST_USER_EMAIL,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if self.SEND_STREAM:
data['stream'] = self.STREAM_NAME
if self.BRANCHES is not None:
data['branches'] = self.BRANCHES
return data
def basic_test(self, fixture_name: Text, stream_name: Text,
expected_subject: Text, expected_content: Text,
send_stream: bool=False, branches: Optional[Text]=None) -> None:
self.STREAM_NAME = stream_name
self.SEND_STREAM = send_stream
self.BRANCHES = branches
self.send_and_test_stream_message(fixture_name, expected_subject, expected_content, content_type=None)
def test_user_specified_branches(self) -> None:
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self) -> None:
"""Around May 2013 the github webhook started to specify the stream.
Before then, the stream was hard coded to "commits"."""
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True)
def test_legacy_hook(self) -> None:
self.basic_test('push', 'commits', 'zulip-test / master', self.push_content)
def test_push_multiple_commits(self) -> None:
commit_info = "* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 50 commits to branch master.\n\n{}[and {} more commit(s)]".format(
commit_info * COMMITS_LIMIT,
50 - COMMITS_LIMIT,
)
self.basic_test('push_commits_more_than_limit', 'commits', 'zulip-test / master', expected_subject)
def test_issues_opened(self) -> None:
self.basic_test('issues_opened', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin opened [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self) -> None:
self.basic_test('issue_comment', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self) -> None:
self.basic_test('issues_closed', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin closed [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self) -> None:
self.basic_test('pull_request_opened', 'commits',
"zulip-test / PR #7 Counting is hard.",
"lfaraone opened [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)(assigned to lfaraone)\nfrom `patch-2` to `master`\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self) -> None:
self.basic_test('pull_request_closed', 'commits',
"zulip-test / PR #7 Counting is hard.",
"zbenjamin closed [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self) -> None:
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test / PR #13 Even more cowbell.",
"zbenjamin synchronized [PR #13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self) -> None:
self.basic_test('pull_request_comment', 'commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self) -> None:
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self) -> None:
self.basic_test('commit_comment', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self) -> None:
self.basic_test('commit_comment_line', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
class GithubV2HookTests(WebhookTestCase):
STREAM_NAME = None # type: Optional[Text]
URL_TEMPLATE = u"/api/v1/external/github"
FIXTURE_DIR_NAME = 'github'
SEND_STREAM = False
BRANCHES = None # type: Optional[Text]
push_content = """zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 3 commits to branch master.
* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))
* Baz needs to be longer ([06ebe5f](https://github.com/zbenjamin/zulip-test/commit/06ebe5f472a32f6f31fd2a665f0c7442b69cce72))
* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))"""
def test_spam_branch_is_ignored(self) -> None:
self.SEND_STREAM = True
self.STREAM_NAME = 'commits'
self.BRANCHES = 'dev,staging'
data = self.get_body('push')
# We subscribe to the stream in this test, even though
# it won't get written, to avoid failing for the wrong
# reason.
self.subscribe(self.test_user, self.STREAM_NAME)
prior_count = Message.objects.count()
result = self.client_post(self.URL_TEMPLATE, data)
self.assert_json_success(result)
after_count = Message.objects.count()
self.assertEqual(prior_count, after_count)
def get_body(self, fixture_name: Text) -> Dict[str, Text]:
api_key = self.test_user.api_key
data = ujson.loads(self.fixture_data(self.FIXTURE_DIR_NAME, 'v2_' + fixture_name))
data.update({'email': self.TEST_USER_EMAIL,
'api-key': api_key,
'payload': ujson.dumps(data['payload'])})
if self.SEND_STREAM:
data['stream'] = self.STREAM_NAME
if self.BRANCHES is not None:
data['branches'] = self.BRANCHES
return data
def basic_test(self, fixture_name: Text, stream_name: Text,
expected_subject: Text, expected_content: Text,
send_stream: bool=False, branches: Optional[Text]=None) -> None:
self.STREAM_NAME = stream_name
self.SEND_STREAM = send_stream
self.BRANCHES = branches
self.send_and_test_stream_message(fixture_name, expected_subject, expected_content, content_type=None)
def test_user_specified_branches(self) -> None:
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True, branches="master,staging")
def test_user_specified_stream(self) -> None:
"""Around May 2013 the github webhook started to specify the stream.
Before then, the stream was hard coded to "commits"."""
self.basic_test('push', 'my_commits', 'zulip-test / master', self.push_content,
send_stream=True)
def test_push_multiple_commits(self) -> None:
commit_info = "* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 50 commits to branch master.\n\n{}[and {} more commit(s)]".format(
commit_info * COMMITS_LIMIT,
50 - COMMITS_LIMIT,
)
self.basic_test('push_commits_more_than_limit', 'commits', 'zulip-test / master', expected_subject)
def test_push_multiple_committers(self) -> None:
commit_info = "* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 6 commits to branch master. Commits by tomasz (3), baxthehacker (2) and zbenjamin (1).\n\n{}* Add baz ([48c329a](https://github.com/zbenjamin/zulip-test/commit/48c329a0b68a9a379ff195ee3f1c1f4ab0b2a89e))".format(commit_info * 5)
self.basic_test('push_multiple_committers', 'commits', 'zulip-test / master', expected_subject)
def test_push_multiple_committers_with_others(self) -> None:
commit_info = "* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))\n"
expected_subject = "zbenjamin [pushed](https://github.com/zbenjamin/zulip-test/compare/4f9adc4777d5...b95449196980) 10 commits to branch master. Commits by baxthehacker (4), James (3), Tomasz (2) and others (1).\n\n{}* Final edit to baz, I swear ([b954491](https://github.com/zbenjamin/zulip-test/commit/b95449196980507f08209bdfdc4f1d611689b7a8))".format(commit_info * 9)
self.basic_test('push_multiple_committers_with_others', 'commits', 'zulip-test / master', expected_subject)
def test_legacy_hook(self) -> None:
self.basic_test('push', 'commits', 'zulip-test / master', self.push_content)
def test_issues_opened(self) -> None:
self.basic_test('issues_opened', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin opened [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nI tried changing the widgets, but I got:\r\n\r\nPermission denied: widgets are immutable\n~~~")
def test_issue_comment(self) -> None:
self.basic_test('issue_comment', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/issues/5#issuecomment-23374280) on [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)\n\n~~~ quote\nWhoops, I did something wrong.\r\n\r\nI'm sorry.\n~~~")
def test_issues_closed(self) -> None:
self.basic_test('issues_closed', 'issues',
"zulip-test / Issue #5 The frobnicator doesn't work",
"zbenjamin closed [Issue #5](https://github.com/zbenjamin/zulip-test/issues/5)")
def test_pull_request_opened(self) -> None:
self.basic_test('pull_request_opened', 'commits',
"zulip-test / PR #7 Counting is hard.",
"lfaraone opened [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)(assigned to lfaraone)\nfrom `patch-2` to `master`\n\n~~~ quote\nOmitted something I think?\n~~~")
def test_pull_request_closed(self) -> None:
self.basic_test('pull_request_closed', 'commits',
"zulip-test / PR #7 Counting is hard.",
"zbenjamin closed [PR #7](https://github.com/zbenjamin/zulip-test/pull/7)")
def test_pull_request_synchronize(self) -> None:
self.basic_test('pull_request_synchronize', 'commits',
"zulip-test / PR #13 Even more cowbell.",
"zbenjamin synchronized [PR #13](https://github.com/zbenjamin/zulip-test/pull/13)")
def test_pull_request_comment(self) -> None:
self.basic_test('pull_request_comment', 'commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~")
def test_pull_request_comment_user_specified_stream(self) -> None:
self.basic_test('pull_request_comment', 'my_commits',
"zulip-test / PR #9 Less cowbell.",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/pull/9#issuecomment-24771110) on [PR #9](https://github.com/zbenjamin/zulip-test/pull/9)\n\n~~~ quote\nYeah, who really needs more cowbell than we already have?\n~~~",
send_stream=True)
def test_commit_comment(self) -> None:
self.basic_test('commit_comment', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252302) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nAre we sure this is enough cowbell?\n~~~")
def test_commit_comment_line(self) -> None:
self.basic_test('commit_comment_line', 'commits',
"zulip-test",
"zbenjamin [commented](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533#commitcomment-4252307) on [7c99467](https://github.com/zbenjamin/zulip-test/commit/7c994678d2f98797d299abed852d3ff9d0834533)\n~~~ quote\nThis line adds /unlucky/ cowbell (because of its line number). We should remove it.\n~~~")
| {
"content_hash": "1265811c6bdc9508433bc2a3a8c36d61",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 379,
"avg_line_length": 61.66417910447761,
"alnum_prop": 0.6466174512888782,
"repo_name": "mahim97/zulip",
"id": "19964bac5476e09a71da7f39a1345f1808e354d9",
"size": "16526",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "zerver/webhooks/github/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
"""Module for compile error visualization.
Attributes:
log (logging): this module logger
"""
import logging
import sublime
from os import path
from ..completion.compiler_variant import LibClangCompilerVariant
from ..settings.settings_storage import SettingsStorage
from ..utils.subl.row_col import ZeroIndexedRowCol
from .popups import Popup
log = logging.getLogger("ECC")
PATH_TO_ICON = "Packages/EasyClangComplete/pics/icons/{icon}"
MIN_ERROR_SEVERITY = 3
class PopupErrorVis:
"""A class for compile error visualization with popups.
Attributes:
err_regions (dict): dictionary of error regions for view ids
"""
_TAG_ERRORS = "easy_clang_complete_errors"
_TAG_WARNINGS = "easy_clang_complete_warnings"
_ERROR_SCOPE = "undefined"
_WARNING_SCOPE = "undefined"
def __init__(self, settings):
"""Initialize error visualization.
Args:
settings: plugin settings that define the gutter icons and linter mark style
"""
gutter_style = settings.gutter_style
mark_style = settings.linter_mark_style
self.settings = settings
self.err_regions = {}
if gutter_style == SettingsStorage.GUTTER_COLOR_STYLE:
self.gutter_mark_error = PATH_TO_ICON.format(
icon="error.png")
self.gutter_mark_warning = PATH_TO_ICON.format(
icon="warning.png")
elif gutter_style == SettingsStorage.GUTTER_MONO_STYLE:
self.gutter_mark_error = PATH_TO_ICON.format(
icon="error_mono.png")
self.gutter_mark_warning = PATH_TO_ICON.format(
icon="warning_mono.png")
elif gutter_style == SettingsStorage.GUTTER_DOT_STYLE:
self.gutter_mark_error = PATH_TO_ICON.format(
icon="error_dot.png")
self.gutter_mark_warning = PATH_TO_ICON.format(
icon="warning_dot.png")
else:
log.error("Unknown option for gutter_style: %s", gutter_style)
self.gutter_mark_error = ""
self.gutter_mark_warning = ""
if mark_style == SettingsStorage.MARK_STYLE_OUTLINE:
self.draw_flags = sublime.DRAW_EMPTY | sublime.DRAW_NO_FILL
elif mark_style == SettingsStorage.MARK_STYLE_FILL:
self.draw_flags = 0
elif mark_style == SettingsStorage.MARK_STYLE_SOLID_UNDERLINE:
self.draw_flags = sublime.DRAW_NO_FILL | \
sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE
elif mark_style == SettingsStorage.MARK_STYLE_STIPPLED_UNDERLINE:
self.draw_flags = sublime.DRAW_NO_FILL | \
sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE
elif mark_style == SettingsStorage.MARK_STYLE_SQUIGGLY_UNDERLINE:
self.draw_flags = sublime.DRAW_NO_FILL | \
sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE
else:
self.draw_flags = sublime.HIDDEN
def generate(self, view, errors):
"""Generate a dictionary that stores all errors.
The errors are stored along with their positions and descriptions.
Needed to show these errors on the screen.
Args:
view (sublime.View): current view
errors (list): list of parsed errors (dict objects)
"""
view_id = view.buffer_id()
if view_id == 0:
log.error("Trying to show error on invalid view. Abort.")
return
log.debug("Generating error regions for view %s", view_id)
# first clear old regions
if view_id in self.err_regions:
log.debug("Removing old error regions")
del self.err_regions[view_id]
# create an empty region dict for view id
self.err_regions[view_id] = {}
# If the view is closed while this is running, there will be
# errors. We want to handle them gracefully.
try:
for error in errors:
self.add_error(view, error)
log.debug("%s error regions ready", len(self.err_regions))
except (AttributeError, KeyError, TypeError) as e:
log.error("View was closed -> cannot generate error vis in it")
log.info("Original exception: '%s'", repr(e))
def add_error(self, view, error_dict):
"""Put new compile error in the dictionary of errors.
Args:
view (sublime.View): current view
error_dict (dict): current error dict {row, col, file, region}
"""
log.debug("Adding error %s", error_dict)
error_source_file = path.basename(error_dict['file'])
if error_source_file == path.basename(view.file_name()):
row_col = ZeroIndexedRowCol(error_dict['row'], error_dict['col'])
point = row_col.as_1d_location(view)
error_dict['region'] = view.word(point)
if row_col.row in self.err_regions[view.buffer_id()]:
self.err_regions[view.buffer_id()][row_col.row] += [error_dict]
else:
self.err_regions[view.buffer_id()][row_col.row] = [error_dict]
def show_errors(self, view):
"""Show current error regions.
Args:
view (sublime.View): Current view
"""
if view.buffer_id() not in self.err_regions:
# view has no errors for it
return
current_error_dict = self.err_regions[view.buffer_id()]
error_regions, warning_regions = PopupErrorVis._as_region_list(
current_error_dict)
log.debug("Showing error regions: %s", error_regions)
log.debug("Showing warning regions: %s", warning_regions)
view.add_regions(
key=PopupErrorVis._TAG_ERRORS,
regions=error_regions,
scope=PopupErrorVis._ERROR_SCOPE,
icon=self.gutter_mark_error,
flags=self.draw_flags)
view.add_regions(
key=PopupErrorVis._TAG_WARNINGS,
regions=warning_regions,
scope=PopupErrorVis._WARNING_SCOPE,
icon=self.gutter_mark_warning,
flags=self.draw_flags)
def erase_regions(self, view):
"""Erase error regions for view.
Args:
view (sublime.View): erase regions for view
"""
if view.buffer_id() not in self.err_regions:
# view has no errors for it
return
log.debug("Erasing error regions for view %s", view.buffer_id())
view.erase_regions(PopupErrorVis._TAG_ERRORS)
view.erase_regions(PopupErrorVis._TAG_WARNINGS)
def show_popup_if_needed(self, view, row):
"""Show a popup if it is needed in this row.
Args:
view (sublime.View): current view
row (int): number of row
"""
if view.buffer_id() not in self.err_regions:
return
current_err_region_dict = self.err_regions[view.buffer_id()]
if row in current_err_region_dict:
errors_dict = current_err_region_dict[row]
max_severity, error_list = PopupErrorVis._as_msg_list(errors_dict)
text_to_show = PopupErrorVis.__to_md(error_list)
if max_severity < MIN_ERROR_SEVERITY:
popup = Popup.warning(text_to_show, self.settings)
else:
popup = Popup.error(text_to_show, self.settings)
popup.show(view)
else:
log.debug("No error regions for row: %s", row)
def clear(self, view):
"""Clear errors from dict for view.
Args:
view (sublime.View): current view
"""
if view.buffer_id() not in self.err_regions:
# no errors for this view
return
view.hide_popup()
self.erase_regions(view)
del self.err_regions[view.buffer_id()]
@staticmethod
def _as_msg_list(errors_dicts):
"""Return errors as list.
Args:
errors_dicts (dict[]): A list of error dicts
"""
error_list = []
max_severity = 0
for entry in errors_dicts:
error_list.append(entry['error'])
if LibClangCompilerVariant.SEVERITY_TAG in entry:
severity = entry[LibClangCompilerVariant.SEVERITY_TAG]
if severity > max_severity:
max_severity = severity
return max_severity, error_list
@staticmethod
def _as_region_list(err_regions_dict):
"""Make a list from error region dict.
Args:
err_regions_dict (dict): dict of error regions for current view
Returns:
list(Region): list of regions to show on sublime view
"""
errors = []
warnings = []
for errors_list in err_regions_dict.values():
for entry in errors_list:
severity = MIN_ERROR_SEVERITY
if LibClangCompilerVariant.SEVERITY_TAG in entry:
severity = entry[LibClangCompilerVariant.SEVERITY_TAG]
if severity < MIN_ERROR_SEVERITY:
warnings.append(entry['region'])
else:
errors.append(entry['region'])
return errors, warnings
@staticmethod
def __to_md(error_list):
"""Convert an error dict to markdown string."""
if len(error_list) > 1:
# Make it a markdown list.
text_to_show = '\n- '.join(error_list)
text_to_show = '- ' + text_to_show
else:
text_to_show = error_list[0]
return text_to_show
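# For reference (inferred from how add_error() and _as_msg_list() read each
# entry; not an official schema): a parsed error dict is expected to provide
# at least the following keys.
#
# error_dict = {
#     'file': '/path/to/source.cpp',  # file the diagnostic belongs to
#     'row': 10,                       # zero-indexed row of the diagnostic
#     'col': 4,                        # zero-indexed column
#     'error': "expected ';'",         # human-readable message
#     # LibClangCompilerVariant.SEVERITY_TAG: 3,  # optional severity
# }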
| {
"content_hash": "d16b2c21b422799aaac46a1e5758561a",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 37.53515625,
"alnum_prop": 0.5872619419294411,
"repo_name": "niosus/EasyClangComplete",
"id": "b9cff1e9bcdd04313655cdf87e2611b003b4b041",
"size": "9609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin/error_vis/popup_error_vis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "469"
},
{
"name": "C++",
"bytes": "4969"
},
{
"name": "CMake",
"bytes": "1160"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "Makefile",
"bytes": "444"
},
{
"name": "Objective-C",
"bytes": "4185"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1615297"
},
{
"name": "Starlark",
"bytes": "105"
}
],
"symlink_target": ""
} |
import atexit
from monopyly import *
from collections import Counter
import random
# keep a list of possible deals - reconstruct after each deal, auction, bankruptcy, etc. and at start of game
# keep a track of offers made and refused
# keep a track of the behaviour of other players
# offer different amounts of cash
# do more with BORING sets (such as using them in the get out of jail function)
# Qubix Brookwood
# Retail Decisions
class OtherPlayer(object):
def __init__(self, name):
self.name = name
self.will_sell_for_cash = 0.5
self.sell_for_cash_confidence = 0.0
self.max_auction_premium = 0
class SetInfo(object):
class State(object):
NONE_OWNED = 0
ONE_OWNED = 1
TWO_OF_THREE_OWNED_DIFFERENT = 2
TWO_OF_THREE_OWNED_SAME = 3
OWNED_OUTRIGHT = 4
THREE_OWNED_TWO_OWNERS = 5
THREE_OWNED_THREE_OWNERS = 6
TWO_OF_TWO_OWNED_DIFFERENT = 7
BORING = 8
def __init__(self, state, owners, props):
self.state = state
self.owners = owners
self.props = props
@staticmethod
def make(owners, props):
if owners[0] == owners[1]:
if len(props) == 2:
return SetInfo(SetInfo.State.OWNED_OUTRIGHT, [owners[0]], props)
else:
return SetInfo(SetInfo.State.TWO_OF_THREE_OWNED_SAME, [owners[0]], props)
else:
if len(props) == 2:
return SetInfo(SetInfo.State.TWO_OF_TWO_OWNED_DIFFERENT, owners, props)
else:
return SetInfo(SetInfo.State.TWO_OF_THREE_OWNED_DIFFERENT, owners, props)
return
def log_player_state(player):
text = ''
# don't deal with streets where another street in the set has houses
for x in player.state.properties:
text = text + x.name +'('
if x.is_mortgaged:
text += 'mortgaged'
if (type(x) == Street):
if (x.number_of_houses > 0):
text += str(x.number_of_houses)+'houses'
text += ') '
Logger.log(player.ai.get_name()+" has "+text, Logger.DEBUG)
def work_out_owners(property_set):
"""
@param property_set a PropertySet
@return: SetInfo
"""
if type(property_set.properties[0]) != Street:
return SetInfo(SetInfo.State.BORING, [], [])
prop1 = property_set.properties[0]
owner1 = prop1.owner
prop2 = property_set.properties[1]
owner2 = prop2.owner
if len(property_set.properties) == 2:
if owner1 == None:
if owner2 == None:
return SetInfo(SetInfo.State.NONE_OWNED, [], [prop1, prop2])
else:
return SetInfo(SetInfo.State.ONE_OWNED, [owner2], [prop2, prop1])
else:
if owner2 == None:
return SetInfo(SetInfo.State.ONE_OWNED, [owner1], [prop1, prop2])
else:
return SetInfo.make([owner1,owner2], [prop1,prop2])
else: # len(property_set) == 3
prop3 = property_set.properties[2]
owner3 = prop3.owner
if owner1 == None:
if owner2 == None:
if owner3 == None: # XXX
return SetInfo(SetInfo.State.NONE_OWNED, [], [prop1,prop2,prop3])
else: # XXA
return SetInfo(SetInfo.State.ONE_OWNED, [owner3],[prop3,prop1,prop2])
else:
if owner3 == None: # XAX
return SetInfo(SetInfo.State.ONE_OWNED, [owner2],[prop2,prop1,prop3])
else: # XA?
return SetInfo.make([owner2,owner3], [prop2,prop3,prop1])
else:
if owner2 == None:
if owner3 == None: # AXX
return SetInfo(SetInfo.State.ONE_OWNED, [owner1], [prop1,prop2,prop3])
else: # AX?
return SetInfo.make([owner1,owner3],[prop1,prop3,prop2])
else:
if owner3 == None: # A?X
return SetInfo.make([owner1,owner2],[prop1,prop2,prop3])
else: # A??
# three owners
if owner1 == owner2:
if owner2 == owner3: # AAA
return SetInfo(SetInfo.State.OWNED_OUTRIGHT, [owner1],[prop1,prop2,prop3])
else: # AAB
return SetInfo(SetInfo.State.THREE_OWNED_TWO_OWNERS, [owner1,owner3],[prop1,prop2,prop3])
else:
if (owner1 == owner3): # ABA
return SetInfo(SetInfo.State.THREE_OWNED_TWO_OWNERS, [owner1,owner2],[prop1,prop3,prop2])
else:
if (owner2 == owner3): # ABB
return SetInfo(SetInfo.State.THREE_OWNED_TWO_OWNERS, [owner2,owner1],[prop2,prop3,prop1])
else: # ABC
return SetInfo(SetInfo.State.THREE_OWNED_THREE_OWNERS, [owner1,owner2,owner3],[prop1,prop2,prop3])
def is_partial_owner(info, player):
for owner in info.owners:
if owner == player:
return True
return False
def accept_no_deal(me,him,offered,wanted):
return DealResponse(action=DealResponse.Action.REJECT)
def accept_deal_if_favorable(me,him,offered,wanted):
offer_amount = offered.price - wanted.price # if negative then he should give me money
if offer_amount < 0:
return DealResponse(action=DealResponse.Action.ACCEPT, minimum_cash_wanted=-offer_amount)
else:
offer_amount = me.ai.make_offer_amount(me, offer_amount)
return DealResponse(action=DealResponse.Action.ACCEPT, maximum_cash_offered=offer_amount)
def accept_deal_always(me,him,offered,wanted):
offer_amount = offered.price - wanted.price # if negative then he should give me money
if offer_amount < 0:
# I want it, so just take the deal
return DealResponse(action=DealResponse.Action.ACCEPT)
else:
offer_amount = me.ai.make_offer_amount(me, offer_amount)
return DealResponse(action=DealResponse.Action.ACCEPT, maximum_cash_offered=offer_amount)
def make_no_deal(me,him,offered,wanted):
return None
def make_deal_if_favourable(me,him,offered,wanted):
offer_amount = offered.price - wanted.price # if negative then he should give me money
if offer_amount < 0:
return DealProposal(him, [offered],[wanted], 0, -offer_amount)
else:
offer_amount = me.ai.make_offer_amount(me, offer_amount)
return DealProposal(him, [offered],[wanted], offer_amount)
def make_deal_always(me,him,offered,wanted):
offer_amount = offered.price - wanted.price # if negative then he should give me money
if offer_amount < 0:
# I want it, so offer for nothing
return DealProposal(him, [offered],[wanted])
else:
offer_amount = me.ai.make_offer_amount(me, offer_amount)
return DealProposal(him, [offered],[wanted], offer_amount)
class PostGiftState:
S_BAA = 0 # breaks A's set
S_BAB = 1 # gives B two of three where A had two of three
S_BAC = 2 # gives B one of A's pair
S_BAX = 3 # gives B one of A's pair
S_BBB = 4 # gives B the last of the set
S_BBC = 5 # gives B two of three
S_BBX = 6 # gives B two of three
S_BCC = 7 # gives B negotiation
S_BC = 8 # gives B negotiation
S_BCD = 9 # nothing special
S_BCX = 10 # gives B a little negotiation
S_BXX = 11 # gives B potential
BORING = 12
class x:
accept = accept_no_deal
offer = make_no_deal
class d:
accept = accept_deal_if_favorable
offer = make_deal_if_favourable
class y:
accept = accept_deal_always
offer = make_deal_always
deal_array = \
[
[ x,x,x,x,d,x,x,x,x,x,x,x,x ],
[ x,x,x,x,y,d,y,x,x,x,x,x,x ],
[ x,x,x,x,y,d,d,x,x,x,x,x,x ],
[ x,x,x,x,y,d,d,x,x,x,x,x,x ],
[ x,x,x,x,d,x,x,x,x,x,x,x,x ],
[ d,x,d,d,y,d,y,x,d,x,x,x,x ],
[ d,x,d,d,y,x,d,x,x,x,x,x,x ],
[ x,y,y,y,y,y,y,d,d,x,x,y,x ],
[ x,y,x,x,y,d,y,x,d,x,x,y,x ],
[ y,y,y,y,y,y,y,y,y,d,y,y,x ],
[ y,y,y,y,y,y,y,x,y,x,d,d,x ],
[ x,y,y,y,y,y,y,x,x,x,d,d,x ],
[ x,x,x,x,x,x,x,x,x,x,x,x,x ],
]
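# Reading the table (inferred from should_accept_deal/make_offer below, not
# stated explicitly in the original): rows are indexed by
# gift_result(asked, me, him) and columns by gift_result(offered, him, me);
# x = never deal, d = deal only if favourable, y = always deal.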
def gift_result(set_info, p1, p2):
"""
Figure out the result of giving away a property from a set before the gift takes place
"""
if set_info.state == SetInfo.State.OWNED_OUTRIGHT and set_info.owners[0] == p1:
return PostGiftState.S_BAA
elif set_info.state == SetInfo.State.THREE_OWNED_TWO_OWNERS and set_info.owners[0] == p1 and set_info.owners[1] == p2:
return PostGiftState.S_BAB
elif set_info.state == SetInfo.State.THREE_OWNED_TWO_OWNERS and set_info.owners[0] == p1 and set_info.owners[1] != None:
return PostGiftState.S_BAC
elif set_info.state == SetInfo.State.TWO_OF_THREE_OWNED_SAME and set_info.owners[0] == p1:
return PostGiftState.S_BAX
elif set_info.state == SetInfo.State.THREE_OWNED_TWO_OWNERS and set_info.owners[0] == p2 and set_info.owners[1] == p1:
return PostGiftState.S_BBB
elif set_info.state == SetInfo.State.THREE_OWNED_THREE_OWNERS and \
(set_info.owners[0] == p1 or set_info.owners[1] == p1 or set_info.owners[2] == p1) and \
(set_info.owners[0] == p2 or set_info.owners[1] == p2 or set_info.owners[2] == p2):
return PostGiftState.S_BBC
elif set_info.state == SetInfo.State.TWO_OF_THREE_OWNED_DIFFERENT and (set_info.owners[0] == p1 or set_info.owners[1] == p1) and \
(set_info.owners[0] == p2 or set_info.owners[1] == p2):
return PostGiftState.S_BBX
elif set_info.state == SetInfo.State.THREE_OWNED_TWO_OWNERS and set_info.owners[0] != p2 and set_info.owners[0] == p1:
return PostGiftState.S_BCC
elif set_info.state == SetInfo.State.TWO_OF_TWO_OWNED_DIFFERENT and (set_info.owners[0] == p1 and set_info.owners[1] != p2 or \
set_info.owners[1] == p1 and set_info.owners[0] != p2):
return PostGiftState.S_BC
elif set_info.state == SetInfo.State.THREE_OWNED_THREE_OWNERS and \
(set_info.owners[0] == p1 or set_info.owners[1] == p1 or set_info.owners[2] == p1) and \
(set_info.owners[0] != p2 and set_info.owners[1] != p2 and set_info.owners[2] != p2):
return PostGiftState.S_BCD
elif set_info.state == SetInfo.State.TWO_OF_THREE_OWNED_DIFFERENT and \
(set_info.owners[0] == p1 or set_info.owners[1] == p1) and \
(set_info.owners[0] != p2 and set_info.owners[1] != p2):
return PostGiftState.S_BCX
elif set_info.state == SetInfo.State.ONE_OWNED and set_info.owners[0] == p1:
return PostGiftState.S_BXX
return PostGiftState.BORING
def should_accept_deal(me,him, offered,asked, offered_property,asked_property):
'''
Should we do a deal between me and him where he is offering one of offered to me and
asking for one of asked in return?
'''
i = gift_result(asked,me,him)
j = gift_result(offered,him,me)
result = PostGiftState.deal_array[i][j].accept(me,him, offered_property, asked_property)
return result
def make_offer(me,him, offered,asked, offered_property, asked_property):
'''
Should we make an offer to him, giving one property from the offered set
and asking for one from the asked set in return?
'''
if asked_property.property_set == offered_property.property_set:
return None
i = gift_result(asked,me,him)
j = gift_result(offered,him,me)
result = PostGiftState.deal_array[i][j].offer(me,him, offered_property, asked_property)
return result
def on_exit(huw_ai):
print(huw_ai.square_counters)
class HuwAI20140202(PlayerAIBase):
'''
An AI that plays like Huw.
- It initially buys any properties it can.
- It builds houses when it has complete sets.
- It makes favourable deals with other players.
- It keeps almost no cash.
'''
def __init__(self):
'''
The 'constructor'.
'''
self.is_start_of_game = True
self.registered_exit = False
self.cash_reserve = 500
self.extreme_reserve = 200
self.easy_reserve = 0
self.hard_reserve = 0
self.others = {}
self.others_in_game = {}
self.sets_we_like = [PropertySet.ORANGE, PropertySet.LIGHT_BLUE, PropertySet.YELLOW, PropertySet.RED,
PropertySet.PURPLE, PropertySet.GREEN, PropertySet.DARK_BLUE, PropertySet.BROWN,
PropertySet.STATION, PropertySet.UTILITY]
self.square_counters = Counter()
self.post_gift_state = PostGiftState()
self.max_deals = 0
def start_of_game(self):
self.is_start_of_game = True
self.others_in_game = {}
self.max_deals = 0
if not self.registered_exit:
atexit.register(on_exit, self)
self.registered_exit = True
def check_start_of_game(self, game_state):
if (not self.is_start_of_game):
return
self.is_start_of_game = False
for p in game_state.players:
if (p.name != self.get_name()):
if (not p.name in self.others):
Logger.log("{0} Found other player {1}".format(self.get_name(), p.name), Logger.INFO)
self.others[p.name] = OtherPlayer(p.name)
self.others_in_game[p.name] = self.others[p.name]
def get_name(self):
return "Huw At 21"
def calculate_reserve(self, player):
''' figure out how much easy money we've got
'''
self.easy_reserve = 0
self.hard_reserve = 0
text = ''
# don't deal with streets where another street in the set has houses
for x in player.state.properties:
text = text + x.name +'('
if x.is_mortgaged:
text += 'mortgaged'
if not x.is_mortgaged and ((type(x) == Street and x.number_of_houses == 0) or type(x) != Street):
self.easy_reserve += x.mortgage_value
if (type(x) == Street):
if (x.number_of_houses > 0):
text += str(x.number_of_houses)+'houses'
self.hard_reserve += x.house_price/2 + x.mortgage_value
text += ') '
Logger.log('* I\'ve got '+text, Logger.DEBUG)
Logger.log('* I\'m worth cash {} with mortgages {} and selling houses {}'.format(
player.state.cash, player.state.cash+self.easy_reserve, player.state.cash+self.easy_reserve+self.hard_reserve), Logger.DEBUG)
def landed_on_unowned_property(self, game_state, player, property):
'''
Called when we land on an unowned property. We always buy it if we
can while keeping a small cash reserve.
'''
if (player.state.cash + self.easy_reserve) > (self.cash_reserve + property.price):
return PlayerAIBase.Action.BUY
else:
return PlayerAIBase.Action.DO_NOT_BUY
def deal_proposed(self, game_state, me, deal_proposal):
'''
Called when a deal is proposed by another player.
'''
# can't deal with complex stuff
if len(deal_proposal.properties_wanted) > 1:
return DealResponse(DealResponse.Action.REJECT)
avail_cash = me.state.cash + self.easy_reserve
them = deal_proposal.proposed_by_player
if len(deal_proposal.properties_offered) == 1 and len(deal_proposal.properties_wanted) == 1:
property_offered = deal_proposal.properties_offered[0]
property_wanted = deal_proposal.properties_wanted[0]
info_offered = work_out_owners(property_offered.property_set)
info_wanted = work_out_owners(property_wanted.property_set)
return should_accept_deal(me, them, info_offered, info_wanted, property_offered, property_wanted)
elif len(deal_proposal.properties_offered) > 0:
property = deal_proposal.properties_offered[0]
info = work_out_owners(property.property_set)
if info.state == SetInfo.State.THREE_OWNED_TWO_OWNERS and info.owners[0] == me:
return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=min(property.price*2, avail_cash))
else:
return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=min(property.price, avail_cash))
elif len(deal_proposal.properties_wanted) == 1:
# never just give away a property for money
pass
else:
pass
return DealResponse(action=DealResponse.Action.REJECT)
def deal_result(self, deal_info):
if deal_info == PlayerAIBase.DealInfo.INVALID_DEAL_PROPOSED:
Logger.log("** Deal result: INVALID_DEAL_PROPOSED", Logger.DEBUG)
elif deal_info == PlayerAIBase.DealInfo.SUCCEEDED:
Logger.log("** Deal result: SUCCEEDED", Logger.DEBUG)
elif deal_info == PlayerAIBase.DealInfo.DEAL_REJECTED:
Logger.log("** Deal result: DEAL_REJECTED", Logger.DEBUG)
def build_houses(self, game_state, player):
'''
Gives us the opportunity to build houses.
'''
# We find the first set we own that we can build on...
for owned_set in player.state.owned_unmortgaged_sets:
# We can't build on stations or utilities, or if the
# set already has hotels on all the properties...
if not owned_set.can_build_houses:
continue
# We see how much money we need for one house on each property...
cost = owned_set.house_price * owned_set.number_of_properties
if player.state.cash > (self.cash_reserve + cost):
# We build one house on each property...
return [(p, 1) for p in owned_set.properties]
# We can't build...
return []
def property_offered_for_auction(self, game_state, player, property):
max_tuple = max(self.others_in_game.items(), key = (lambda v: v[1].max_auction_premium) )
offer = property.price + max_tuple[1].max_auction_premium
offer = self.make_offer_amount(player, offer)
Logger.log("Making auction offer "+str(offer), Logger.DEBUG)
return offer
def make_offer_amount(self, player, target):
target = target + random.randint(1, 10)
offer = max(0, min(target, player.state.cash+self.easy_reserve-self.cash_reserve))
return offer
def work_out_deals(self, me):
deals = []
infos = dict([ ( set, work_out_owners(me.board.get_property_set(set)) ) for set in self.sets_we_like])
for asked_property in me.board.squares:
if type(asked_property) == Street:
him = asked_property.owner
if him != me and him != None:
for offered_property in me.state.properties:
offered = infos[offered_property.property_set.set_enum]
asked = infos[asked_property.property_set.set_enum]
deal = make_offer(me,him, offered,asked, offered_property, asked_property)
if deal != None:
deals.append(deal)
deals.append(DealProposal(him, None, [asked_property], self.make_offer_amount(me, asked_property.price)))
self.max_deals = max(self.max_deals, len(deals))
return deals
def propose_deal(self, game_state, me):
Logger.log("* Propose deal perhaps", Logger.DEBUG)
if me.state.ai_processing_seconds_remaining < 3:
return
deals = self.work_out_deals(me)
if len(deals) > 0:
deal = deals[random.randint(0,len(deals)-1)]
return deal
return None
def start_of_turn(self, game_state, player):
self.check_start_of_game(game_state)
if (player.ai == self):
self.calculate_reserve(player)
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
return PlayerAIBase.Action.TAKE_A_CHANCE
def players_birthday(self):
return "Happy Birthday!"
def money_will_be_taken(self, player, amount):
Logger.log("* Money will be taken "+str(amount), Logger.DEBUG)
self.owe = amount
self.to_sell = max(0, amount - player.state.cash - self.easy_reserve)
def mortgage_properties(self, game_state, player):
Logger.log("* Mortgage perhaps", Logger.DEBUG)
need = self.owe - player.state.cash
to_mortgage = []
for p in player.state.properties:
if need > 0 and not p.is_mortgaged:
to_mortgage.append(p)
need -= p.mortgage_value
return to_mortgage
def sell_houses(self, game_state, player):
Logger.log("* Sell perhaps", Logger.DEBUG)
return []
def get_out_of_jail(self, game_state, player):
Logger.log("* Get out of jail perhaps", Logger.DEBUG)
for set in self.sets_we_like:
properties = player.board.get_property_set(set)
info = work_out_owners(properties)
if info.state == SetInfo.State.NONE_OWNED or \
info.state == SetInfo.State.ONE_OWNED or \
info.state == SetInfo.State.TWO_OF_THREE_OWNED_SAME:
return self._get_me_out_of_jail(player)
return PlayerAIBase.Action.STAY_IN_JAIL
def _get_me_out_of_jail(self, player):
if (player.state.number_of_get_out_of_jail_free_cards > 0):
return PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
else:
if player.state.cash > 50:
return PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
else:
return PlayerAIBase.Action.STAY_IN_JAIL
def auction_result(self, status, property, player, amount_paid):
if (status == PlayerAIBase.Action.AUCTION_SUCCEEDED and player.ai != self):
premium = amount_paid - property.price
other_player = self.others_in_game[player.name]
other_player.max_auction_premium = max(other_player.max_auction_premium, premium)
def player_landed_on_square(self, game_state, square, player):
self.square_counters[square.name]+=1
def deal_completed(self, dr):
Logger.log("* Deal completed proposed by: {}; to {}; giving {} getting {} for {}".
format(dr.proposer.ai.get_name(),dr.proposee.ai.get_name(), dr.properties_transferred_to_proposee, dr.properties_transferred_to_proposer,
dr.cash_transferred_from_proposer_to_proposee), level=Logger.INFO)
def game_over(self, winner, maximum_rounds_played):
Logger.log("* Max deals was "+str(self.max_deals), Logger.INFO)
def player_ran_out_of_time(self, player):
Logger.log("* "+player.ai.get_name()+" ran out of time", Logger.DEBUG)
def unmortgage_properties(self, game_state, me):
sets = set()
candidate = None
for property in me.state.properties:
property_set = property.property_set
if property.is_mortgaged and candidate == None:
candidate = property_set
if not property_set in sets:
info = work_out_owners(property.property_set)
sets.add(property_set)
                if info.state == SetInfo.State.OWNED_OUTRIGHT:
if property_set in me.state.owned_unmortgaged_sets:
if property.number_of_houses < 5:
# we could still build on this set.
return
else:
candidate = property_set
if candidate != None:
properties = []
unmortgage_cost = 0
for property in candidate.properties:
if property.is_mortgaged:
unmortgage_cost += int(property.mortgage_value * 1.1)
if unmortgage_cost > me.state.cash-self.cash_reserve:
if len(properties) > 0:
Logger.log("Unmortgaging "+str(properties), Logger.INFO)
return properties
                properties.append(property)
            # we can afford to unmortgage everything counted above
            if len(properties) > 0:
                Logger.log("Unmortgaging "+str(properties), Logger.INFO)
                return properties
| {
"content_hash": "9b0e2dc60cb97dfc0008ed02172c44c1",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 156,
"avg_line_length": 42.57242582897033,
"alnum_prop": 0.5897761744691318,
"repo_name": "richard-shepherd/monopyly",
"id": "67805f722ae326a5c7f4b72749c5c84711b10533",
"size": "24394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIs/Huw Roberts/huw20140202.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "57628"
},
{
"name": "Python",
"bytes": "2136924"
},
{
"name": "Shell",
"bytes": "474"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import pytz
import re
import sys
from .row import Row
__all__ = ['get_parser']
ERROR_VALUE = 99.99
ERROR_TOLERANCE = sys.float_info.epsilon
class Parser():
def __init__(self, f):
self._fobj = f
def get(self):
for line in self._fobj:
for unvalidated_row in parse_line(line):
row = validate_row(unvalidated_row)
if row is not None:
yield row
def get_parser(fobj, options):
return Parser(fobj)
def validate_row(row):
for number in (row.observed_sea_level, row.predicted_tide_level,
row.derived_surge_level):
if is_error_value(number):
return None
return row
def is_error_value(number):
return almost_equal(number, ERROR_VALUE)
def almost_equal(a, b):
return abs(a - b) < ERROR_TOLERANCE
def parse_line(line):
(day, month, year, hour, minute, observation1, prediction1, surge1, flag1,
observation2, prediction2, surge2, flag2, observation3,
prediction3, surge3, flag3) = re.split('\s+', line.strip('\r\n'))
dt1 = datetime.datetime(
int(year),
int(month),
int(day),
int(hour),
int(minute),
tzinfo=pytz.UTC)
dt2 = dt1 + datetime.timedelta(minutes=1)
dt3 = dt2 + datetime.timedelta(minutes=1)
observation1 = float(observation1)
observation2 = float(observation2)
observation3 = float(observation3)
prediction1 = float(prediction1)
prediction2 = float(prediction2)
prediction3 = float(prediction3)
surge1 = float(surge1)
surge2 = float(surge2)
surge3 = float(surge3)
yield Row(dt1,
observed_sea_level=observation1,
predicted_tide_level=prediction1,
derived_surge_level=surge1)
yield Row(dt2,
observed_sea_level=observation2,
predicted_tide_level=prediction2,
derived_surge_level=surge2)
yield Row(dt3,
observed_sea_level=observation3,
predicted_tide_level=prediction3,
derived_surge_level=surge3)
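# Illustrative sketch (not from the original source): a raw input line is
# whitespace-separated as "DD MM YYYY HH MM" followed by three
# (observation, prediction, surge, flag) groups, one per minute, e.g.
#
#   01 02 2014 00 00  4.321 4.300 0.021 0  4.330 4.305 0.025 0  99.99 4.310 99.99 9
#
# parse_line() would yield three Rows for 00:00, 00:01 and 00:02, and
# validate_row() would drop the third because 99.99 marks missing data.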
| {
"content_hash": "e4868187ebf97969f8154ce6dde84da6",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 78,
"avg_line_length": 26.317073170731707,
"alnum_prop": 0.6167747914735866,
"repo_name": "sealevelresearch/tide-wrangler",
"id": "59fc41a0274e7b37af223534ff2ae2cacbcde111",
"size": "2181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tide_wrangler/parsers/hilbre.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "65324"
},
{
"name": "Shell",
"bytes": "758"
}
],
"symlink_target": ""
} |
from Pots import Pots
from Isdn import Isdn
from G3 import G3
| {
"content_hash": "6e74f1c03bd53dffb7553f8554c169ac",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 21,
"avg_line_length": 15.75,
"alnum_prop": 0.7936507936507936,
"repo_name": "philuu12/PYTHON_4_NTWK_ENGRS",
"id": "5392fbdac025d24130c2f047c432877ec78e3d6a",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wk9_practice/Phone/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "116931"
}
],
"symlink_target": ""
} |
from yamlize.attributes import Attribute, MapItem, KeyedListItem
from yamlize.yamlizing_error import YamlizingError
class AttributeCollection(object):
__slots__ = ('order', 'by_key', 'by_name')
def __init__(self, *args, **kwargs):
# let's assume the order things were defined is the order we want to
# display them, still public if someone wants to muck
self.order = list()
self.by_key = dict()
self.by_name = dict()
for item in args:
if not isinstance(item, Attribute):
raise TypeError('Incorrect type {} while initializing '
'AttributeCollection with {}'
.format(type(item), item))
self.add(item)
def __iter__(self):
return iter(self.order)
@property
def required(self):
return {attr for attr in self if attr.is_required}
def add(self, attr):
existing = self.by_key.get(attr.key, None)
if existing is not None and existing is not attr:
raise KeyError('AttributeCollection already contains an entry for '
'{}, previously defined: {}'
.format(attr.key, existing))
elif existing is attr:
return
existing = self.by_name.get(attr.name, None)
if existing is not None and existing is not attr:
raise KeyError('AttributeCollection already contains an entry for '
'{}, previously defined: {}'
.format(attr.name, existing))
elif existing is attr:
return
self.by_key[attr.key] = attr
self.by_name[attr.name] = attr
self.order.append(attr)
def from_yaml(self, obj, loader, key_node, val_node, round_trip_data):
"""
returns: Attribute that was applied
"""
key = loader.construct_object(key_node)
attribute = self.by_key.get(key, None)
if attribute is None:
raise YamlizingError('Error parsing {}, found key `{}` but '
'expected any of {}'
.format(type(obj), key, self.by_key.keys()),
key_node)
attribute.from_yaml(obj, loader, val_node, round_trip_data)
return attribute
def yaml_attribute_order(self, obj, attr_order):
"""
        returns: attr_order extended with any Attributes not already present
"""
new_attrs = []
for attr in self:
if attr not in attr_order:
new_attrs.append(attr)
return attr_order + new_attrs
def attr_dump_order(self, obj, attr_order):
"""
        returns: attr_order with defaulted Attributes removed and remaining Attributes appended
"""
new_attrs = []
for attr in self:
if attr.has_default(obj):
if attr in attr_order:
attr_order.remove(attr)
continue
if attr not in attr_order:
new_attrs.append(attr)
return attr_order + new_attrs
class MapAttributeCollection(AttributeCollection):
__slots__ = ()
def from_yaml(self, obj, loader, key_node, val_node, round_trip_data):
"""
returns: Attribute that was applied, or None.
Raises an exception if there was actually a problem.
"""
key = loader.construct_object(key_node)
attribute = self.by_key.get(key, None)
if attribute is not None:
attribute.from_yaml(obj, loader, val_node, round_trip_data)
else:
# the key_node will point to our object
del loader.constructed_objects[key_node]
key = obj.key_type.from_yaml(loader, key_node, round_trip_data)
val = obj.value_type.from_yaml(loader, val_node, round_trip_data)
try:
obj.__setitem__(key, val)
except Exception as ee:
raise YamlizingError('Failed to add key `{}` with value `{}`, got: {}'
.format(key, val, ee), key_node)
return attribute # could be None, and that is fine
def yaml_attribute_order(self, obj, attr_order):
"""
        returns: the base attribute order plus a MapItem for each key in obj
"""
attr_order = AttributeCollection.yaml_attribute_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(MapItem(item_key, obj.key_type, obj.value_type))
return attr_order
def attr_dump_order(self, obj, attr_order):
"""
        returns: the base dump order plus a MapItem for each key in obj
"""
attr_order = AttributeCollection.attr_dump_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(MapItem(item_key, obj.key_type, obj.value_type))
return attr_order
class KeyedListAttributeCollection(AttributeCollection):
__slots__ = ()
def from_yaml(self, obj, loader, key_node, val_node, round_trip_data):
"""
returns: Attribute that was applied, or None.
Raises an exception if there was actually a problem.
"""
key = loader.construct_object(key_node)
attribute = self.by_key.get(key, None)
if attribute is not None:
attribute.from_yaml(obj, loader, val_node, round_trip_data)
else:
# the key_node will point to our object
del loader.constructed_objects[key_node]
val = obj.item_type.from_yaml_key_val(
loader,
key_node,
val_node,
obj.__class__.key_attr,
round_trip_data
)
obj[obj.__class__.key_attr.get_value(val)] = val
return attribute # could be None, and that is fine
def yaml_attribute_order(self, obj, attr_order):
"""
        returns: the base attribute order plus a KeyedListItem for each key in obj
"""
attr_order = AttributeCollection.yaml_attribute_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(KeyedListItem(obj.__class__.key_attr, obj.item_type, item_key))
return attr_order
def attr_dump_order(self, obj, attr_order):
"""
        returns: the base dump order plus a KeyedListItem for each key in obj
"""
attr_order = AttributeCollection.attr_dump_order(self, obj, attr_order)
for item_key in obj.keys():
attr_order.append(KeyedListItem(obj.__class__.key_attr, obj.item_type, item_key))
return attr_order
| {
"content_hash": "9b633cab34b8876b84efa93edc63853b",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 93,
"avg_line_length": 32.984848484848484,
"alnum_prop": 0.5594855305466238,
"repo_name": "SimplyKnownAsG/yamlize",
"id": "a7c60f9d19bcbf37f540085f7e4f50d79486dbfe",
"size": "6532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yamlize/attribute_collection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "80405"
}
],
"symlink_target": ""
} |
"""A lightweight buffer for maintaining tensors."""
import tensorflow.compat.v1 as tf
class TensorBuffer(object):
"""A lightweight buffer for maintaining lists.
The TensorBuffer accumulates tensors of the given shape into a tensor (whose
rank is one more than that of the given shape) via calls to `append`. The
current value of the accumulated tensor can be extracted via the property
`values`.
"""
def __init__(self, capacity, shape, dtype=tf.int32, name=None):
"""Initializes the TensorBuffer.
Args:
capacity: Initial capacity. Buffer will double in capacity each time it is
filled to capacity.
shape: The shape (as tuple or list) of the tensors to accumulate.
dtype: The type of the tensors.
name: A string name for the variable_scope used.
Raises:
ValueError: If the shape is empty (specifies scalar shape).
"""
shape = list(shape)
self._rank = len(shape)
self._name = name
self._dtype = dtype
if not self._rank:
raise ValueError('Shape cannot be scalar.')
shape = [capacity] + shape
with tf.compat.v1.variable_scope(self._name):
# We need to use a placeholder as the initial value to allow resizing.
self._buffer = tf.Variable(
initial_value=tf.placeholder_with_default(
tf.zeros(shape, dtype), shape=None),
trainable=False,
name='buffer',
use_resource=True)
self._current_size = tf.Variable(
initial_value=0, dtype=tf.int32, trainable=False, name='current_size')
self._capacity = tf.Variable(
initial_value=capacity,
dtype=tf.int32,
trainable=False,
name='capacity')
def append(self, value):
"""Appends a new tensor to the end of the buffer.
Args:
value: The tensor to append. Must match the shape specified in the
initializer.
Returns:
An op appending the new tensor to the end of the buffer.
"""
def _double_capacity():
"""Doubles the capacity of the current tensor buffer."""
padding = tf.zeros_like(self._buffer, self._buffer.dtype)
new_buffer = tf.concat([self._buffer, padding], axis=0)
if tf.executing_eagerly():
with tf.compat.v1.variable_scope(self._name, reuse=True):
self._buffer = tf.get_variable(
name='buffer',
dtype=self._dtype,
initializer=new_buffer,
trainable=False)
return self._buffer, tf.compat.v1.assign(
self._capacity, tf.multiply(self._capacity, 2))
else:
return tf.compat.v1.assign(
self._buffer, new_buffer,
validate_shape=False), tf.compat.v1.assign(
self._capacity, tf.multiply(self._capacity, 2))
update_buffer, update_capacity = tf.cond(
pred=tf.equal(self._current_size, self._capacity),
true_fn=_double_capacity,
false_fn=lambda: (self._buffer, self._capacity))
with tf.control_dependencies([update_buffer, update_capacity]):
with tf.control_dependencies([
tf.assert_less(
self._current_size,
self._capacity,
message='Appending past end of TensorBuffer.'),
tf.assert_equal(
tf.shape(input=value),
tf.shape(input=self._buffer)[1:],
message='Appending value of inconsistent shape.')
]):
with tf.control_dependencies(
[tf.compat.v1.assign(self._buffer[self._current_size, :], value)]):
return tf.compat.v1.assign_add(self._current_size, 1)
@property
def values(self):
"""Returns the accumulated tensor."""
begin_value = tf.zeros([self._rank + 1], dtype=tf.int32)
value_size = tf.concat([[self._current_size],
tf.constant(-1, tf.int32, [self._rank])], 0)
return tf.slice(self._buffer, begin_value, value_size)
@property
def current_size(self):
"""Returns the current number of tensors in the buffer."""
return self._current_size
@property
def capacity(self):
"""Returns the current capacity of the buffer."""
return self._capacity
| {
"content_hash": "3d7fdb666e2634a7bad1d06a547a50e3",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 80,
"avg_line_length": 35.440677966101696,
"alnum_prop": 0.6209947393591583,
"repo_name": "tensorflow/privacy",
"id": "c9558bcf8d40bfd412698b9f0c6b8162479a55af",
"size": "4790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_privacy/privacy/analysis/tensor_buffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "767849"
},
{
"name": "Python",
"bytes": "1466141"
},
{
"name": "Shell",
"bytes": "21949"
},
{
"name": "Starlark",
"bytes": "35224"
}
],
"symlink_target": ""
} |
from config import TRANSACTION_MODE
from exception import MonSQLException
from table import Table
from queryset import DataRow
import abc
class Database:
"""
Database wrapper for interaction with specific database
"""
def __init__(self, db, mode=TRANSACTION_MODE.DEFAULT):
self.__db = db
self.__cursor = self.__db.cursor()
self.__table_map = {}
self.__mode = mode
"""
Properties for accessibility to subclasses
"""
@property
def cursor(self):
return self.__cursor
@property
def db(self):
return self.__db
@property
def mode(self):
return self.__mode
def __ensure_table_obj(self, name):
if not self.__table_map.has_key(name):
self.__table_map[name] = self.get_table_obj(name)
@abc.abstractmethod
def get_table_obj(self, name):
"""This is used internally inside the class
        Implemented by subclasses, because different databases may use different table classes"""
pass
@abc.abstractmethod
def list_tables(self):
"""
Return a list of lower case table names. Different databases have their own ways to
do this, so leave the implementation to the subclasses
"""
pass
@abc.abstractmethod
def truncate_table(self, tablename):
"""Delete all rows in a table.
        Not all databases support built-in truncate, so the implementation is left
        to subclasses. For those that don't support truncate, 'delete from ...' is used """
pass
def get(self, name):
"""
Return a Table object to perform operations on this table.
        Note that all tables returned by the same Database instance share the same connection.
:Parameters:
- name: A table name
:Returns: A Table object
"""
self.__ensure_table_obj(name)
return self.__table_map[name]
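    # Hypothetical usage sketch (subclass and table names are illustrative,
    # not taken from this project):
    #
    #   db = SomeDatabaseSubclass(connection)
    #   users = db.get('users')        # Table object sharing this connection
    #   users is db.get('users')       # True - cached per table name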
def close(self):
"""
Close the connection to the server
"""
self.__db.close()
self.__table_map = {}
def commit(self):
"""
Commit the current session
"""
self.__db.commit()
def set_foreign_key_check(self, to_check):
"""
Enable/disable foreign key check. Disabling this is especially useful when
        deleting from a table with a foreign key pointing to itself
"""
if to_check:
self.__db.cursor().execute('SET foreign_key_checks = 1;')
else:
self.__db.cursor().execute('SET foreign_key_checks = 0;')
def is_table_existed(self, tablename):
"""
Check whether the given table name exists in this database. Return boolean.
"""
all_tablenames = self.list_tables()
tablename = tablename.lower()
if tablename in all_tablenames:
return True
else:
return False
def create_table(self, tablename, columns, primary_key=None, force_recreate=False):
"""
:Parameters:
- tablename: string
        - columns: list or tuple, with each element being a string like 'id INT NOT NULL UNIQUE'
        - primary_key: list or tuple, with elements being the column names
- force_recreate: When table of the same name already exists, if this is True, drop that table; if False, raise exception
:Return: Nothing
"""
if self.is_table_existed(tablename):
if force_recreate:
self.drop_table(tablename)
else:
raise MonSQLException('TABLE ALREADY EXISTS')
columns_specs = ','.join(columns)
if primary_key is not None:
if len(primary_key) == 0:
                raise MonSQLException('PRIMARY KEY MUST CONTAIN AT LEAST ONE COLUMN')
columns_specs += ',PRIMARY KEY(%s)' %(','.join(primary_key))
sql = 'CREATE TABLE %s(%s)' %(tablename, columns_specs)
self.__cursor.execute(sql)
self.__db.commit()
def drop_table(self, tablename, silent=False):
"""
Drop a table
:Parameters:
- tablename: string
        - silent: boolean. If False and the table doesn't exist, an exception will be raised;
Otherwise it will be ignored
:Return: Nothing
"""
if not silent and not self.is_table_existed(tablename):
raise MonSQLException('TABLE %s DOES NOT EXIST' %tablename)
self.__cursor.execute('DROP TABLE IF EXISTS %s' %(tablename))
self.__db.commit()
def raw(self, sql):
"""
Execute raw sql
:Parameters:
- sql: string, sql to be executed
:Return: the result of this execution
        If it's a select, return a list with each element being a DataRow instance
Otherwise return raw result from the cursor (Should be insert or update or delete)
"""
res = self.cursor.execute(sql)
if self.cursor.description is None:
return res
rows = self.cursor.fetchall()
columns = [d[0] for d in self.cursor.description]
structured_rows = []
for row in rows:
data = {}
for val, col in zip(row, columns):
data[col] = val
structured_rows.append(DataRow(data))
return structured_rows
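    # Hedged example of raw(): for a SELECT the cursor has a description, so each
    # row is wrapped in a DataRow keyed by column name; otherwise cursor.execute()'s
    # own return value is passed through.
    #
    #   rows = db.raw("SELECT id, name FROM users")
    #   for row in rows:
    #       print(row['id'], row['name'])          # DataRow built from a dict of columns
    #   db.raw("DELETE FROM users WHERE id = 1")   # non-select: returns execute() result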
| {
"content_hash": "aa6d2dad88e29a7ea63889d301340521",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 129,
"avg_line_length": 28.818181818181817,
"alnum_prop": 0.5860085359064762,
"repo_name": "firstprayer/monsql",
"id": "a6591450047da3ae9496076440a770bfcb7967b1",
"size": "5406",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "monsql/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7078"
},
{
"name": "Python",
"bytes": "52662"
},
{
"name": "Shell",
"bytes": "6710"
}
],
"symlink_target": ""
} |
EVENT_TYPES_TO_LEVEL = {'success': 4, 'error': 2, 'warning': 3, 'information': 4}
# Audit success and audit failure events do not correspond to a specific level but can instead be queried
# by using specific keywords.
# https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.eventing.reader.standardeventkeywords?view=netframework-4.8
EVENT_TYPES_TO_KEYWORD = {'failure audit': '0x8010000000000000', 'success audit': '0x8020000000000000'}
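# Hedged illustration (not part of this module): the level map is meant for
# Level-based event filters, while audit events are matched on Keywords instead,
# e.g. XPath-style queries along the lines of
#   *[System[band(Keywords, 0x8020000000000000)]]   # success audit
#   *[System[Level=2]]                              # error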
| {
"content_hash": "86f52d17d5b386c4e36f7ce765e43e43",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 124,
"avg_line_length": 74.66666666666667,
"alnum_prop": 0.765625,
"repo_name": "DataDog/integrations-core",
"id": "608862b9cecdf681a05701055470a9967111c3ce",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win32_event_log/datadog_checks/win32_event_log/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import re
import urlparse
import formencode
from formencode import htmlfill, validators
class BaseForm(formencode.Schema):
allow_extra_fields = True
filter_extra_fields = True
form_errors = {}
params = {}
#_xsrf = validators.PlainText(not_empty=True, max=32)
def __init__(self, handler):
self._values = {}
arguments = {}
request = handler.request
content_type = request.headers.get("Content-Type", "")
if request.method == "POST":
if content_type.startswith("application/x-www-form-urlencoded"):
arguments = urlparse.parse_qs(request.body, keep_blank_values=1)
for k, v in arguments.iteritems():
if len(v) == 1:
self._values[k] = v[0]
else:
self._values[k] = v
self._handler = handler
self.result = True
def validate(self):
try:
self.params = self.to_python(self._values)
self.result = True
self.validate_after()
except formencode.Invalid, error:
self.params = error.value
self.form_errors = error.error_dict or {}
self.result = False
except Exception, e:
pass
return self.result
def add_error(self, attr, msg):
self.result = False
self.form_errors[attr] = msg
def validate_after(self):
pass
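    # Illustrative subclass sketch (field names are hypothetical, not from this project):
    #
    #   class LoginForm(BaseForm):
    #       username = validators.UnicodeString(not_empty=True, max=32)
    #       password = validators.UnicodeString(not_empty=True)
    #
    #   form = LoginForm(handler)
    #   if form.validate():
    #       do_login(form.params['username'], form.params['password'])
    #   else:
    #       render_errors(form.form_errors)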
| {
"content_hash": "8cfefcf05e5ad56abea6b04e0d288a5a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 28.423076923076923,
"alnum_prop": 0.5453315290933695,
"repo_name": "feilaoda/FlickBoard",
"id": "cc9983294701e47b929eb57fe7dd6a3561de2a11",
"size": "1503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/app/base/form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "120662"
},
{
"name": "Python",
"bytes": "164260"
},
{
"name": "Shell",
"bytes": "0"
}
],
"symlink_target": ""
} |
from typing import Union
from types import ModuleType
import mxnet as mx
from mxnet import nd, sym
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.mxnet_components.embedders.embedder import InputEmbedder
from rl_coach.architectures.mxnet_components.layers import Dense
from rl_coach.base_parameters import EmbedderScheme
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class VectorEmbedder(InputEmbedder):
def __init__(self, params: InputEmbedderParameters):
"""
        A vector embedder is an input embedder that takes a vector input from the state and produces a vector
embedding by passing it through a neural network.
:param params: parameters object containing input_clipping, input_rescaling, batchnorm, activation_function
and dropout properties.
"""
super(VectorEmbedder, self).__init__(params)
self.input_rescaling = params.input_rescaling['vector']
self.input_offset = params.input_offset['vector']
@property
def schemes(self):
"""
        Schemes are the pre-defined network architectures of various depths and complexities that can be used. They are
        used to create the Block layers when VectorEmbedder is initialised.
:return: dictionary of schemes, with key of type EmbedderScheme enum and value being list of mxnet.gluon.Block.
"""
return {
EmbedderScheme.Empty:
[],
EmbedderScheme.Shallow:
[
Dense(units=128)
],
# Use for DQN
EmbedderScheme.Medium:
[
Dense(units=256)
],
# Use for Carla
EmbedderScheme.Deep:
[
Dense(units=128),
Dense(units=128),
Dense(units=128)
]
}
def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type:
"""
Used for forward pass through embedder network.
:param F: backend api, either `nd` or `sym` (if block has been hybridized).
:type F: nd or sym
:param x: vector representing environment state, of shape (batch_size, in_channels).
:return: embedding of environment state, of shape (batch_size, channels).
"""
if isinstance(x, nd.NDArray) and len(x.shape) != 2 and self.scheme != EmbedderScheme.Empty:
raise ValueError("Vector embedders expect the input size to have 2 dimensions. The given size is: {}"
.format(x.shape))
return super(VectorEmbedder, self).hybrid_forward(F, x, *args, **kwargs)
| {
"content_hash": "0d9064a7cbccc79050626964bd0f5557",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 119,
"avg_line_length": 38.80281690140845,
"alnum_prop": 0.6192377495462795,
"repo_name": "NervanaSystems/coach",
"id": "4a36357247a0e50173226a493336e13165a3d73a",
"size": "2755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_coach/architectures/mxnet_components/embedders/vector_embedder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "CSS",
"bytes": "6493"
},
{
"name": "Dockerfile",
"bytes": "1118"
},
{
"name": "HTML",
"bytes": "161"
},
{
"name": "Jupyter Notebook",
"bytes": "91174"
},
{
"name": "Makefile",
"bytes": "5036"
},
{
"name": "Python",
"bytes": "1926733"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, request
import unirest
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
elif request.method == 'POST':
response = unirest.post('https://github.com/login/oauth/access_token', headers={'Accept': 'application/json'},
params={'client_id': '#You Client Code',
'client_secret': '#Your client_secret',
'code': request.form['code']})
if response.code == 200:
return response.raw_body
else:
return {'code': 500}
else:
return 'Unsupported request method'
@app.route('/starring', methods=['POST'])
def starring():
if request.method == 'POST':
access_token = request.form['accessToken']
repo = request.form['repo']
if access_token and not access_token.isspace() and repo and not repo.isspace():
response = unirest.put('https://api.github.com/user/starred/' + repo + '?access_token=' + access_token,
headers={'Accept': 'application/json', 'User-Agent': 'All-Star'})
if response.code == 204:
return '200'
else:
return '500'
else:
return 'Error Parameter'
else:
return 'Unsupported request method'
if __name__ == '__main__':
app.run() | {
"content_hash": "ee9caff8eef274136ca4ccba927babb3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 118,
"avg_line_length": 34.45454545454545,
"alnum_prop": 0.5329815303430079,
"repo_name": "gucheen/github-starring",
"id": "8721f6a468e60951903fc23a71a82e893378c3ee",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1133"
},
{
"name": "JavaScript",
"bytes": "4798"
},
{
"name": "Python",
"bytes": "1516"
}
],
"symlink_target": ""
} |
from rest_framework import renderers
from rest_framework.parsers import BaseParser
from rest_framework.exceptions import ParseError
from django.conf import settings
from django.utils import six
import bson
class MongoJSONParser(BaseParser):
"""
Parses JSON-serialized data.
"""
media_type = 'application/json'
renderer_class = renderers.JSONRenderer
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return bson.json_util.loads(data)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc)) | {
"content_hash": "05003e246321637610c9703dce95322b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.6798642533936652,
"repo_name": "martinrusev/amonone",
"id": "86ce2d4fa90ec10d0e95a37e6faf3a0dce0a7711",
"size": "884",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "amon/apps/api/parsers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77950"
},
{
"name": "JavaScript",
"bytes": "28811"
},
{
"name": "Python",
"bytes": "180983"
},
{
"name": "Ruby",
"bytes": "131"
},
{
"name": "Shell",
"bytes": "5652"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import argparse
import hashlib
import inspect
import os
import serial
import struct
import sys
import time
import base64
import zlib
import shlex
__version__ = "2.0.1"
MAX_UINT32 = 0xffffffff
MAX_UINT24 = 0xffffff
DEFAULT_TIMEOUT = 3 # timeout for most flash operations
START_FLASH_TIMEOUT = 20 # timeout for starting flash (may perform erase)
CHIP_ERASE_TIMEOUT = 120 # timeout for full chip erase
SYNC_TIMEOUT = 0.1 # timeout for syncing with bootloader
DETECTED_FLASH_SIZES = {0x12: '256KB', 0x13: '512KB', 0x14: '1MB',
0x15: '2MB', 0x16: '4MB', 0x17: '8MB', 0x18: '16MB'}
def check_supported_function(func, check_func):
"""
Decorator implementation that wraps a check around an ESPLoader
bootloader function to check if it's supported.
This is used to capture the multidimensional differences in
functionality between the ESP8266 & ESP32 ROM loaders, and the
software stub that runs on both. Not possible to do this cleanly
via inheritance alone.
"""
def inner(*args, **kwargs):
obj = args[0]
if check_func(obj):
return func(*args, **kwargs)
else:
raise NotImplementedInROMError(obj, func)
return inner
def stub_function_only(func):
""" Attribute for a function only supported in the software stub loader """
return check_supported_function(func, lambda o: o.IS_STUB)
def stub_and_esp32_function_only(func):
""" Attribute for a function only supported by software stubs or ESP32 ROM """
return check_supported_function(func, lambda o: o.IS_STUB or o.CHIP_NAME == "ESP32")
PYTHON2 = sys.version_info[0] < 3 # True if on pre-Python 3
# Function to return nth byte of a bitstring
# Different behaviour on Python 2 vs 3
if PYTHON2:
def byte(bitstr, index):
return ord(bitstr[index])
else:
def byte(bitstr, index):
return bitstr[index]
def esp8266_function_only(func):
""" Attribute for a function only supported on ESP8266 """
return check_supported_function(func, lambda o: o.CHIP_NAME == "ESP8266")
class ESPLoader(object):
""" Base class providing access to ESP ROM & softtware stub bootloaders.
Subclasses provide ESP8266 & ESP32 specific functionality.
Don't instantiate this base class directly, either instantiate a subclass or
call ESPLoader.detect_chip() which will interrogate the chip and return the
appropriate subclass instance.
"""
CHIP_NAME = "Espressif device"
IS_STUB = False
DEFAULT_PORT = "/dev/ttyUSB0"
# Commands supported by ESP8266 ROM bootloader
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
    # Some commands supported by ESP32 ROM bootloader (or -8266 w/ stub)
ESP_SPI_SET_PARAMS = 0x0B
ESP_SPI_ATTACH = 0x0D
ESP_CHANGE_BAUDRATE = 0x0F
ESP_FLASH_DEFL_BEGIN = 0x10
ESP_FLASH_DEFL_DATA = 0x11
ESP_FLASH_DEFL_END = 0x12
ESP_SPI_FLASH_MD5 = 0x13
# Some commands supported by stub only
ESP_ERASE_FLASH = 0xD0
ESP_ERASE_REGION = 0xD1
ESP_READ_FLASH = 0xD2
ESP_RUN_USER_CODE = 0xD3
    # Maximum block sizes for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
FLASH_WRITE_SIZE = 0x400
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# Flash sector size, minimum unit of erase.
FLASH_SECTOR_SIZE = 0x1000
UART_DATA_REG_ADDR = 0x60000078
# Memory addresses
IROM_MAP_START = 0x40200000
IROM_MAP_END = 0x40300000
# The number of bytes in the UART response that signify command status
STATUS_BYTES_LENGTH = 2
def __init__(self, port=DEFAULT_PORT, baud=ESP_ROM_BAUD):
"""Base constructor for ESPLoader bootloader interaction
Don't call this constructor, either instantiate ESP8266ROM
or ESP32ROM, or use ESPLoader.detect_chip().
This base class has all of the instance methods for bootloader
functionality supported across various chips & stub
loaders. Subclasses replace the functions they don't support
with ones which throw NotImplementedInROMError().
"""
if isinstance(port, serial.Serial):
self._port = port
else:
self._port = serial.serial_for_url(port)
self._slip_reader = slip_reader(self._port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/espressif/esptool/issues/44#issuecomment-107094446
self._set_port_baudrate(baud)
def _set_port_baudrate(self, baud):
try:
self._port.baudrate = baud
except IOError:
raise FatalError("Failed to set baud rate %d. The driver may not support this rate." % baud)
@staticmethod
def detect_chip(port=DEFAULT_PORT, baud=ESP_ROM_BAUD, connect_mode='default_reset'):
""" Use serial access to detect the chip type.
We use the UART's datecode register for this, it's mapped at
the same address on ESP8266 & ESP32 so we can use one
memory read and compare to the datecode register for each chip
type.
This routine automatically performs ESPLoader.connect() (passing
connect_mode parameter) as part of querying the chip.
"""
detect_port = ESPLoader(port, baud)
detect_port.connect(connect_mode)
print('Detecting chip type...', end='')
sys.stdout.flush()
date_reg = detect_port.read_reg(ESPLoader.UART_DATA_REG_ADDR)
for cls in [ESP8266ROM, ESP32ROM]:
if date_reg == cls.DATE_REG_VALUE:
# don't connect a second time
inst = cls(detect_port._port, baud)
print(' %s' % inst.CHIP_NAME)
return inst
print('')
raise FatalError("Unexpected UART datecode value 0x%08x. Failed to autodetect chip type." % date_reg)
""" Read a SLIP packet from the serial port """
def read(self):
return next(self._slip_reader)
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = b'\xc0' \
+ (packet.replace(b'\xdb',b'\xdb\xdd').replace(b'\xc0',b'\xdb\xdc')) \
+ b'\xc0'
self._port.write(buf)
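    # Worked SLIP example: a payload of b'\x01\xc0\x02' goes on the wire as
    # C0 01 DB DC 02 C0 -- a 0xC0 inside the payload becomes the escape pair DB DC,
    # and a literal 0xDB would become DB DD, per the replaces above.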
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state=ESP_CHECKSUM_MAGIC):
for b in data:
if type(b) is int: # python 2/3 compat
state ^= b
else:
state ^= ord(b)
return state
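    # Worked example: checksum(b'\x01\x02\x03') == 0xef ^ 0x01 ^ 0x02 ^ 0x03 == 0xef,
    # since 0x01 ^ 0x02 ^ 0x03 == 0x00 and the running XOR starts at ESP_CHECKSUM_MAGIC.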
""" Send a request and read the response """
def command(self, op=None, data=b"", chk=0, wait_response=True):
if op is not None:
pkt = struct.pack(b'<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
if not wait_response:
return
# tries to get a response until that response has the
        # same operation as the request or a retry limit has been
        # exceeded. This is needed for some esp8266s that
# reply with more sync responses than expected.
for retry in range(100):
p = self.read()
if len(p) < 8:
continue
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', p[:8])
if resp != 1:
continue
data = p[8:]
if op is None or op_ret == op:
return val, data
raise FatalError("Response doesn't match request")
def check_command(self, op_description, op=None, data=b'', chk=0):
"""
Execute a command with 'command', check the result code and throw an appropriate
FatalError if it fails.
Returns the "result" of a successful command.
"""
val, data = self.command(op, data, chk)
# things are a bit weird here, bear with us
# the status bytes are the last 2/4 bytes in the data (depending on chip)
if len(data) < self.STATUS_BYTES_LENGTH:
raise FatalError("Failed to %s. Only got %d byte status response." % (op_description, len(data)))
status_bytes = data[-self.STATUS_BYTES_LENGTH:]
# we only care if the first one is non-zero. If it is, the second byte is a reason.
if byte(status_bytes, 0) != 0:
raise FatalError.WithResult('Failed to %s' % op_description, status_bytes)
# if we had more data than just the status bytes, return it as the result
# (this is used by the md5sum command, maybe other commands?)
if len(data) > self.STATUS_BYTES_LENGTH:
return data[:-self.STATUS_BYTES_LENGTH]
else: # otherwise, just return the 'val' field which comes from the reply header (this is used by read_reg)
return val
def flush_input(self):
self._port.flushInput()
self._slip_reader = slip_reader(self._port)
def sync(self):
self.command(self.ESP_SYNC, b'\x07\x07\x12\x20' + 32 * b'\x55')
for i in range(7):
self.command()
def _connect_attempt(self, mode='default_reset', esp32r0_delay=False):
""" A single connection attempt, with esp32r0 workaround options """
# esp32r0_delay is a workaround for bugs with the most common auto reset
# circuit and Windows, if the EN pin on the dev board does not have
# enough capacitance.
#
# Newer dev boards shouldn't have this problem (higher value capacitor
# on the EN pin), and ESP32 revision 1 can't use this workaround as it
# relies on a silicon bug.
#
# Details: https://github.com/espressif/esptool/issues/136
last_error = None
# issue reset-to-bootloader:
# RTS = either CH_PD/EN or nRESET (both active low = chip in reset
# DTR = GPIO0 (active low = boot to flasher)
#
# DTR & RTS are active low signals,
# ie True = pin @ 0V, False = pin @ VCC.
if mode != 'no_reset':
self._port.setDTR(False) # IO0=HIGH
self._port.setRTS(True) # EN=LOW, chip in reset
time.sleep(0.1)
if esp32r0_delay:
# Some chips are more likely to trigger the esp32r0
# watchdog reset silicon bug if they're held with EN=LOW
# for a longer period
time.sleep(1.2)
self._port.setDTR(True) # IO0=LOW
self._port.setRTS(False) # EN=HIGH, chip out of reset
if esp32r0_delay:
# Sleep longer after reset.
# This workaround only works on revision 0 ESP32 chips,
                # it exploits a silicon bug that causes a spurious watchdog reset.
time.sleep(0.4) # allow watchdog reset to occur
time.sleep(0.05)
self._port.setDTR(False) # IO0=HIGH, done
self._port.timeout = SYNC_TIMEOUT
for _ in range(5):
try:
self.flush_input()
self._port.flushOutput()
self.sync()
self._port.timeout = DEFAULT_TIMEOUT
return None
except FatalError as e:
if esp32r0_delay:
print('_', end='')
else:
print('.', end='')
sys.stdout.flush()
time.sleep(0.05)
last_error = e
return last_error
def connect(self, mode='default_reset'):
""" Try connecting repeatedly until successful, or giving up """
print('Connecting...', end='')
sys.stdout.flush()
last_error = None
try:
for _ in range(10):
last_error = self._connect_attempt(mode=mode, esp32r0_delay=False)
if last_error is None:
return
last_error = self._connect_attempt(mode=mode, esp32r0_delay=True)
if last_error is None:
return
finally:
print('') # end 'Connecting...' line
raise FatalError('Failed to connect to %s: %s' % (self.CHIP_NAME, last_error))
""" Read memory address in target """
def read_reg(self, addr):
# we don't call check_command here because read_reg() function is called
# when detecting chip type, and the way we check for success (STATUS_BYTES_LENGTH) is different
# for different chip types (!)
val, data = self.command(self.ESP_READ_REG, struct.pack('<I', addr))
if byte(data, 0) != 0:
raise FatalError.WithResult("Failed to read register address %08x" % addr, data)
return val
""" Write to memory address in target """
def write_reg(self, addr, value, mask=0xFFFFFFFF, delay_us=0):
return self.check_command("write target memory", self.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
return self.check_command("enter RAM download mode", self.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
return self.check_command("write to target RAM", self.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
self.checksum(data))
""" Leave download mode and run the application """
def mem_finish(self, entrypoint=0):
return self.check_command("leave RAM download mode", self.ESP_MEM_END,
struct.pack('<II', int(entrypoint == 0), entrypoint))
""" Start downloading to Flash (performs an erase)
Returns number of blocks (of size self.FLASH_WRITE_SIZE) to write.
"""
def flash_begin(self, size, offset):
num_blocks = (size + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
erase_size = self.get_erase_size(offset, size)
self._port.timeout = START_FLASH_TIMEOUT
t = time.time()
self.check_command("enter Flash download mode", self.ESP_FLASH_BEGIN,
struct.pack('<IIII', erase_size, num_blocks, self.FLASH_WRITE_SIZE, offset))
if size != 0 and not self.IS_STUB:
print("Took %.2fs to erase flash block" % (time.time() - t))
self._port.timeout = DEFAULT_TIMEOUT
return num_blocks
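    # Worked sizing example (illustrative numbers): flashing 100000 bytes with
    # FLASH_WRITE_SIZE = 0x400 gives (100000 + 1023) // 1024 == 98 blocks; the
    # erase size sent to the ROM comes from get_erase_size(), which on the ESP8266
    # compensates for the bootloader erase bug described there.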
""" Write block to flash """
def flash_block(self, data, seq):
self.check_command("write to target Flash after seq %d" % seq,
self.ESP_FLASH_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
self.checksum(data))
""" Leave flash mode and run/reboot """
def flash_finish(self, reboot=False):
pkt = struct.pack('<I', int(not reboot))
# stub sends a reply to this command
self.check_command("leave Flash mode", self.ESP_FLASH_END, pkt)
""" Run application code in flash """
def run(self, reboot=False):
# Fake flash begin immediately followed by flash end
self.flash_begin(0, 0)
self.flash_finish(reboot)
""" Read SPI flash manufacturer and device id """
def flash_id(self):
SPIFLASH_RDID = 0x9F
return self.run_spiflash_command(SPIFLASH_RDID, b"", 24)
def parse_flash_size_arg(self, arg):
try:
return self.FLASH_SIZES[arg]
except KeyError:
raise FatalError("Flash size '%s' is not supported by this chip type. Supported sizes: %s"
% (arg, ", ".join(self.FLASH_SIZES.keys())))
def run_stub(self, stub=None):
if stub is None:
if self.IS_STUB:
raise FatalError("Not possible for a stub to load another stub (memory likely to overlap.)")
stub = self.STUB_CODE
# Upload
print("Uploading stub...")
for field in ['text', 'data']:
if field in stub:
offs = stub[field + "_start"]
length = len(stub[field])
blocks = (length + self.ESP_RAM_BLOCK - 1) // self.ESP_RAM_BLOCK
self.mem_begin(length, blocks, self.ESP_RAM_BLOCK, offs)
for seq in range(blocks):
from_offs = seq * self.ESP_RAM_BLOCK
to_offs = from_offs + self.ESP_RAM_BLOCK
self.mem_block(stub[field][from_offs:to_offs], seq)
print("Running stub...")
self.mem_finish(stub['entry'])
p = self.read()
if p != b'OHAI':
raise FatalError("Failed to start stub. Unexpected response: %s" % p)
print("Stub running...")
return self.STUB_CLASS(self)
@stub_and_esp32_function_only
def flash_defl_begin(self, size, compsize, offset):
""" Start downloading compressed data to Flash (performs an erase)
Returns number of blocks (size self.FLASH_WRITE_SIZE) to write.
"""
num_blocks = (compsize + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
erase_blocks = (size + self.FLASH_WRITE_SIZE - 1) // self.FLASH_WRITE_SIZE
self._port.timeout = START_FLASH_TIMEOUT
t = time.time()
if self.IS_STUB:
write_size = size # stub expects number of bytes here, manages erasing internally
else:
write_size = erase_blocks * self.FLASH_WRITE_SIZE # ROM expects rounded up to erase block size
print("Compressed %d bytes to %d..." % (size, compsize))
self.check_command("enter compressed flash mode", self.ESP_FLASH_DEFL_BEGIN,
struct.pack('<IIII', write_size, num_blocks, self.FLASH_WRITE_SIZE, offset))
if size != 0 and not self.IS_STUB:
# (stub erases as it writes, but ROM loaders erase on begin)
print("Took %.2fs to erase flash block" % (time.time() - t))
self._port.timeout = DEFAULT_TIMEOUT
return num_blocks
""" Write block to flash, send compressed """
@stub_and_esp32_function_only
def flash_defl_block(self, data, seq):
self.check_command("write compressed data to flash after seq %d" % seq,
self.ESP_FLASH_DEFL_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, self.checksum(data))
""" Leave compressed flash mode and run/reboot """
@stub_and_esp32_function_only
def flash_defl_finish(self, reboot=False):
if not reboot and not self.IS_STUB:
# skip sending flash_finish to ROM loader, as this
# exits the bootloader. Stub doesn't do this.
return
pkt = struct.pack('<I', int(not reboot))
self.check_command("leave compressed flash mode", self.ESP_FLASH_DEFL_END, pkt)
self.in_bootloader = False
@stub_and_esp32_function_only
def flash_md5sum(self, addr, size):
# the MD5 command returns additional bytes in the standard
# command reply slot
res = self.check_command('calculate md5sum', self.ESP_SPI_FLASH_MD5, struct.pack('<IIII', addr, size, 0, 0))
if len(res) == 32:
return res.decode("utf-8") # already hex formatted
elif len(res) == 16:
return hexify(res).lower()
else:
raise FatalError("MD5Sum command returned unexpected result: %r" % res)
@stub_and_esp32_function_only
def change_baud(self, baud):
print("Changing baud rate to %d" % baud)
self.command(self.ESP_CHANGE_BAUDRATE, struct.pack('<II', baud, 0))
print("Changed.")
self._set_port_baudrate(baud)
time.sleep(0.05) # get rid of crap sent during baud rate change
self.flush_input()
@stub_function_only
def erase_flash(self):
# depending on flash chip model the erase may take this long (maybe longer!)
self._port.timeout = CHIP_ERASE_TIMEOUT
try:
self.check_command("erase flash", self.ESP_ERASE_FLASH)
finally:
self._port.timeout = DEFAULT_TIMEOUT
@stub_function_only
def erase_region(self, offset, size):
if offset % self.FLASH_SECTOR_SIZE != 0:
raise FatalError("Offset to erase from must be a multiple of 4096")
if size % self.FLASH_SECTOR_SIZE != 0:
raise FatalError("Size of data to erase must be a multiple of 4096")
self.check_command("erase region", self.ESP_ERASE_REGION, struct.pack('<II', offset, size))
@stub_function_only
def read_flash(self, offset, length, progress_fn=None):
# issue a standard bootloader command to trigger the read
self.check_command("read flash", self.ESP_READ_FLASH,
struct.pack('<IIII',
offset,
length,
self.FLASH_SECTOR_SIZE,
64))
# now we expect (length // block_size) SLIP frames with the data
data = b''
while len(data) < length:
p = self.read()
data += p
self.write(struct.pack('<I', len(data)))
if progress_fn and (len(data) % 1024 == 0 or len(data) == length):
progress_fn(len(data), length)
if progress_fn:
progress_fn(len(data), length)
if len(data) > length:
raise FatalError('Read more than expected')
digest_frame = self.read()
if len(digest_frame) != 16:
raise FatalError('Expected digest, got: %s' % hexify(digest_frame))
expected_digest = hexify(digest_frame).upper()
digest = hashlib.md5(data).hexdigest().upper()
if digest != expected_digest:
raise FatalError('Digest mismatch: expected %s, got %s' % (expected_digest, digest))
return data
def flash_spi_attach(self, hspi_arg):
"""Send SPI attach command to enable the SPI flash pins
ESP8266 ROM does this when you send flash_begin, ESP32 ROM
has it as a SPI command.
"""
# last 3 bytes in ESP_SPI_ATTACH argument are reserved values
arg = struct.pack('<I', hspi_arg)
if not self.IS_STUB:
# ESP32 ROM loader takes additional 'is legacy' arg, which is not
# currently supported in the stub loader or esptool.py (as it's not usually needed.)
is_legacy = 0
arg += struct.pack('BBBB', is_legacy, 0, 0, 0)
self.check_command("configure SPI flash pins", ESP32ROM.ESP_SPI_ATTACH, arg)
def flash_set_parameters(self, size):
"""Tell the ESP bootloader the parameters of the chip
Corresponds to the "flashchip" data structure that the ROM
has in RAM.
'size' is in bytes.
All other flash parameters are currently hardcoded (on ESP8266
these are mostly ignored by ROM code, on ESP32 I'm not sure.)
"""
fl_id = 0
total_size = size
block_size = 64 * 1024
sector_size = 4 * 1024
page_size = 256
status_mask = 0xffff
self.check_command("set SPI params", ESP32ROM.ESP_SPI_SET_PARAMS,
struct.pack('<IIIIII', fl_id, total_size, block_size, sector_size, page_size, status_mask))
def run_spiflash_command(self, spiflash_command, data=b"", read_bits=0):
"""Run an arbitrary SPI flash command.
This function uses the "USR_COMMAND" functionality in the ESP
SPI hardware, rather than the precanned commands supported by
hardware. So the value of spiflash_command is an actual command
byte, sent over the wire.
After writing command byte, writes 'data' to MOSI and then
reads back 'read_bits' of reply on MISO. Result is a number.
"""
# SPI_USR register flags
SPI_USR_COMMAND = (1 << 31)
SPI_USR_MISO = (1 << 28)
SPI_USR_MOSI = (1 << 27)
# SPI registers, base address differs ESP32 vs 8266
base = self.SPI_REG_BASE
SPI_CMD_REG = base + 0x00
SPI_USR_REG = base + 0x1C
SPI_USR1_REG = base + 0x20
SPI_USR2_REG = base + 0x24
SPI_W0_REG = base + self.SPI_W0_OFFS
# following two registers are ESP32 only
if self.SPI_HAS_MOSI_DLEN_REG:
            # ESP32 has a more sophisticated way to set up "user" commands
def set_data_lengths(mosi_bits, miso_bits):
SPI_MOSI_DLEN_REG = base + 0x28
SPI_MISO_DLEN_REG = base + 0x2C
if mosi_bits > 0:
self.write_reg(SPI_MOSI_DLEN_REG, mosi_bits - 1)
if miso_bits > 0:
self.write_reg(SPI_MISO_DLEN_REG, miso_bits - 1)
else:
def set_data_lengths(mosi_bits, miso_bits):
SPI_DATA_LEN_REG = SPI_USR1_REG
SPI_MOSI_BITLEN_S = 17
SPI_MISO_BITLEN_S = 8
mosi_mask = 0 if (mosi_bits == 0) else (mosi_bits - 1)
miso_mask = 0 if (miso_bits == 0) else (miso_bits - 1)
self.write_reg(SPI_DATA_LEN_REG,
(miso_mask << SPI_MISO_BITLEN_S) | (
mosi_mask << SPI_MOSI_BITLEN_S))
# SPI peripheral "command" bitmasks for SPI_CMD_REG
SPI_CMD_USR = (1 << 18)
# shift values
SPI_USR2_DLEN_SHIFT = 28
if read_bits > 32:
raise FatalError("Reading more than 32 bits back from a SPI flash operation is unsupported")
if len(data) > 64:
raise FatalError("Writing more than 64 bytes of data with one SPI command is unsupported")
data_bits = len(data) * 8
old_spi_usr = self.read_reg(SPI_USR_REG)
old_spi_usr2 = self.read_reg(SPI_USR2_REG)
flags = SPI_USR_COMMAND
if read_bits > 0:
flags |= SPI_USR_MISO
if data_bits > 0:
flags |= SPI_USR_MOSI
set_data_lengths(data_bits, read_bits)
self.write_reg(SPI_USR_REG, flags)
self.write_reg(SPI_USR2_REG,
(7 << SPI_USR2_DLEN_SHIFT) | spiflash_command)
if data_bits == 0:
self.write_reg(SPI_W0_REG, 0) # clear data register before we read it
else:
data = pad_to(data, 4, b'\00') # pad to 32-bit multiple
words = struct.unpack("I" * (len(data) // 4), data)
next_reg = SPI_W0_REG
for word in words:
self.write_reg(next_reg, word)
next_reg += 4
self.write_reg(SPI_CMD_REG, SPI_CMD_USR)
def wait_done():
for _ in range(10):
if (self.read_reg(SPI_CMD_REG) & SPI_CMD_USR) == 0:
return
raise FatalError("SPI command did not complete in time")
wait_done()
status = self.read_reg(SPI_W0_REG)
# restore some SPI controller registers
self.write_reg(SPI_USR_REG, old_spi_usr)
self.write_reg(SPI_USR2_REG, old_spi_usr2)
return status
def read_status(self, num_bytes=2):
"""Read up to 24 bits (num_bytes) of SPI flash status register contents
via RDSR, RDSR2, RDSR3 commands
Not all SPI flash supports all three commands. The upper 1 or 2
bytes may be 0xFF.
"""
SPIFLASH_RDSR = 0x05
SPIFLASH_RDSR2 = 0x35
SPIFLASH_RDSR3 = 0x15
status = 0
shift = 0
for cmd in [SPIFLASH_RDSR, SPIFLASH_RDSR2, SPIFLASH_RDSR3][0:num_bytes]:
status += self.run_spiflash_command(cmd, read_bits=8) << shift
shift += 8
return status
def write_status(self, new_status, num_bytes=2, set_non_volatile=False):
"""Write up to 24 bits (num_bytes) of new status register
num_bytes can be 1, 2 or 3.
Not all flash supports the additional commands to write the
second and third byte of the status register. When writing 2
        bytes, esptool also sends a 16-bit WRSR command (as some
flash types use this instead of WRSR2.)
If the set_non_volatile flag is set, non-volatile bits will
be set as well as volatile ones (WREN used instead of WEVSR).
"""
SPIFLASH_WRSR = 0x01
SPIFLASH_WRSR2 = 0x31
SPIFLASH_WRSR3 = 0x11
SPIFLASH_WEVSR = 0x50
SPIFLASH_WREN = 0x06
SPIFLASH_WRDI = 0x04
enable_cmd = SPIFLASH_WREN if set_non_volatile else SPIFLASH_WEVSR
# try using a 16-bit WRSR (not supported by all chips)
# this may be redundant, but shouldn't hurt
if num_bytes == 2:
self.run_spiflash_command(enable_cmd)
self.run_spiflash_command(SPIFLASH_WRSR, struct.pack("<H", new_status))
# also try using individual commands (also not supported by all chips for num_bytes 2 & 3)
for cmd in [SPIFLASH_WRSR, SPIFLASH_WRSR2, SPIFLASH_WRSR3][0:num_bytes]:
self.run_spiflash_command(enable_cmd)
self.run_spiflash_command(cmd, struct.pack("B", new_status & 0xFF))
new_status >>= 8
self.run_spiflash_command(SPIFLASH_WRDI)
def hard_reset(self):
self._port.setRTS(True) # EN->LOW
time.sleep(0.1)
self._port.setRTS(False)
def soft_reset(self, stay_in_bootloader):
if not self.IS_STUB:
if stay_in_bootloader:
return # ROM bootloader is already in bootloader!
else:
# 'run user code' is as close to a soft reset as we can do
self.flash_begin(0, 0)
self.flash_finish(False)
else:
if stay_in_bootloader:
# soft resetting from the stub loader
# will re-load the ROM bootloader
self.flash_begin(0, 0)
self.flash_finish(True)
elif self.CHIP_NAME != "ESP8266":
raise FatalError("Soft resetting is currently only supported on ESP8266")
else:
# running user code from stub loader requires some hacks
# in the stub loader
self.command(self.ESP_RUN_USER_CODE, wait_response=False)
class ESP8266ROM(ESPLoader):
""" Access class for ESP8266 ROM bootloader
"""
CHIP_NAME = "ESP8266"
IS_STUB = False
DATE_REG_VALUE = 0x00062000
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
ESP_OTP_MAC3 = 0x3ff0005c
SPI_REG_BASE = 0x60000200
SPI_W0_OFFS = 0x40
SPI_HAS_MOSI_DLEN_REG = False
FLASH_SIZES = {
'512KB':0x00,
'256KB':0x10,
'1MB':0x20,
'2MB':0x30,
'4MB':0x40,
'2MB-c1': 0x50,
'4MB-c1':0x60,
'8MB':0x80,
'16MB':0x90,
}
BOOTLOADER_FLASH_OFFSET = 0
def get_chip_description(self):
return "ESP8266"
def flash_spi_attach(self, hspi_arg):
if self.IS_STUB:
super(ESP8266ROM, self).flash_spi_attach(hspi_arg)
else:
# ESP8266 ROM has no flash_spi_attach command in serial protocol,
# but flash_begin will do it
self.flash_begin(0, 0)
def flash_set_parameters(self, size):
# not implemented in ROM, but OK to silently skip for ROM
if self.IS_STUB:
super(ESP8266ROM, self).flash_set_parameters(size)
def chip_id(self):
""" Read Chip ID from OTP ROM - see http://esp8266-re.foogod.com/wiki/System_get_chip_id_%28IoT_RTOS_SDK_0.9.9%29 """
id0 = self.read_reg(self.ESP_OTP_MAC0)
id1 = self.read_reg(self.ESP_OTP_MAC1)
return (id0 >> 24) | ((id1 & MAX_UINT24) << 8)
def read_mac(self):
""" Read MAC from OTP ROM """
mac0 = self.read_reg(self.ESP_OTP_MAC0)
mac1 = self.read_reg(self.ESP_OTP_MAC1)
mac3 = self.read_reg(self.ESP_OTP_MAC3)
if (mac3 != 0):
oui = ((mac3 >> 16) & 0xff, (mac3 >> 8) & 0xff, mac3 & 0xff)
elif ((mac1 >> 16) & 0xff) == 0:
oui = (0x18, 0xfe, 0x34)
elif ((mac1 >> 16) & 0xff) == 1:
oui = (0xac, 0xd0, 0x74)
else:
raise FatalError("Unknown OUI")
return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
def get_erase_size(self, offset, size):
""" Calculate an erase size given a specific size in bytes.
Provides a workaround for the bootloader erase bug."""
sectors_per_block = 16
sector_size = self.FLASH_SECTOR_SIZE
num_sectors = (size + sector_size - 1) // sector_size
start_sector = offset // sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
return (num_sectors + 1) // 2 * sector_size
else:
return (num_sectors - head_sectors) * sector_size
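# Illustrative example of the workaround above (assuming the usual 4096-byte sectors):
# for offset=0, size=0x8000 we get num_sectors=8 and head_sectors=8, so the value
# passed to the ROM's flash_begin is (8 + 1) // 2 * 4096 = 0x4000; the buggy ROM
# then erases approximately the intended 0x8000 bytes.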
class ESP8266StubLoader(ESP8266ROM):
""" Access class for ESP8266 stub loader, runs on top of ROM.
"""
FLASH_WRITE_SIZE = 0x4000 # matches MAX_WRITE_BLOCK in stub_loader.c
IS_STUB = True
def __init__(self, rom_loader):
self._port = rom_loader._port
self.flush_input() # resets _slip_reader
def get_erase_size(self, offset, size):
return size # stub doesn't have same size bug as ROM loader
ESP8266ROM.STUB_CLASS = ESP8266StubLoader
class ESP32ROM(ESPLoader):
"""Access class for ESP32 ROM bootloader
"""
CHIP_NAME = "ESP32"
IS_STUB = False
DATE_REG_VALUE = 0x15122500
IROM_MAP_START = 0x400d0000
IROM_MAP_END = 0x40400000
DROM_MAP_START = 0x3F400000
DROM_MAP_END = 0x3F700000
# ESP32 uses a 4 byte status reply
STATUS_BYTES_LENGTH = 4
SPI_REG_BASE = 0x60002000
EFUSE_REG_BASE = 0x6001a000
SPI_W0_OFFS = 0x80
SPI_HAS_MOSI_DLEN_REG = True
FLASH_SIZES = {
'1MB':0x00,
'2MB':0x10,
'4MB':0x20,
'8MB':0x30,
'16MB':0x40
}
BOOTLOADER_FLASH_OFFSET = 0x1000
def get_chip_description(self):
blk3 = self.read_efuse(3)
chip_version = (blk3 >> 12) & 0xF
pkg_version = (blk3 >> 9) & 0x07
silicon_rev = {
0: "0",
8: "1"
}.get(chip_version, "(unknown 0x%x)" % chip_version)
chip_name = {
0: "ESP32D0WDQ6",
1: "ESP32D0WDQ5",
2: "ESP32D2WDQ5",
}.get(pkg_version, "unknown ESP32")
return "%s (revision %s)" % (chip_name, silicon_rev)
def read_efuse(self, n):
""" Read the nth word of the ESP3x EFUSE region. """
return self.read_reg(self.EFUSE_REG_BASE + (4 * n))
def chip_id(self):
word16 = self.read_efuse(1)
word17 = self.read_efuse(2)
return ((word17 & MAX_UINT24) << 24) | (word16 >> 8) & MAX_UINT24
def read_mac(self):
""" Read MAC from EFUSE region """
words = [self.read_efuse(2), self.read_efuse(1)]
bitstring = struct.pack(">II", *words)
bitstring = bitstring[2:8] # trim the 2 byte CRC
try:
return tuple(ord(b) for b in bitstring)
except TypeError: # Python 3, bitstring elements are already bytes
return tuple(bitstring)
def get_erase_size(self, offset, size):
return size
class ESP32StubLoader(ESP32ROM):
""" Access class for ESP32 stub loader, runs on top of ROM.
"""
FLASH_WRITE_SIZE = 0x4000 # matches MAX_WRITE_BLOCK in stub_loader.c
STATUS_BYTES_LENGTH = 2 # same as ESP8266, different to ESP32 ROM
IS_STUB = True
def __init__(self, rom_loader):
self._port = rom_loader._port
self.flush_input() # resets _slip_reader
ESP32ROM.STUB_CLASS = ESP32StubLoader
class ESPBOOTLOADER(object):
""" These are constants related to software ESP bootloader, working with 'v2' image files """
# First byte of the "v2" application image
IMAGE_V2_MAGIC = 0xea
# First 'segment' value in a "v2" application image, appears to be a constant version value?
IMAGE_V2_SEGMENT = 4
def LoadFirmwareImage(chip, filename):
""" Load a firmware image. Can be for ESP8266 or ESP32. ESP8266 images will be examined to determine if they are
original ROM firmware images (ESPFirmwareImage) or "v2" OTA bootloader images.
Returns a BaseFirmwareImage subclass, either ESPFirmwareImage (v1) or OTAFirmwareImage (v2).
"""
with open(filename, 'rb') as f:
if chip == 'esp32':
return ESP32FirmwareImage(f)
else: # Otherwise, ESP8266 so look at magic to determine the image type
magic = ord(f.read(1))
f.seek(0)
if magic == ESPLoader.ESP_IMAGE_MAGIC:
return ESPFirmwareImage(f)
elif magic == ESPBOOTLOADER.IMAGE_V2_MAGIC:
return OTAFirmwareImage(f)
else:
raise FatalError("Invalid image magic number: %d" % magic)
class ImageSegment(object):
""" Wrapper class for a segment in an ESP image
(very similar to a section in an ELFImage also) """
def __init__(self, addr, data, file_offs=None):
self.addr = addr
# pad all ImageSegments to at least 4 bytes length
self.data = pad_to(data, 4, b'\x00')
self.file_offs = file_offs
self.include_in_checksum = True
def copy_with_new_addr(self, new_addr):
""" Return a new ImageSegment with same data, but mapped at
a new address. """
return ImageSegment(new_addr, self.data, 0)
def __repr__(self):
r = "len 0x%05x load 0x%08x" % (len(self.data), self.addr)
if self.file_offs is not None:
r += " file_offs 0x%08x" % (self.file_offs)
return r
class ELFSection(ImageSegment):
""" Wrapper class for a section in an ELF image, has a section
name as well as the common properties of an ImageSegment. """
def __init__(self, name, addr, data):
super(ELFSection, self).__init__(addr, data)
self.name = name.decode("utf-8")
def __repr__(self):
return "%s %s" % (self.name, super(ELFSection, self).__repr__())
class BaseFirmwareImage(object):
SEG_HEADER_LEN = 8
""" Base class with common firmware image functions """
def __init__(self):
self.segments = []
self.entrypoint = 0
def load_common_header(self, load_file, expected_magic):
(magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', load_file.read(8))
if magic != expected_magic or segments > 16:
raise FatalError('Invalid firmware image magic=%d segments=%d' % (magic, segments))
return segments
def load_segment(self, f, is_irom_segment=False):
""" Load the next segment from the image file """
file_offs = f.tell()
(offset, size) = struct.unpack('<II', f.read(8))
self.warn_if_unusual_segment(offset, size, is_irom_segment)
segment_data = f.read(size)
if len(segment_data) < size:
raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
segment = ImageSegment(offset, segment_data, file_offs)
self.segments.append(segment)
return segment
def warn_if_unusual_segment(self, offset, size, is_irom_segment):
if not is_irom_segment:
if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
print('WARNING: Suspicious segment 0x%x, length %d' % (offset, size))
def save_segment(self, f, segment, checksum=None):
""" Save the next segment to the image file, return next checksum value if provided """
f.write(struct.pack('<II', segment.addr, len(segment.data)))
f.write(segment.data)
if checksum is not None:
return ESPLoader.checksum(segment.data, checksum)
def read_checksum(self, f):
""" Return ESPLoader checksum from end of just-read image """
# Skip the padding. The checksum is stored in the last byte so that the
# file is a multiple of 16 bytes.
align_file_position(f, 16)
return ord(f.read(1))
def calculate_checksum(self):
""" Calculate checksum of loaded image, based on segments in
segment array.
"""
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
for seg in self.segments:
if seg.include_in_checksum:
checksum = ESPLoader.checksum(seg.data, checksum)
return checksum
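# Conceptually (sketch, assuming ESPLoader.checksum is the usual byte-wise XOR fold),
# the whole-image checksum computed above is equivalent to:
#
#   checksum = ESPLoader.ESP_CHECKSUM_MAGIC
#   for seg in image.segments:
#       if seg.include_in_checksum:
#           for b in bytearray(seg.data):
#               checksum ^= b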
def append_checksum(self, f, checksum):
""" Append ESPLoader checksum to the just-written image """
align_file_position(f, 16)
f.write(struct.pack(b'B', checksum))
def write_common_header(self, f, segments):
f.write(struct.pack('<BBBBI', ESPLoader.ESP_IMAGE_MAGIC, len(segments),
self.flash_mode, self.flash_size_freq, self.entrypoint))
def is_irom_addr(self, addr):
""" Returns True if an address starts in the irom region.
Valid for ESP8266 only.
"""
return ESP8266ROM.IROM_MAP_START <= addr < ESP8266ROM.IROM_MAP_END
def get_irom_segment(self):
irom_segments = [s for s in self.segments if self.is_irom_addr(s.addr)]
if len(irom_segments) > 0:
if len(irom_segments) != 1:
raise FatalError('Found %d segments that could be irom0. Bad ELF file?' % len(irom_segments))
return irom_segments[0]
return None
def get_non_irom_segments(self):
irom_segment = self.get_irom_segment()
return [s for s in self.segments if s != irom_segment]
class ESPFirmwareImage(BaseFirmwareImage):
""" 'Version 1' firmware image, segments loaded directly by the ROM bootloader. """
ROM_LOADER = ESP8266ROM
def __init__(self, load_file=None):
super(ESPFirmwareImage, self).__init__()
self.flash_mode = 0
self.flash_size_freq = 0
self.version = 1
if load_file is not None:
segments = self.load_common_header(load_file, ESPLoader.ESP_IMAGE_MAGIC)
for _ in range(segments):
self.load_segment(load_file)
self.checksum = self.read_checksum(load_file)
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
return input_file + '-'
def save(self, basename):
""" Save a set of V1 images for flashing. Parameter is a base filename. """
# IROM data goes in its own plain binary file
irom_segment = self.get_irom_segment()
if irom_segment is not None:
with open("%s0x%05x.bin" % (basename, irom_segment.addr - ESP8266ROM.IROM_MAP_START), "wb") as f:
f.write(irom_segment.data)
# everything but IROM goes at 0x00000 in an image file
normal_segments = self.get_non_irom_segments()
with open("%s0x00000.bin" % basename, 'wb') as f:
self.write_common_header(f, normal_segments)
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
for segment in normal_segments:
checksum = self.save_segment(f, segment, checksum)
self.append_checksum(f, checksum)
class OTAFirmwareImage(BaseFirmwareImage):
""" 'Version 2' firmware image, segments loaded by software bootloader stub
(ie Espressif bootloader or rboot)
"""
ROM_LOADER = ESP8266ROM
def __init__(self, load_file=None):
super(OTAFirmwareImage, self).__init__()
self.version = 2
if load_file is not None:
segments = self.load_common_header(load_file, ESPBOOTLOADER.IMAGE_V2_MAGIC)
if segments != ESPBOOTLOADER.IMAGE_V2_SEGMENT:
# segment count is not really segment count here, but we expect to see '4'
print('Warning: V2 header has unexpected "segment" count %d (usually 4)' % segments)
# irom segment comes before the second header
#
# the file is saved in the image with a zero load address
# in the header, so we need to calculate a load address
irom_segment = self.load_segment(load_file, True)
# for actual mapped addr, add ESP8266ROM.IROM_MAP_START + flashing_addr + 8
irom_segment.addr = 0
irom_segment.include_in_checksum = False
first_flash_mode = self.flash_mode
first_flash_size_freq = self.flash_size_freq
first_entrypoint = self.entrypoint
# load the second header
segments = self.load_common_header(load_file, ESPLoader.ESP_IMAGE_MAGIC)
if first_flash_mode != self.flash_mode:
print('WARNING: Flash mode value in first header (0x%02x) disagrees with second (0x%02x). Using second value.'
% (first_flash_mode, self.flash_mode))
if first_flash_size_freq != self.flash_size_freq:
print('WARNING: Flash size/freq value in first header (0x%02x) disagrees with second (0x%02x). Using second value.'
% (first_flash_size_freq, self.flash_size_freq))
if first_entrypoint != self.entrypoint:
print('WARNING: Entrypoint address in first header (0x%08x) disagrees with second header (0x%08x). Using second value.'
% (first_entrypoint, self.entrypoint))
# load all the usual segments
for _ in range(segments):
self.load_segment(load_file)
self.checksum = self.read_checksum(load_file)
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
irom_segment = self.get_irom_segment()
if irom_segment is not None:
irom_offs = irom_segment.addr - ESP8266ROM.IROM_MAP_START
else:
irom_offs = 0
return "%s-0x%05x.bin" % (os.path.splitext(input_file)[0],
irom_offs & ~(ESPLoader.FLASH_SECTOR_SIZE - 1))
def save(self, filename):
with open(filename, 'wb') as f:
# Save first header for irom0 segment
f.write(struct.pack(b'<BBBBI', ESPBOOTLOADER.IMAGE_V2_MAGIC, ESPBOOTLOADER.IMAGE_V2_SEGMENT,
self.flash_mode, self.flash_size_freq, self.entrypoint))
irom_segment = self.get_irom_segment()
if irom_segment is not None:
# save irom0 segment, make sure it has load addr 0 in the file
irom_segment = irom_segment.copy_with_new_addr(0)
self.save_segment(f, irom_segment)
# second header, matches V1 header and contains loadable segments
normal_segments = self.get_non_irom_segments()
self.write_common_header(f, normal_segments)
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
for segment in normal_segments:
checksum = self.save_segment(f, segment, checksum)
self.append_checksum(f, checksum)
class ESP32FirmwareImage(BaseFirmwareImage):
""" ESP32 firmware image is very similar to V1 ESP8266 image,
except with an additional 16 byte reserved header at top of image,
and because of new flash mapping capabilities the flash-mapped regions
can be placed in the normal image (just @ 64kB padded offsets).
"""
ROM_LOADER = ESP32ROM
# 16 byte extended header contains WP pin number (byte), then 6 half-byte drive strength
# config fields, then 12 reserved bytes. None of this is exposed in esptool.py right now,
# but we need to set WP to 0xEE (disabled) to avoid problems when remapping SPI flash
# pins via efuse (for example on ESP32-D2WD).
EXTENDED_HEADER = [0xEE] + ([0] * 15)
EXTENDED_HEADER_STRUCT_FMT = "B" * 16
def __init__(self, load_file=None):
super(ESP32FirmwareImage, self).__init__()
self.flash_mode = 0
self.flash_size_freq = 0
self.version = 1
if load_file is not None:
segments = self.load_common_header(load_file, ESPLoader.ESP_IMAGE_MAGIC)
additional_header = list(struct.unpack(self.EXTENDED_HEADER_STRUCT_FMT, load_file.read(16)))
# check these bytes are unused
if additional_header != self.EXTENDED_HEADER:
print("WARNING: ESP32 image header contains unknown flags. Possibly this image is from a different version of esptool.py")
for _ in range(segments):
self.load_segment(load_file)
self.checksum = self.read_checksum(load_file)
def is_flash_addr(self, addr):
return (ESP32ROM.IROM_MAP_START <= addr < ESP32ROM.IROM_MAP_END) \
or (ESP32ROM.DROM_MAP_START <= addr < ESP32ROM.DROM_MAP_END)
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
return "%s.bin" % (os.path.splitext(input_file)[0])
def warn_if_unusual_segment(self, offset, size, is_irom_segment):
pass # TODO: add warnings for ESP32 segment offset/size combinations that are wrong
def save(self, filename):
padding_segments = 0
with open(filename, 'wb') as f:
self.write_common_header(f, self.segments)
# first 4 bytes of header are read by ROM bootloader for SPI
# config, but currently unused
f.write(struct.pack(self.EXTENDED_HEADER_STRUCT_FMT, *self.EXTENDED_HEADER))
checksum = ESPLoader.ESP_CHECKSUM_MAGIC
last_addr = None
for segment in sorted(self.segments, key=lambda s:s.addr):
# IROM/DROM segment flash mappings need to align on
# 64kB boundaries.
#
# TODO: intelligently order segments to reduce wastage
# by squeezing smaller DRAM/IRAM segments into the
# 64kB padding space.
IROM_ALIGN = 65536
# check for multiple ELF sections that live in the same flash mapping region.
# this is usually a sign of a broken linker script, but if you have a legitimate
# use case then let us know (we can merge segments here, but as a rule you probably
# want to merge them in your linker script.)
if last_addr is not None and self.is_flash_addr(last_addr) \
and self.is_flash_addr(segment.addr) and segment.addr // IROM_ALIGN == last_addr // IROM_ALIGN:
raise FatalError(("Segment loaded at 0x%08x lands in same 64KB flash mapping as segment loaded at 0x%08x. " +
"Can't generate binary. Suggest changing linker script or ELF to merge sections.") %
(segment.addr, last_addr))
last_addr = segment.addr
if self.is_flash_addr(segment.addr):
# Actual alignment required for the segment header: positioned so that
# after we write the next 8 byte header, file_offs % IROM_ALIGN == segment.addr % IROM_ALIGN
#
# (this is because the segment's vaddr may not be IROM_ALIGNed, more likely is aligned
# IROM_ALIGN+0x10 to account for the longest possible header.)
align_past = (segment.addr % IROM_ALIGN) - self.SEG_HEADER_LEN
assert (align_past + self.SEG_HEADER_LEN) == (segment.addr % IROM_ALIGN)
# subtract SEG_HEADER_LEN a second time, as the padding block has a header as well
pad_len = (IROM_ALIGN - (f.tell() % IROM_ALIGN)) + align_past - self.SEG_HEADER_LEN
if pad_len < 0:
pad_len += IROM_ALIGN
if pad_len > 0:
null = ImageSegment(0, b'\x00' * pad_len, f.tell())
checksum = self.save_segment(f, null, checksum)
padding_segments += 1
# verify that after the 8 byte header is added, we are at the correct offset relative to the segment's vaddr
assert (f.tell() + 8) % IROM_ALIGN == segment.addr % IROM_ALIGN
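# Worked example with illustrative numbers: if segment.addr % IROM_ALIGN == 0x20 and
# f.tell() == 0x30, then align_past = 0x18 and pad_len = (0x10000 - 0x30) + 0x18 - 8
# = 0xFFE0; after the 8-byte padding-segment header plus 0xFFE0 zero bytes, f.tell()
# is 0x10018, so the next 8-byte segment header ends at 0x10020, which is again 0x20
# modulo IROM_ALIGN, satisfying the assert above.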
checksum = self.save_segment(f, segment, checksum)
self.append_checksum(f, checksum)
# kinda hacky: go back to the initial header and write the new segment count
# that includes padding segments. Luckily(?) this header is not checksummed
f.seek(1)
try:
f.write(chr(len(self.segments) + padding_segments))
except TypeError: # Python 3
f.write(bytes([len(self.segments) + padding_segments]))
class ELFFile(object):
SEC_TYPE_PROGBITS = 0x01
SEC_TYPE_STRTAB = 0x03
LEN_SEC_HEADER = 0x28
def __init__(self, name):
# Load sections from the ELF file
self.name = name
with open(self.name, 'rb') as f:
self._read_elf_file(f)
def get_section(self, section_name):
for s in self.sections:
if s.name == section_name:
return s
raise ValueError("No section %s in ELF file" % section_name)
def _read_elf_file(self, f):
# read the ELF file header
LEN_FILE_HEADER = 0x34
try:
(ident,_type,machine,_version,
self.entrypoint,_phoff,shoff,_flags,
_ehsize, _phentsize,_phnum, shentsize,
shnum,shstrndx) = struct.unpack("<16sHHLLLLLHHHHHH", f.read(LEN_FILE_HEADER))
except struct.error as e:
raise FatalError("Failed to read a valid ELF header from %s: %s" % (self.name, e))
if byte(ident, 0) != 0x7f or ident[1:4] != b'ELF':
raise FatalError("%s has invalid ELF magic header" % self.name)
if machine != 0x5e:
raise FatalError("%s does not appear to be an Xtensa ELF file. e_machine=%04x" % (self.name, machine))
if shentsize != self.LEN_SEC_HEADER:
raise FatalError("%s has unexpected section header entry size 0x%x (not 0x28)" % (self.name, shentsize, self.LEN_SEC_HEADER))
if shnum == 0:
raise FatalError("%s has 0 section headers" % (self.name))
self._read_sections(f, shoff, shnum, shstrndx)
def _read_sections(self, f, section_header_offs, section_header_count, shstrndx):
f.seek(section_header_offs)
len_bytes = section_header_count * self.LEN_SEC_HEADER
section_header = f.read(len_bytes)
if len(section_header) == 0:
raise FatalError("No section header found at offset %04x in ELF file." % section_header_offs)
if len(section_header) != (len_bytes):
raise FatalError("Only read 0x%x bytes from section header (expected 0x%x.) Truncated ELF file?" % (len(section_header), len_bytes))
# walk through the section header and extract all sections
section_header_offsets = range(0, len(section_header), self.LEN_SEC_HEADER)
def read_section_header(offs):
name_offs,sec_type,_flags,lma,sec_offs,size = struct.unpack_from("<LLLLLL", section_header[offs:])
return (name_offs, sec_type, lma, size, sec_offs)
all_sections = [read_section_header(offs) for offs in section_header_offsets]
prog_sections = [s for s in all_sections if s[1] == ELFFile.SEC_TYPE_PROGBITS]
# search for the string table section
if not (shstrndx * self.LEN_SEC_HEADER) in section_header_offsets:
raise FatalError("ELF file has no STRTAB section at shstrndx %d" % shstrndx)
_,sec_type,_,sec_size,sec_offs = read_section_header(shstrndx * self.LEN_SEC_HEADER)
if sec_type != ELFFile.SEC_TYPE_STRTAB:
print('WARNING: ELF file has incorrect STRTAB section type 0x%02x' % sec_type)
f.seek(sec_offs)
string_table = f.read(sec_size)
# build the real list of ELFSections by reading the actual section names from the
# string table section, and actual data for each section from the ELF file itself
def lookup_string(offs):
raw = string_table[offs:]
return raw[:raw.index(b'\x00')]
def read_data(offs,size):
f.seek(offs)
return f.read(size)
prog_sections = [ELFSection(lookup_string(n_offs), lma, read_data(offs, size)) for (n_offs, _type, lma, size, offs) in prog_sections
if lma != 0]
self.sections = prog_sections
def slip_reader(port):
"""Generator to read SLIP packets from a serial port.
Yields one full SLIP packet at a time, raises exception on timeout or invalid data.
Designed to avoid too many calls to serial.read(1), which can bog
down on slow systems.
"""
partial_packet = None
in_escape = False
while True:
waiting = port.inWaiting()
read_bytes = port.read(1 if waiting == 0 else waiting)
if read_bytes == b'':
raise FatalError("Timed out waiting for packet %s" % ("header" if partial_packet is None else "content"))
for b in read_bytes:
if type(b) is int:
b = bytes([b]) # python 2/3 compat
if partial_packet is None: # waiting for packet header
if b == b'\xc0':
partial_packet = b""
else:
raise FatalError('Invalid head of packet (%r)' % b)
elif in_escape: # part-way through escape sequence
in_escape = False
if b == b'\xdc':
partial_packet += b'\xc0'
elif b == b'\xdd':
partial_packet += b'\xdb'
else:
raise FatalError('Invalid SLIP escape (%r%r)' % (b'\xdb', b))
elif b == b'\xdb': # start of escape sequence
in_escape = True
elif b == b'\xc0': # end of packet
yield partial_packet
partial_packet = None
else: # normal byte in packet
partial_packet += b
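# SLIP framing recap (illustrative): a payload of 01 c0 02 arrives on the wire as
# c0 01 db dc 02 c0, i.e. 0xc0 inside a packet is escaped as db dc, 0xdb as db dd,
# and a literal 0xc0 byte delimits both ends of the frame.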
def arg_auto_int(x):
return int(x, 0)
def div_roundup(a, b):
""" Return a/b rounded up to nearest integer,
equivalent result to int(math.ceil(float(int(a)) / float(int(b)))), only
without possible floating point accuracy errors.
"""
return (int(a) + int(b) - 1) // int(b)
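# Illustrative values: div_roundup(0x3001, 0x1000) == 4, div_roundup(0x3000, 0x1000) == 3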
def align_file_position(f, size):
""" Align the position in the file to the next block of specified size """
align = (size - 1) - (f.tell() % size)
f.seek(align, 1)
def flash_size_bytes(size):
""" Given a flash size of the type passed in args.flash_size
(ie 512KB or 1MB) then return the size in bytes.
"""
if "MB" in size:
return int(size[:size.index("MB")]) * 1024 * 1024
elif "KB" in size:
return int(size[:size.index("KB")]) * 1024
else:
raise FatalError("Unknown size %s" % size)
def hexify(s):
if not PYTHON2:
return ''.join('%02X' % c for c in s)
else:
return ''.join('%02X' % ord(c) for c in s)
def unhexify(hs):
s = bytes()
for i in range(0, len(hs) - 1, 2):
hex_string = hs[i:i + 2]
if not PYTHON2:
s += bytes([int(hex_string, 16)])
else:
s += chr(int(hex_string, 16))
return s
def pad_to(data, alignment, pad_character=b'\xFF'):
""" Pad to the next alignment boundary """
pad_mod = len(data) % alignment
if pad_mod != 0:
data += pad_character * (alignment - pad_mod)
return data
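# Illustrative values: pad_to(b'abc', 4) == b'abc\xff', pad_to(b'abcd', 4) == b'abcd' (already aligned)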
class FatalError(RuntimeError):
"""
Wrapper class for runtime errors that aren't caused by internal bugs, but by
ESP8266 responses or input content.
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
@staticmethod
def WithResult(message, result):
"""
Return a fatal error object that appends the hex values of
'result' as a string formatted argument.
"""
message += " (result was %s)" % hexify(result)
return FatalError(message)
class NotImplementedInROMError(FatalError):
"""
Wrapper class for the error thrown when a particular ESP bootloader function
is not implemented in the ROM bootloader.
"""
def __init__(self, bootloader, func):
FatalError.__init__(self, "%s ROM does not support function %s." % (bootloader.CHIP_NAME, func.__name__))
# "Operation" commands, executable at command line. One function each
#
# Each function takes either two args (<ESPLoader instance>, <args>) or a single <args>
# argument.
def load_ram(esp, args):
image = LoadFirmwareImage(esp.CHIP_NAME.lower(), args.filename)
print('RAM boot...')
for seg in image.segments:
size = len(seg.data)
print('Downloading %d bytes at %08x...' % (size, seg.addr), end=' ')
sys.stdout.flush()
esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, seg.addr)
seq = 0
data = seg.data
while len(data) > 0:
esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq)
data = data[esp.ESP_RAM_BLOCK:]
seq += 1
print('done!')
print('All segments done, executing at %08x' % image.entrypoint)
esp.mem_finish(image.entrypoint)
def read_mem(esp, args):
print('0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address)))
def write_mem(esp, args):
esp.write_reg(args.address, args.value, args.mask, 0)
print('Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address))
def dump_mem(esp, args):
f = open(args.filename, 'wb')
for i in range(args.size // 4):
d = esp.read_reg(args.address + (i * 4))
f.write(struct.pack(b'<I', d))
if f.tell() % 1024 == 0:
print('\r%d bytes read... (%d %%)' % (f.tell(),
f.tell() * 100 // args.size),
end=' ')
sys.stdout.flush()
print('Done!')
def detect_flash_size(esp, args):
if args.flash_size == 'detect':
flash_id = esp.flash_id()
size_id = flash_id >> 16
args.flash_size = DETECTED_FLASH_SIZES.get(size_id)
if args.flash_size is None:
print('Warning: Could not auto-detect Flash size (FlashID=0x%x, SizeID=0x%x), defaulting to 4MB' % (flash_id, size_id))
args.flash_size = '4MB'
else:
print('Auto-detected Flash size:', args.flash_size)
def _update_image_flash_params(esp, address, args, image):
""" Modify the flash mode & size bytes if this looks like an executable bootloader image """
if len(image) < 8:
return image # not long enough to be a bootloader image
# unpack the (potential) image header
magic, _, flash_mode, flash_size_freq = struct.unpack("BBBB", image[:4])
if address != esp.BOOTLOADER_FLASH_OFFSET or magic != esp.ESP_IMAGE_MAGIC:
return image # not flashing a bootloader, so don't modify this
if args.flash_mode != 'keep':
flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
flash_freq = flash_size_freq & 0x0F
if args.flash_freq != 'keep':
flash_freq = {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
flash_size = flash_size_freq & 0xF0
if args.flash_size != 'keep':
flash_size = esp.parse_flash_size_arg(args.flash_size)
flash_params = struct.pack(b'BB', flash_mode, flash_size + flash_freq)
if flash_params != image[2:4]:
print('Flash params set to 0x%04x' % struct.unpack(">H", flash_params))
image = image[0:2] + flash_params + image[4:]
return image
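# Header layout assumed above (sketch): image[0] is the magic byte (ESP_IMAGE_MAGIC),
# image[1] the segment count, image[2] the flash mode and image[3] packs flash size in
# the high nibble and flash frequency in the low nibble. Only bytes 2-3 are rewritten
# here; e.g. a hypothetical ESP8266 bootloader written with --flash_mode dio
# --flash_freq 40m --flash_size 4MB would end up with bytes 0x02 and 0x40.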
def write_flash(esp, args):
# set args.compress based on default behaviour:
# -> if either --compress or --no-compress is set, honour that
# -> otherwise, set --compress unless --no-stub is set
if args.compress is None and not args.no_compress:
args.compress = not args.no_stub
# verify file sizes fit in flash
flash_end = flash_size_bytes(args.flash_size)
for address, argfile in args.addr_filename:
argfile.seek(0,2) # seek to end
if address + argfile.tell() > flash_end:
raise FatalError(("File %s (length %d) at offset %d will not fit in %d bytes of flash. " +
"Use --flash-size argument, or change flashing address.")
% (argfile.name, argfile.tell(), address, flash_end))
argfile.seek(0)
for address, argfile in args.addr_filename:
if args.no_stub:
print('Erasing flash...')
image = pad_to(argfile.read(), 4)
image = _update_image_flash_params(esp, address, args, image)
calcmd5 = hashlib.md5(image).hexdigest()
uncsize = len(image)
if args.compress:
uncimage = image
image = zlib.compress(uncimage, 9)
ratio = uncsize / len(image)
blocks = esp.flash_defl_begin(uncsize, len(image), address)
else:
ratio = 1.0
blocks = esp.flash_begin(uncsize, address)
argfile.seek(0) # in case we need it again
seq = 0
written = 0
t = time.time()
esp._port.timeout = min(DEFAULT_TIMEOUT * ratio,
CHIP_ERASE_TIMEOUT * 2)
while len(image) > 0:
print('\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.FLASH_WRITE_SIZE, 100 * (seq + 1) // blocks), end='')
sys.stdout.flush()
block = image[0:esp.FLASH_WRITE_SIZE]
if args.compress:
esp.flash_defl_block(block, seq)
else:
# Pad the last block
block = block + b'\xff' * (esp.FLASH_WRITE_SIZE - len(block))
esp.flash_block(block, seq)
image = image[esp.FLASH_WRITE_SIZE:]
seq += 1
written += len(block)
t = time.time() - t
speed_msg = ""
if args.compress:
if t > 0.0:
speed_msg = " (effective %.1f kbit/s)" % (uncsize / t * 8 / 1000)
print('\rWrote %d bytes (%d compressed) at 0x%08x in %.1f seconds%s...' % (uncsize, written, address, t, speed_msg))
else:
if t > 0.0:
speed_msg = " (%.1f kbit/s)" % (written / t * 8 / 1000)
print('\rWrote %d bytes at 0x%08x in %.1f seconds%s...' % (written, address, t, speed_msg))
try:
res = esp.flash_md5sum(address, uncsize)
if res != calcmd5:
print('File md5: %s' % calcmd5)
print('Flash md5: %s' % res)
print('MD5 of 0xFF is %s' % (hashlib.md5(b'\xFF' * uncsize).hexdigest()))
raise FatalError("MD5 of file does not match data in flash!")
else:
print('Hash of data verified.')
except NotImplementedInROMError:
pass
esp._port.timeout = DEFAULT_TIMEOUT
print('\nLeaving...')
if esp.IS_STUB:
# skip sending flash_finish to ROM loader here,
# as it causes the loader to exit and run user code
esp.flash_begin(0, 0)
if args.compress:
esp.flash_defl_finish(False)
else:
esp.flash_finish(False)
if args.verify:
print('Verifying just-written flash...')
print('(This option is deprecated, flash contents are now always read back after flashing.)')
verify_flash(esp, args)
def image_info(args):
image = LoadFirmwareImage(args.chip, args.filename)
print('Image version: %d' % image.version)
print('Entry point: %08x' % image.entrypoint if image.entrypoint != 0 else 'Entry point not set')
print('%d segments' % len(image.segments))
print()
idx = 0
for seg in image.segments:
idx += 1
print('Segment %d: %r' % (idx, seg))
calc_checksum = image.calculate_checksum()
print('Checksum: %02x (%s)' % (image.checksum,
'valid' if image.checksum == calc_checksum else 'invalid - calculated %02x' % calc_checksum))
def make_image(args):
image = ESPFirmwareImage()
if len(args.segfile) == 0:
raise FatalError('No segments specified')
if len(args.segfile) != len(args.segaddr):
raise FatalError('Number of specified files does not match number of specified addresses')
for (seg, addr) in zip(args.segfile, args.segaddr):
data = open(seg, 'rb').read()
image.segments.append(ImageSegment(addr, data))
image.entrypoint = args.entrypoint
image.save(args.output)
def elf2image(args):
e = ELFFile(args.input)
if args.chip == 'auto': # Default to ESP8266 for backwards compatibility
print("Creating image for ESP8266...")
args.chip = 'esp8266'
if args.chip == 'esp32':
image = ESP32FirmwareImage()
elif args.version == '1': # ESP8266
image = ESPFirmwareImage()
else:
image = OTAFirmwareImage()
image.entrypoint = e.entrypoint
image.segments = e.sections # ELFSection is a subclass of ImageSegment
image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
image.flash_size_freq = image.ROM_LOADER.FLASH_SIZES[args.flash_size]
image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
if args.output is None:
args.output = image.default_output_name(args.input)
image.save(args.output)
def read_mac(esp, args):
mac = esp.read_mac()
def print_mac(label, mac):
print('%s: %s' % (label, ':'.join(map(lambda x: '%02x' % x, mac))))
print_mac("MAC", mac)
def chip_id(esp, args):
chipid = esp.chip_id()
print('Chip ID: 0x%08x' % chipid)
def erase_flash(esp, args):
print('Erasing flash (this may take a while)...')
t = time.time()
esp.erase_flash()
print('Chip erase completed successfully in %.1fs' % (time.time() - t))
def erase_region(esp, args):
print('Erasing region (may be slow depending on size)...')
t = time.time()
esp.erase_region(args.address, args.size)
print('Erase completed successfully in %.1f seconds.' % (time.time() - t))
def run(esp, args):
esp.run()
def flash_id(esp, args):
flash_id = esp.flash_id()
print('Manufacturer: %02x' % (flash_id & 0xff))
flid_lowbyte = (flash_id >> 16) & 0xFF
print('Device: %02x%02x' % ((flash_id >> 8) & 0xff, flid_lowbyte))
print('Detected flash size: %s' % (DETECTED_FLASH_SIZES.get(flid_lowbyte, "Unknown")))
def read_flash(esp, args):
if args.no_progress:
flash_progress = None
else:
def flash_progress(progress, length):
msg = '%d (%d %%)' % (progress, progress * 100.0 / length)
padding = '\b' * len(msg)
if progress == length:
padding = '\n'
sys.stdout.write(msg + padding)
sys.stdout.flush()
t = time.time()
data = esp.read_flash(args.address, args.size, flash_progress)
t = time.time() - t
print('\rRead %d bytes at 0x%x in %.1f seconds (%.1f kbit/s)...'
% (len(data), args.address, t, len(data) / t * 8 / 1000))
open(args.filename, 'wb').write(data)
def verify_flash(esp, args):
differences = False
for address, argfile in args.addr_filename:
image = pad_to(argfile.read(), 4)
argfile.seek(0) # rewind in case we need it again
image = _update_image_flash_params(esp, address, args, image)
image_size = len(image)
print('Verifying 0x%x (%d) bytes @ 0x%08x in flash against %s...' % (image_size, image_size, address, argfile.name))
# Try digest first, only read if there are differences.
digest = esp.flash_md5sum(address, image_size)
expected_digest = hashlib.md5(image).hexdigest()
if digest == expected_digest:
print('-- verify OK (digest matched)')
continue
else:
differences = True
if getattr(args, 'diff', 'no') != 'yes':
print('-- verify FAILED (digest mismatch)')
continue
flash = esp.read_flash(address, image_size)
assert flash != image
diff = [i for i in range(image_size) if flash[i] != image[i]]
print('-- verify FAILED: %d differences, first @ 0x%08x' % (len(diff), address + diff[0]))
for d in diff:
flash_byte = flash[d]
image_byte = image[d]
if PYTHON2:
flash_byte = ord(flash_byte)
image_byte = ord(image_byte)
print(' %08x %02x %02x' % (address + d, flash_byte, image_byte))
if differences:
raise FatalError("Verify failed.")
def read_flash_status(esp, args):
print('Status value: 0x%04x' % esp.read_status(args.bytes))
def write_flash_status(esp, args):
fmt = "0x%%0%dx" % (args.bytes * 2)
args.value = args.value & ((1 << (args.bytes * 8)) - 1)
print(('Initial flash status: ' + fmt) % esp.read_status(args.bytes))
print(('Setting flash status: ' + fmt) % args.value)
esp.write_status(args.value, args.bytes, args.non_volatile)
print(('After flash status: ' + fmt) % esp.read_status(args.bytes))
def version(args):
print(__version__)
#
# End of operations functions
#
def main():
parser = argparse.ArgumentParser(description='esptool.py v%s - ESP8266 ROM Bootloader Utility' % __version__, prog='esptool')
parser.add_argument('--chip', '-c',
help='Target chip type',
choices=['auto', 'esp8266', 'esp32'],
default=os.environ.get('ESPTOOL_CHIP', 'auto'))
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', ESPLoader.DEFAULT_PORT))
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate used when flashing/reading',
type=arg_auto_int,
default=os.environ.get('ESPTOOL_BAUD', ESPLoader.ESP_ROM_BAUD))
parser.add_argument(
'--before',
help='What to do before connecting to the chip',
choices=['default_reset', 'no_reset'],
default=os.environ.get('ESPTOOL_BEFORE', 'default_reset'))
parser.add_argument(
'--after', '-a',
help='What to do after esptool.py is finished',
choices=['hard_reset', 'soft_reset', 'no_reset'],
default=os.environ.get('ESPTOOL_AFTER', 'hard_reset'))
parser.add_argument(
'--no-stub',
help="Disable launching the flasher stub, only talk to ROM bootloader. Some features will not be available.",
action='store_true')
subparsers = parser.add_subparsers(
dest='operation',
help='Run esptool {command} -h for additional help')
def add_spi_connection_arg(parent):
parent.add_argument('--spi-connection', '-sc', help='ESP32-only argument. Override default SPI Flash connection. ' +
'Value can be SPI, HSPI or a comma-separated list of 5 I/O numbers to use for SPI flash (CLK,Q,D,HD,CS).',
action=SpiConnectionAction)
parser_load_ram = subparsers.add_parser(
'load_ram',
help='Download an image to RAM and execute')
parser_load_ram.add_argument('filename', help='Firmware image')
parser_dump_mem = subparsers.add_parser(
'dump_mem',
help='Dump arbitrary memory to disk')
parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_dump_mem.add_argument('filename', help='Name of binary dump')
parser_read_mem = subparsers.add_parser(
'read_mem',
help='Read arbitrary memory location')
parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)
parser_write_mem = subparsers.add_parser(
'write_mem',
help='Read-modify-write to arbitrary memory location')
parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)
def add_spi_flash_subparsers(parent, is_elf2image):
""" Add common parser arguments for SPI flash properties """
extra_keep_args = [] if is_elf2image else ['keep']
auto_detect = not is_elf2image
parent.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=extra_keep_args + ['40m', '26m', '20m', '80m'],
default=os.environ.get('ESPTOOL_FF', '40m' if is_elf2image else 'keep'))
parent.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=extra_keep_args + ['qio', 'qout', 'dio', 'dout'],
default=os.environ.get('ESPTOOL_FM', 'qio' if is_elf2image else 'keep'))
parent.add_argument('--flash_size', '-fs', help='SPI Flash size in MegaBytes (1MB, 2MB, 4MB, 8MB, 16MB)'
' plus ESP8266-only (256KB, 512KB, 2MB-c1, 4MB-c1)',
action=FlashSizeAction, auto_detect=auto_detect,
default=os.environ.get('ESPTOOL_FS', 'detect' if auto_detect else '1MB'))
add_spi_connection_arg(parent)
parser_write_flash = subparsers.add_parser(
'write_flash',
help='Write a binary blob to flash')
parser_write_flash.add_argument('addr_filename', metavar='<address> <filename>', help='Address followed by binary filename, separated by space',
action=AddrFilenamePairAction)
add_spi_flash_subparsers(parser_write_flash, is_elf2image=False)
parser_write_flash.add_argument('--no-progress', '-p', help='Suppress progress output', action="store_true")
parser_write_flash.add_argument('--verify', help='Verify just-written data on flash ' +
'(mostly superfluous, data is read back during flashing)', action='store_true')
compress_args = parser_write_flash.add_mutually_exclusive_group(required=False)
compress_args.add_argument('--compress', '-z', help='Compress data in transfer (default unless --no-stub is specified)',action="store_true", default=None)
compress_args.add_argument('--no-compress', '-u', help='Disable data compression during transfer (default if --no-stub is specified)',action="store_true")
subparsers.add_parser(
'run',
help='Run application code in flash')
parser_image_info = subparsers.add_parser(
'image_info',
help='Dump headers from an application image')
parser_image_info.add_argument('filename', help='Image file to parse')
parser_make_image = subparsers.add_parser(
'make_image',
help='Create an application image from binary files')
parser_make_image.add_argument('output', help='Output image file')
parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)
parser_elf2image = subparsers.add_parser(
'elf2image',
help='Create an application image from ELF file')
parser_elf2image.add_argument('input', help='Input ELF file')
parser_elf2image.add_argument('--output', '-o', help='Output filename prefix (for version 1 image), or filename (for version 2 single image)', type=str)
parser_elf2image.add_argument('--version', '-e', help='Output image version', choices=['1','2'], default='1')
add_spi_flash_subparsers(parser_elf2image, is_elf2image=True)
subparsers.add_parser(
'read_mac',
help='Read MAC address from OTP ROM')
subparsers.add_parser(
'chip_id',
help='Read Chip ID from OTP ROM')
parser_flash_id = subparsers.add_parser(
'flash_id',
help='Read SPI flash manufacturer and device ID')
add_spi_connection_arg(parser_flash_id)
parser_read_status = subparsers.add_parser(
'read_flash_status',
help='Read SPI flash status register')
add_spi_connection_arg(parser_read_status)
parser_read_status.add_argument('--bytes', help='Number of bytes to read (1-3)', type=int, choices=[1,2,3], default=2)
parser_write_status = subparsers.add_parser(
'write_flash_status',
help='Write SPI flash status register')
add_spi_connection_arg(parser_write_status)
parser_write_status.add_argument('--non-volatile', help='Write non-volatile bits (use with caution)', action='store_true')
parser_write_status.add_argument('--bytes', help='Number of status bytes to write (1-3)', type=int, choices=[1,2,3], default=2)
parser_write_status.add_argument('value', help='New value', type=arg_auto_int)
parser_read_flash = subparsers.add_parser(
'read_flash',
help='Read SPI flash content')
add_spi_connection_arg(parser_read_flash)
parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_read_flash.add_argument('filename', help='Name of binary dump')
parser_read_flash.add_argument('--no-progress', '-p', help='Suppress progress output', action="store_true")
parser_verify_flash = subparsers.add_parser(
'verify_flash',
help='Verify a binary blob against flash')
parser_verify_flash.add_argument('addr_filename', help='Address and binary file to verify there, separated by space',
action=AddrFilenamePairAction)
parser_verify_flash.add_argument('--diff', '-d', help='Show differences',
choices=['no', 'yes'], default='no')
add_spi_flash_subparsers(parser_verify_flash, is_elf2image=False)
parser_erase_flash = subparsers.add_parser(
'erase_flash',
help='Perform Chip Erase on SPI flash')
add_spi_connection_arg(parser_erase_flash)
parser_erase_region = subparsers.add_parser(
'erase_region',
help='Erase a region of the flash')
add_spi_connection_arg(parser_erase_region)
parser_erase_region.add_argument('address', help='Start address (must be multiple of 4096)', type=arg_auto_int)
parser_erase_region.add_argument('size', help='Size of region to erase (must be multiple of 4096)', type=arg_auto_int)
subparsers.add_parser(
'version', help='Print esptool version')
# internal sanity check - every operation matches a module function of the same name
for operation in subparsers.choices.keys():
assert operation in globals(), "%s should be a module function" % operation
expand_file_arguments()
args = parser.parse_args()
print('esptool.py v%s' % __version__)
# operation function can take 1 arg (args), 2 args (esp, arg)
# or be a member function of the ESPLoader class.
if args.operation is None:
parser.print_help()
sys.exit(1)
operation_func = globals()[args.operation]
operation_args,_,_,_ = inspect.getargspec(operation_func)
if operation_args[0] == 'esp': # operation function takes an ESPLoader connection object
initial_baud = min(ESPLoader.ESP_ROM_BAUD, args.baud) # don't sync faster than the default baud rate
if args.chip == 'auto':
esp = ESPLoader.detect_chip(args.port, initial_baud, args.before)
else:
chip_class = {
'esp8266': ESP8266ROM,
'esp32': ESP32ROM,
}[args.chip]
esp = chip_class(args.port, initial_baud)
esp.connect(args.before)
print("Chip is %s" % (esp.get_chip_description()))
if not args.no_stub:
esp = esp.run_stub()
if args.baud > initial_baud:
try:
esp.change_baud(args.baud)
except NotImplementedInROMError:
print("WARNING: ROM doesn't support changing baud rate. Keeping initial baud rate %d" % initial_baud)
# override common SPI flash parameter stuff if configured to do so
if hasattr(args, "spi_connection") and args.spi_connection is not None:
if esp.CHIP_NAME != "ESP32":
raise FatalError("Chip %s does not support --spi-connection option." % esp.CHIP_NAME)
print("Configuring SPI flash mode...")
esp.flash_spi_attach(args.spi_connection)
elif args.no_stub:
print("Enabling default SPI flash mode...")
# ROM loader doesn't enable flash unless we explicitly do it
esp.flash_spi_attach(0)
if hasattr(args, "flash_size"):
print("Configuring flash size...")
detect_flash_size(esp, args)
esp.flash_set_parameters(flash_size_bytes(args.flash_size))
operation_func(esp, args)
# finish execution based on args.after
if args.after == 'hard_reset':
print('Hard resetting...')
esp.hard_reset()
elif args.after == 'soft_reset':
print('Soft resetting...')
# flash_finish will trigger a soft reset
esp.soft_reset(False)
else:
print('Staying in bootloader.')
if esp.IS_STUB:
esp.soft_reset(True) # exit stub back to ROM loader
else:
operation_func(args)
def expand_file_arguments():
""" Any argument starting with "@" gets replaced with all values read from a text file.
Text file arguments can be split by newline or by space.
Values are added "as-is", as if they were specified in this order on the command line.
"""
new_args = []
expanded = False
for arg in sys.argv:
if arg.startswith("@"):
expanded = True
with open(arg[1:],"r") as f:
for line in f.readlines():
new_args += shlex.split(line)
else:
new_args.append(arg)
if expanded:
print("esptool.py %s" % (" ".join(new_args[1:])))
sys.argv = new_args
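# Usage sketch (hypothetical file name): if args.txt contains the lines
#   --baud 460800
#   write_flash 0x0 app.bin
# then `esptool.py @args.txt` behaves as if those tokens were passed directly on the
# command line.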
class FlashSizeAction(argparse.Action):
""" Custom flash size parser class to support backwards compatibility with megabit size arguments.
(At the next major release, remove deprecated sizes and this can become a 'normal' choices= argument again.)
"""
def __init__(self, option_strings, dest, nargs=1, auto_detect=False, **kwargs):
super(FlashSizeAction, self).__init__(option_strings, dest, nargs, **kwargs)
self._auto_detect = auto_detect
def __call__(self, parser, namespace, values, option_string=None):
try:
value = {
'2m': '256KB',
'4m': '512KB',
'8m': '1MB',
'16m': '2MB',
'32m': '4MB',
'16m-c1': '2MB-c1',
'32m-c1': '4MB-c1',
}[values[0]]
print("WARNING: Flash size arguments in megabits like '%s' are deprecated." % (values[0]))
print("Please use the equivalent size '%s'." % (value))
print("Megabit arguments may be removed in a future release.")
except KeyError:
value = values[0]
known_sizes = dict(ESP8266ROM.FLASH_SIZES)
known_sizes.update(ESP32ROM.FLASH_SIZES)
if self._auto_detect:
known_sizes['detect'] = 'detect'
if value not in known_sizes:
raise argparse.ArgumentError(self, '%s is not a known flash size. Known sizes: %s' % (value, ", ".join(known_sizes.keys())))
setattr(namespace, self.dest, value)
class SpiConnectionAction(argparse.Action):
""" Custom action to parse 'spi connection' override. Values are SPI, HSPI, or a sequence of 5 pin numbers separated by commas.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.upper() == "SPI":
value = 0
elif value.upper() == "HSPI":
value = 1
elif "," in value:
values = value.split(",")
if len(values) != 5:
raise argparse.ArgumentError(self, '%s is not a valid list of comma-separated pin numbers. Must be 5 numbers - CLK,Q,D,HD,CS.' % value)
try:
values = tuple(int(v,0) for v in values)
except ValueError:
raise argparse.ArgumentError(self, '%s is not a valid argument. All pins must be numeric values' % values)
if any([v for v in values if v > 33 or v < 0]):
raise argparse.ArgumentError(self, 'Pin numbers must be in the range 0-33.')
# encode the pin numbers as a 32-bit integer with packed 6-bit values, the same way ESP32 ROM takes them
# TODO: make this less ESP32 ROM specific somehow...
clk,q,d,hd,cs = values
value = (hd << 24) | (cs << 18) | (d << 12) | (q << 6) | clk
else:
raise argparse.ArgumentError(self, '%s is not a valid spi-connection value. '
'Values are SPI, HSPI, or a comma-separated list of 5 pin numbers (CLK,Q,D,HD,CS).' % value)
setattr(namespace, self.dest, value)
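# Illustrative check of the packing above: --spi-connection 6,17,8,11,16 (CLK=6, Q=17,
# D=8, HD=11, CS=16, hypothetical pin choices) encodes to
# (11 << 24) | (16 << 18) | (8 << 12) | (17 << 6) | 6 == 0x0B408446.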
class AddrFilenamePairAction(argparse.Action):
""" Custom parser class for the address/filename pairs passed as arguments """
def __init__(self, option_strings, dest, nargs='+', **kwargs):
super(AddrFilenamePairAction, self).__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
# validate pair arguments
pairs = []
for i in range(0,len(values),2):
try:
address = int(values[i],0)
except ValueError as e:
raise argparse.ArgumentError(self,'Address "%s" must be a number' % values[i])
try:
argfile = open(values[i + 1], 'rb')
except IOError as e:
raise argparse.ArgumentError(self, e)
except IndexError:
raise argparse.ArgumentError(self,'Must be pairs of an address and the binary filename to write there')
pairs.append((address, argfile))
# Sort the addresses and check for overlapping
end = 0
for address, argfile in sorted(pairs):
argfile.seek(0,2) # seek to end
size = argfile.tell()
argfile.seek(0)
sector_start = address & ~(ESPLoader.FLASH_SECTOR_SIZE - 1)
sector_end = ((address + size + ESPLoader.FLASH_SECTOR_SIZE - 1) & ~(ESPLoader.FLASH_SECTOR_SIZE - 1)) - 1
if sector_start < end:
message = 'Detected overlap at address: 0x%x for file: %s' % (address, argfile.name)
raise argparse.ArgumentError(self, message)
end = sector_end
setattr(namespace, self.dest, pairs)
# Binary stub code (see flasher_stub dir for source & details)
ESP8266ROM.STUB_CODE = eval(zlib.decompress(base64.b64decode(b"""
eNrNPWtj00a2f8VSQkhMaDWSrEcIxXaCSSlsA5QUet020kiCsoVNjHdDWfrfr85rZiQ7BNrt3vsh1CONZs6cc+a8Z/rv68v63fL63qC8Pn9XZPN3Kpi/C4Jx+4+av2sa+JsdwqPuX9b+NfWdb48mX7ffxe1fCV3v\
tG81N+o71C1zPivbniqHWcbUk16c9iZQ638rpw+B5gCkuzPRDD2o7UfjtcuZv8v1DV5HEcivdtrrbvd/0hqCqLfyXkM+LzvY6SBksOPA1iI/qxCMZw5AQBPzdQ6N2mnkBtGx8wY+VqUdugjmix4yMgPCfCk/j9t/\
aqehQmcI7YBRBk5DNWYR++3jnAEKXFCBOEXlQBc40AWdl5rmMvOokYMi1aV5EDishg2ZvQTWEkJnmdMobOMZfjXefYD/CW7hf94dGfa4z7/K+Gv+pfUX/Eu1E9QhN6osx18vzbN2kEpmzFvAauTi8YMtAYmH9NrR\
K1pU3n5ZKGJy+ES1v3XgFxs+EpAWHBYH7dOwmLbjh8UE5iva4ZqwuENbpU5pNG1QBFNE8FARKyICAT3t7yBxNxiAFH7jpyEwI8+a6aEH/Q/uEjkC1TYL3kZaCY2VPBzuwtwDGlIDWsKpwC8LGdGkVbEG1AIfMioC\
ZQYDqoRBPDAPcOhd+IdHi/ujXfYcYEWETOTHI9q7TXMuPxhFYcmA8GC6WTcYcmULew7c1+yFBsZtEhIqWchngpiM3tAk5sWOfaqicAJsEnIn5T86wCcjv/CBW2BcIBK+jUI7VgZ4VirwB810vtze7UATtl8zGTSv\
qSz7axLSg6yGRgbDFiw1spZ6BeO8kWkj352fxlNqAD8GHm1AFL0tc5ZtF5XeBUmLy6KR65qBUZcAI9AWzSqZdcvFVcV8Q9ii/1YhCG5AFSI5IeiDIA0fNQJt+zBnodckbzca7Dz7UZ4czN80G9L1Ac8J31SuGGoO\
8Ktz/P3EmS6yEOZZV+TlHeCC5D1BBlhu3jpDiIZKuHPQyFezd3a00hlt9hqez9pvWFYq3UNETnNtNyQbaHVj6QwPWkDe0MhND84vLBKy1H6iXACm8tx3Hj4XqEKHrpWCHirabSdcyrOAngGHA7S/93ihS9Ysuea+\
i7rvGAkjF1eqAkkOmgtGf84SKTF6sOGPwtkP8u7cFSq/rbJg3YqtuiKmVjjxa6Ngdyfw0b/6H52QyFcFLogYGBf/BfXK9MMBsGB88gSUGsvLEHTJVyyiEpbi1p6ALYfi9c3KdI8JnXVAc2mc60v+kX5Fa4A5UIWt\
Dk3LaC5bRrJ2GSes2mvA9PeWBGBnVKFs7S2D8GfC7D8z5oUgZSMU/VFocmvl1T690snfXGJvSZfZQ3icPfCGjoh3AQmF75ztPst4Phgr5ulU2I6y+WmjzEQEpDQQYCiIZxMXuR4xox0u5N0WOOMmpN3tLkbzJdsF\
++MxsQbqJ2AaUCPayk4eJgc9gZwv6r1uZnamujiYWrKDAVnXYx+GS7+A8R8zdKNSLIgp/0A59RRkvkjg9PQpKp1DeHh8OIAOALgaRwOAS4tO0UrWPMDd2wI5u+HgBcYGvFg0OLhRSTRfgjwxb7MTFmlZyEIIkaOT\
gRVVGckP4gkSB0JhtH+Ra2YDw33fYkdP+I72bIf1gsQX1ut5L/w1MILXncayP8kplXY+M0xLrGGmxsEnTObaJfs7op3WaZdpGv0PRRq2cafNX5K10aTJLnw+AP7ugpUbdiHDPGeRGiCo7S4Cuxet6+wbn1rBaPj9\
gHpp5BwQWM1zAg07RDP0OEZGyvLylbPPKoumVYyShBKTWOBFHgkGy7sLNoTb3zDiwrEytONPrkgxsMN1KYaMkinAZlFhyzER7v+pELztjBu5Ke0TgCWIt0Ki23X0nS1hld5ICYdIVSBTEIei0nCa26xLo5S9LxzY\
Y2hXmMQjS7KprTmGktZ0yHY9mqipg8lkuMmoafr9bsNUN+eCuqr/3idLuc64Q97vwMwZu/6Zj9Y52ujRFP6Nn83nYIL9E0aZkRtG3dfbkREakdEAuBAYFeUGsuPdH5iQCfBS1yX8uE3so4Cm3WOFYXGDGa92DK3Y\
Y1cUnDfNSy5Dn/lOie9ZMqdEPhqKC+9GWVwjb4kV7fWUxarspRA7hAXw++gBm76hNzpnj6QGPvFunH3D6rk4JJcZTPKq+HtZ7OAgu+Nn7C8WZBnDJlUohn6FGS/Iuidt8SvBAGZorRe79GEG5FGj7V9hwmJrUWzi\
wMP9JyA4P8BWgw7xSxAjwNFlZNkdgyCxInS1HIa7bJveI0ZCZtJMmNSVsBHxEewH+G8W5rANcvIuivROjs7t5mv6pP25xcYLKIoWO2D16Y0Ag0ntHCkzNVhguThR8dflj4Q53KwNUvcNIaJIScTW6GMhC3m/0Rig\
+nSTk6mKAmjELic+4KGsjWQG6H26Aj/q/hrm3t+ZOQ4MjrigJV07TnFJr+16VPSc1jH7wgl8CPwjC//LHvx5aNS77IbAq51OGjt5mh/BfyOIPoUvWdoH12DjvPKnRfC4iMviS6AP8ChzryKmW5EGCPXUCx57cenh\
R15EPE6bribffdAQH2r9r+NBLJszG6DvdPwdOLzlUx4uPH/ObIRhIXRX0D9/kVr8B6PxGGIuYlUkY9g2UdAA0+gZD5WsIN+7B2PvHuSMaLJFgsiVLMxKTXJ7TCRk5nBfGj5jBvUH27xFNS0u0JG3D3PlpTApDaMl\
pCh0DYWQviyP57BRG98GUYmc/Lsai9QGJ7yGHZOVzH4JKwAc8ZxVRqstN9vllOnAm+yQyoC9VkcT0mqwrKY6ASpsoJ7dRGFQ3UJEbH4EEblBxA4HcPWTE9JuTcHmRNzj6Gz9ymeuJfAHlp51ls7L4LjamBcMlFIf\
gH4D+kgF1sZVyUOSBQ3wZ5Xj4jcuXfxCVo4OAY6jT1hNx7NPX/efpDgsG/UdxKiCjBUa2FtN/crf84HVpqSYDKnZUayziay8F3VN9yefzv0wVUV/uZaw0WJH0whlTSSxAhkMHtC31YRN1NqIuFf+fV+wRFDz1NYu\
nbBhXL2K7kf+AfQxyPYYFTAysF9TGbkIIwseBLUK4GmSWdfEw2itAmsJlP8y91sfBEQlBb2a1sZY3grnyz3X7oBIJAIV+BjdyBN2mxKOuHRRuPt2JrFkiUQBx+0j5q99KsftRyekTuuEP4n7ZGSUJ3+ZrGkCu+Fq\
TH4s0cTYcjjMsp3jjQbB/sGMFh6Eq0KllSY5QA+7MVh4EFgvm8VdAfCpNf5IFz2kRhUcgH1wuoFWwnUyqSCOhYSoeiG6CKhQrgEgQwAyAwAjiq1zI/N2wWZaosH/3hrxawi3C0qqnazM1uwmpBKEM1v67hwD0Bqd\
mffwzwWFVAMFUbIQWom0kpsgrcC217D6lBT4Au2rng6vctqeVocvvFB0N2lt0OAwo37/9Kzh2A4Qp8k4NoHobvf2Ytb32HrrwYRPxFFRZI0t6+6Qn8z85L+A7bMrxjiIgmr6ghwyziV8ZFE6XrMoCXxkwwtFhjN0\
sUtsV9yuGzCXSuIimx4IW0UXbO2CX6vDM0rZSdA+j6cS+Sx99vWa1JccgiR4gMZ62Xdtp0fmU9phWSZDgIUHeZY622N/G6IYqYzXDI8Wh/ZjttIxDohhSOXE3QC7EARvpSMkAnizObr82ucYNRh5FiGj9f+ZkMHg\
RuO9JWLShnfWNznnFzBUiEOBoaXGM6Yw4KtGfzMY2PDoGsvgiFhxDuPdjwZb8D0kiUCborIokwTx9MKC0nq/CPdjzA/3jeekz6NCqJYLXbN5xozjZrOT2/tX0myN/QV23QmpFx3+P1AM3reokb/qc2WL82BzuDl5\
QCiydqhV1MZIbZHvpajPfYyfJKRilY6QPBHOCVl8dX7okuZsAeTCnT4+ewfqaQHm7zNwRdShuDytvlIu7RYFjfU/qwJmfn2yB09p3cP5dSdWFaiT/gce078lEw6JQA13ppSIxOSPerTKIi2HqZZBtiEAhDIaCy14\
qiyE7an84iZC/oVVha4XliVuiF2AH2IGKnJFEe6VYLohXH441fSzBeILUoLoxMcyDzCaglymeuhqYNb/GNVZIHrbVkFqPM9ATUIrQ6nsc7iHZox2iNp5ccgZq3pkl4vmZbLJ6sOgm/PHTOrlni9wDEiEwx9gLccI\
wgcyS8HObXL2SS9R1REYkeAHanDhMEeXyeRIxLzIOMsleEx486K3C4URmM+WBHA6LXCbHUICPloU2Vk64Bi+UmGRhl4y9VIEJV54SVhsj6PB1MvOzpHxDxdFMi2yexTbqRJWsGDTpGlwdkRYzINDG5Afj9W02Lb+\
JwKZcQIQS0kwPofgT9s1jd/CAFNvG2gUTb+H1kKy5yAqUrKT0Ihq5KvgDFTi+AK/ZTqjr1/5OAJyh/j8dZBPBB+Me6wRCduVn8HKzw5hCwCd0vE95FV4giNX8XzZrgfmiCWeABuoBdj/DoX61AT+6oWHyL5osdb+\
DNSkxTDsfG30WcqmZbnwUpgvO0PUY3A9CLKAN2eOQIyRjc4iWlsePrGSMM/dPYLRMha3tubDu7m1Y4PgRWqTvVlltQGI6oqT59RXEssQhQfIi8SXVzsSGh5h0A6kKCDmnJPnerhx5jPiTLqG2QI2TC7xTzX+Wsme\
uUcwS2LdrV3ADumY0q4pP0Nv6ZpdjKn30WQr0GJ2KFADRm+jP5Ap6ShdjOPAdtEBxHSC+D5guYgI2aGN8wHP1xF7quX0RmsDgdWLVlzMOjYRJxOiFd26l4877eidRScw3ei/57RbqoeO85k6ToTJBBgaTm31Q1be\
7MwR2ToWNoA2cdaNAU18g6ASFYsRVdhLLv/87jJP6DIPCqpgIMyD2jvIFYydUZXRG6tncgkZamaHmCmvSjFCYkt+sDQN+X1UDQSAzxMh+SEzieKEvYomFdvCB0ez3YRbppiBpog6Zp2TE0ezWik04LQ6g38pSIJW\
gpKggKb0M27hrb6vOeV8QYA+GQjPLDoNZX+kJHORr7LXojmKravcJqWNITNHc/cfhESCIMfCHsptGBPKLzhOd+eKON0art/gKF0Wr5qIhvnTKyJ1DudrdhECshw2V/eA5b8gM/shvQ1Tef9DayVK5vS5pnxYVnBK\
RuU41gWMtYoLgOsCCH4BKxxeeHdhSDTW9HDrPo2x7LAFyXTiDB5QaZGADQquJdg4LYsc7h2GhCN1hbP9M63EWnOt/UbO9vumZ/B3HYLzLfhyihRS6OapcEj0TaTsp2le8L7g+BKtfYWTnoBU9LFyD4UvRDMavRrh\
X1Cp1/0+zC3AHphBXOygpgxO9B6sI0ijNZrTdogxddAfYdXn9jCiBglNjJOXUU6/wCLEuLvepnwniHuAq0Qr9ondNkYb1smUJaQmzYD6JuKiymRktu7OcS76c0Pk7Lc2odlwiaLRrjrZxrA517OVGDkH6dhRqeVf\
pFIzUan6UpU6ZGGK1aQx12z+QZWKIKUdlTpao1I5Hnztc4TLW85Vsj2ERZfVOu2a/He0q/6LtCtLk3BVwU64bMOy0Fj0mmUh9IAGp8xCaEgFgx11espxy8QUb2NI7QNpvsyhvSo3OhoP5drpC7RkUdXuQVynaMZg\
jQJe80QYYGYqA/pKdSUsMnCkJBVjvBBjdfa13U6mbEeTH4hhw3h9oKkTXi3duK6ETIxxDInjFJy23FbLWp6QOoKNwWV8sUIb9iHLwNBmwrXl5Z7fIVPgkClQLWlYOkV2IwMlVLn5gUPZPfsGC0nMXlxYwqH2ATla\
vse9ubpBn4jRI/QZCX1eO5nLSwjFxZumlmyz72yArsuna/GJgm2w+en4nK7g85z2YlPe9y91RuCxwrqE1sv6mjHb5A5mUxM3UeW9D451l+Kiz0SHY73ZGXqR7316iOUCpcH3e2NTWsPSxyBPhaoELTnI2TVY4uK3\
k4Z3HQUdd7A87mFZ6iXKp68tuTgtLyOpG09nL4SAtMEbyX5AYqAQJGOocwi5PBib5EdDSXqu8N6UGG/5qoRHSQnmkkrATk1gY1aHDIdkFgLc7NllknsDtQAnsQvW6XkEJXFN9TGrMOwJbRvxDvtC25g7Nzkx1jcR\
WciSCA865mHRMQmjjjUostkJ8bpm3RqrTyJFrt1ARIvFzNjrGQ06GaJZcCQEQY/JKFUsJWgkDqm5vk+p0zFLdmDCQJ3+IsZB/IKNA3t4omslqPQUS0Hv2T2cIX3XGQdmHUNY3vcAo+tsk2I4RcY9goUB9wXxq0MM\
uJkgSx6L4DkUs0DfiNLbkmNK+lbBCo+5qvVKNsuYzQrMwbG/aDgs/w9wWJ+3gnVGAuLOOUBQjT5qJJSukfDwUiMhHnMtfIH1sOul5jmHIy9lqKnLUF1rkw4etFJTAnrWVMjVkAZF3kiM8y3oSSw7kKHI7HCpoXgg\
xI7X2AmXCMYJFzyXsinV3iHmVqbF5oQSsfAOhMBejIdlMGOskpf25EFZTT+dxVZKxrpcVnL0qEz+44xWXi3HXoAQ04sVCZZ2PqBAgyPHJBloNZ7g+cxRQIi4XI6GFSU60NALKrFaRw7q+sEIa4pT9IXOcGz0C8Ji\
GBwMvhcReHx3QKbZ8PBedpcLUqXGDzdGsfuEYuJ4oHP/iccFE6C89GZezn5eTxgnmV/nC6jPN47+LkbpMeetqOoSRiMDLBhuOO69Ki981MmLYojKVvZ5WUrBUetimIOQUzGSSg4VVNO77nOFTSXN8FaED8JbT2Hb\
JBJfFLGXiqfI4X50S3B8AqoB4o3kocKH6tVTttaVFd14Aq7un96DaD0a1CnXhYSc3QVutfVYX6ErhpmkTw5x7hJM5I/N/s6BlssiPqO/xCHTJon7h0uzpCBbamVuIyq2Pj2dvcs5IcLDySoernJK/2x9mu7Vp1EJ\
+5qarJlV8J+X/d0la8C63hiW+U/53erftDlV2RzxRKyWcHrMFqGhmXI4u5S1dqD2/omSZCMv7UHV7HJuBgUWexCoLbAMiAzHduFDKGtZwHYByyv+DX79Qk5Cho8wBAQ/Rh+4ZAw4j6NU4KzoEYeTUjZ/EGdIwGdA\
GMV2dnFMpGlQIujjY3DGqGLWzfWD1AX82CDY72y6STpa/0YlsxwB3O4l5Z2UIh44K5xn6oefSHbANjJPIS9bPF3zIrrsRXzZi9FlL5LLXqS9F9jI0Awtogu0oc82JoBqn/CN58qD004RmHvap/T3zEjDPTGRL2At\
DZoMkPEO6hbzGLekrH2rxb4kKjwmM/t1nwotwhWG1PkQHxyF4FPKv/T7LjzK83LB/hmpYDos4315/h30b2n4IxNWv7xNvFrC4V4tx7giYXw8MwUZQpy7ePqc9nvNkqzi+AEkmYrwA4l+FALCmZz2B8GFH2W8Q6Ne\
9FhEWdArfJYDaZB+q2p1j0yEG9tY5dvIMUFfziHwORsFoehiZyObLzAqDSiCv+hs3yZS1aiUcxVYI1OhQDi9xtZp+dPxT4NjPuen8/ni2AdZqBcMWXqTWCgbReY0ORZreDfo1JiKeGZ9RLGBrJaDJfjF9j6ohamj\
l2vZwjWZl9qEeOWAULoDjKAdDGJVUw1jaSM1QW4V8knGmDUcC31ZifzsdszbseFgMVpDARzM5l6Q2yg81l3wUt3ssx2fSOo/1MmJGa8D4XdOXwRs9kQ6rh1/QPcVSFCu4LlqOl24D4x2cwD5PMjRZeaQKxOBUYkH\
4YvtXejfis65446G/bs4nMNBQP5R7zSXPUNgTgWKMcfOCwkH/N7nawAqPmtEhYFfchEIbJUmb/ekWA14gEmjF3UDVMTG4G+08kyKkXENRw+ezecvf333ASHho1FCos7Z38qJ2vP+rEILMpY+F86FKvlKudeCe6aW\
OxCJzlUHzsFBPkQSiLddiJmdH83n2S5e4+DhMer2UW7jT+SaQ+C55kAabs1nzjk0nUH1AIQ7YT83+XHIuxVP5eE7Xp0grBaYQ++ge4mFDj28psLDayo8vKbCu0M1l4rKp52rT4hFTtmqC91bYcJ1V8RAVqjRnXsV\
8LqFwWDzZMpXUmAReTOxeRz3ngXPGu38+OvOZQx448TJstPDubZBheTfDjYCs5TMuc9Grb/cBvxqczNMFruXwYzNnTQPqJoGbzJRnU8z+tSBHfe7Ytsll2tDBnL2FZe0xANsmLCL+jdRyHk2Kilegsj8gB80qtMV\
D7eZ04PNAEzTbPSt1LXK6Su46iKI2L9t8on8aPuPD2i1POAzZrQaL4e4H0k0A/HQYNC8gtBDE5tPSgx5379rgTrk+elLjy5aaRpkqpjyZ/seWHfoOSA8aKi5t3TgOdhHpGzbBUtxPgd5mmaFdSxAz+k8q5FvzuO4\
y21Kjg7s+TaSxx3wYDVfioMnRPDUISdwMejdyHZumPMthaWqKPOI3FgxxzyhAneXuXcddYXx2C7TBm7cLenwmA4LnlH9KuH4Ak8/7nL+m/VHAJd81GziK0nI6hHZrfSQ65theUUmmzbuIDzvbErk5xOo8222H3BJ\
RzS/vmGzvSqcfM9wGS2+vTsYxHLypp44EJAA3C24ErwZP+3soBBDqkGweSI36KA2adBuM8ql6fKD4tIZ+3iqO+yBtX8kWbIHA+SD/EG2u+fv7Ap5gZqXUfGB3EskHWq0LkArqbu09Cp/dARKu+X35Tlyv1VTqD4g\
q4zlNuBdV3CtjsKLFWKOBmo5HsN1unrStR/R7YmEDDUFgt7Q86rpk7dvIbH9c53kRs3uYGPcQXDHNRjO9UNzJlmvXrqRaanm13FrW+FXw5vg6GqwhirWVGi55nA3RHYOqIVrLrJHfPSkycx+GQiaEQhzphjOw2FJ\
qN0ELE/FzH2MUiiYzyfPNx+JsoEv4p2YLj37wTfbXuNFGjr/bge+goLyFXsgU7e5MhLXNgp22TL7GFe4AMpdKCVCGqg7/UmkA4aq99eC8GkztROY+wLkcPvoLjOOnJEdiZJAmKQbniWPLoMMKyPUZ0PWg46oxGoR\
qYabYI8Z42MRK0qgsb+T8sG12N2fV0vb1Q1auYRtsXceFvZ2KbViHGZsrANw2gDUu81AlevQJKze423q8My1K166jTO3sXQb79zGh+5lfFnvcr6833ZvYMNK7szco/YN/orNs+qWcz9fnsslasacc9HKN+KgYAD8\
gsBDaQdSrxWC7k1qcG0HXlUHR2Ak1ohuANurqvBR2cgNCA27FQoDV45jQt75Y2aYJvyVbwRZK6agELfk6F2ngveBZAi0c6lC/1M8MwgWLIUMho9gqkOxL/DCG+xayA1YuMqX9oRFlcgNP+TbYBAdHQg59dVY0weW\
KneWFU9/k8yeMlK4WlleYetm2onfcUBc2xtifuXYbCJG2kQsoZvMvsUhzBTfnsPpPbIuES29qcP+1I/49ovGBiZaqJ9xUKOoH/Geoee/wPOCb8NwoPX4cgl8RBl0DKVDeR94MihJmrMvhYWiTNRnaOu2q9pmBWtB\
qpxrbSef1s4dcuEaHJZmAQ1HcED+0GUvj7hCurHZoXbII1pnidlYcxwO6+NHg6cBBlZivnIowEih5Y/7iW9OtB2656IhwhSxgC7zrUNwGQo0QG/z5FhrufaiNjmgJ2YVgglpDrKRnKv6SrYyene4YX4SL6YozMmO\
C7kGxyxwwmUxjXW4Ww6aYrbgXuqI1yAYbLAUSkuHVDFfy1JX5iQvXaPZGNf0ky0F/Fb1pdInCaNaVIGIHy0HCZ0xXQw16h/msCWnciVLrTlqbzY3bqC6u4GyNRZUgxg0QNzmIYKHA/4mX/2m4mhuJcGx4AQPaHnv\
+QY+R57yKKMVXrlYYRTarCRLbvYDlkhSqDlRyz9GKGsfPBbliVb492yMlJabcjiaaQ4yBRFFXXGcLT4tM9ra5BNQptRiKuYNHMMJh5hz2RHvsHNuT3pq7DlfyszYmwLPHaClf7Wuv7X0zDdUB4XN67sDvN7357fL\
YgGX/KogjdM8aK3S9k39Zrn4zX0Ytw+rYlnAbcC+e4MtypCRkxB0IvxUQsJ/iApOPsIFvHP7k3Kc1PiZC0jwJlQlCdMxFzLLG0Qb3kg8/sn86nzwaL7khy0zys+sAUdkdWyncYNAXe2DC6xlzMZpqFSOb31kXOLh\
9d00n0xqG39nuxEnYV4lFEUs3oPxBz4k8PHpLm8UJd0E1H9zZn49YUYKxjsO0pvITHxhUIqGMuH/jXmIF8bQDEefD9+fbtSNgemawLR01ty/M2wlBB312r0TsN2jkW6xD5WMdVq925VVb248+uUqysC9P9Y2OncC\
Fr2oTW9Mrdbcma16/fv3aIe9dtRrx7120mtnvbbutlUPns6BZTVwG52e7mXc6nT12u6/7E9d0Q4/k4eu4qmreKzfTq5op1e0s4+2lx9pvflIq3t797q2/mh78bG9c+Xf5+7b5LNwtPyMdfchb66QAj3IVQ8S1cOi\
6oy34TZuuI3OsLfcxoHbeOI2OgR525M0PTiLXlv32nW0Zpeo/+Iu/qulwJ+VEn9WivxZKfNnpdBV7c/8U4GNm5kdmOLOo4OiI95pscmULBhrHMAwO01d/v+KWF2pz+ataw1Hadj6ltnv/wsZwgm9\
""")))
ESP32ROM.STUB_CODE = eval(zlib.decompress(base64.b64decode(b"""
eNqNWnt31LgV/yqOgbxItpbtseW0WxKgQwjblkAJgc7pjiXbCbSkkM4h4Szbz17dlyTPTNr+MWDL0tXVffzuQ/lla9HfLrYOErM1ux104v4pH8FThk/V0ew2c4+Ncq+d+w2zW5slNKj1bOH+hafs/tkxfcWZ7f8z\
UwG9jCbIT2XCQRY9RT8tHPUT99kSJQ17GnrO3FiW+713YE3M1cYKe+kHosiv27PF2fUq/0gGiKtcTuLey2Q7W3+SLDskVvvAp6ocJ23gue8imdmlPZuG9gwDyMTZt7ul538qPOssWg1K7R4RAflldI5I3RvCTZ7C\
J+D7iXuYwEl0OEnf0td2ItI/f0wiGkRUxRGQhU9v3TwYNecp8PQGVO3OZicwI2eioLoCpJ0enrtX9cCNF5GKM36GY02AwmkYDLoCuU1oRZePPj6+HGn6GOW5YKL68XHKqrbZQQmEHjfpkqBFiGCXyG62ZKT40mSR\
rGENepN9FBmOaKeM3w8P5ekYmOA1akS69KSDslDCIKAaHo7kwR/0HFzi5AlYnhp92Hb/VEmyYIXm4KW2ugef3OS+p8lGhWcv5jbf5Af8LcgOWmavceQs2wdYdx9bvYnImkilDf1v4MfPfjvLHtg4jSqztHPDUgVB\
wEewdRhreCy4UP5XlrvjR9vRcY75S7QBcq4iDpleU8Mev4+Xazorni8LXt2hh9fk9qqZEjZl2Xc3DezCfVEgafdlMfZhJr0CAjBQejp340UTGOqUh8YFH70YCeWctzXpJhMGAHUq7BrWOjyXghfO5OzYHN7RkjEm\
vdwH62IUPCLbcBhRZhPL9laAv4Onl+9fv5zN3BxdyeqeJESu+dStdl+USFnfJ9kh9OQUDETsMciCWlQJUioSx6jJE0YSxoU+8mltD1KyKVvu/AW5OnjzDv4DluGo4HtjNBiHKPTez+hHLsY8Or6P54f5KUmiDaFL\
JNt2BNc6CgGBqx9nVyHo9Jb8CJFckaW2eUB58B8lAKhIOF0fRZg8gtJi2Z1DhG6TGE1zjo4m3+CZYOLDKirHsmyze2Rky1EKDUiCTiaxvFW3EZscFPF4mRz1BGbmH+EVdXWUwnOBnIFHthBWmt1d2lbTnJAN6OZk\
5z3bC1rV/uyK6dsJ89uM+N0jrPFhPDAEwIxqy8kIcVpPuoDvnRlnJCPByBzLtl6MaeNaoamZTv1f6HQ8p1yds5oh0EkOgHYVEEToybsyKadXRsArZ2xdm7HJ83n84gCuA3OfOPvv9A/sCY2NhiF+wnndS7WxQTwA\
VKGCoyxy5KRVwEkI/hWHEvSil08A0TvOSkRDKkyLKYE3wQ4y37AjrnBRLK89SQ8RPs8sGSs6BLuuiVYDsLctx4R+jQ5hvIlSHyNrHgQrRQshuSM62WUteB+KrBN2tN3/sojLWF+f45dF/HIbvwBIXTDWAZ6zi8AW\
l+wsG6C2JsIHOWI70Pm0fg7mcB0khV5a7c2uII5oc8Hz7tAcng4B6JkTOAzmUw4bqPQ6nhIv/SPsciroBbo3wtqbj7RIkses2o+SNK+oQ0sCpmOEuoCMZ34p9lcloBAONgAwq8GGsTzKMFAKk1dw+uoTsdG0p9PZ\
tXDyQOQxdzviFmyzGP7YAhHieEfihWRsVOJ03NbMlFlxro/px6c1oam1nDjgbi+79YfWtfvcFrRFH1cTLLOuXK88yUcUp9uY9OXbEhy/sKHVKCIoxarp7ArOPKG5bf5CkGqg3Qkiv1M+aTjd6urN6WyL/AylMnyG\
j8nYM9sVdO3DaXBpts5FWeiTIHRy8T2I2Kj14i7/ewVMKdgXDtSptyR03VOe5auhOwo8VxP8+fjoOfjfQ6wDSjSCxSFbEFUK+lAq+SXFLNeIYIl6hOeHoyJ2LQMEmVlUoWxFsYT5isolmGRCUfT9kkn3DCbnkvkd\
/mMXQ4XOOWJYu4dPP9F/JVV+uBhNR17AZKnsOsxIfy6snHuA+olCLE5VJdGCEkVxPhv54FUKAUo/5uwjuwvANyiKY+HVMdhiWEWaj7GEeJHWOVknWkgum7wgvLYq6UMqiAaluILDh14466la6iYRUthdyC/te5+s\
PQ8BT44TuPVul5JVw0BUHRbObpMHyahtUjAkTu7w4naNROy4taK5lLAEOOBCNk/ugcWk8Bi3ChICE0jfMnY13GaNHw2Rf3XqNW8ZDYKwoGAAwYHUW2mIWO+e/RoJIUr0r2DThxtMrT8ieB+GEzOumwGnx42iW7aA\
1RYUj6rKHMHhDYWpTKWnswUOqZ3TgcpmVaf8tZBp2HGCVoSap8kATRa9DR4XcZPvnHJhEdmINmJnBLZwPmvZCmgL9ovxLpxA58kGlVAYymvKbpQa5xqwp2UMlXFwltaP96LEqbiAPz6x2eVwmEZS0orCYpZd5H/C\
k3Ax6gGk+poMlYz3Mgg1sR42ZRxDhBtrEfsGqZirX/D1jM/ryOjBp5a3rfF2AsixxBVB1AWm5sPXwJi2EQWcM8jCKbV4hKaJaE6/CYeSd9vRjrZqaLttMGg/+EQmw4DjhQvEYYnbH3hTLoZliRpN05UEUptGjFlm\
rEFw5aLYoQeErWrf7bmQsYzGqDIHUTQ+TsiUpQ0fxN+KaFMKeJmaktmKxKDwpQ1+Jzn9Pvqw18cmZySiThY3htJR5GIQdenTNJmnZOOq2uUK002vozCJvSWoILX6uowUZ5QEKPBEzHEwG87JH6y9T9SGJqY2h3B8\
RmcZ+nbOThWVgC3/BuxdXq3ZtEMInT+FjfyOz7gTu0KKmL9YofMe8qZVzs+4QdNPubPr83LpigRJT/8eDN7PzdjwTaSMXosZPlvztStEVW1kFZnaDNOyai5NKkwxsKN1whjSxoZmuRpXjXjK37hxoan3vEVY1ZrQ\
0rJmTV4ERlcip3uw7hnseBTF3Wp5wxZFciY3BXW0ZUISU1xZWAXA2PA1BCZ4PXec1Qeourv8IczEHcARin0+c3USOpLga7COuEH9LWJBVBfs8yjm51TqbW2/I6iHRuqAHaOBDLbxYD7fgSaIIdyGXRquH4zgOstN\
ckfZmbuuDc0yBqwdjom1aL1LAdvHmQlDHjyXEgoqjjksrRZvLBI2GhjBHm+VyPw6YlzxaTJ6SOYEIbh7tkONHRLWtBxXKJgcd0GU0nZZEqY3qd/IXYqW7/oimBwXjQY7yduQVlUPA/RqQkLNaNeY2McE7KYhkR6O\
ZabAUrWx5ESofRXwLr73EQIclrK1Hk0fVb/WhWHrPoQRxb6MOXMZOwS3v210PxYZmhiRZlsDbbSAJQ2kfE3DyWAk7VcM1JCi9Z/AxDq8OpJZcDdQ//gHIgnIpsqVkwGh/sDb8j616Kxve+1S4HHqWLxIkY2rF1TX\
4RmahG7nyIJzelHYEd8g24Zp0BTAPLWSSiQS4igoNeOIJnJbVdoyrA1DwpccAF5NzeW/v3BpJMVqMcU6YzjXSynWIJcNlboAF5tQQjQ9RWYxyZq+xWjqo7EJJY2qo/pwlCq8WAn1qvptnGqoKuRJqgzQq6rQBOtq\
BkLFCWIfB6xLIOOLmn5cCWAvtsXMGIzmiN6MgTdDGB03ArtulQQmrQXfxygux5bmoIPqUEjTUfFWwMclyu0gBAPy9+Q8cZCXxUbAr0jjzJs4VgruKUxC07BBxTySIfeUpmAyPfBpMl9PrKTzg6TzQ5IKCRMlR5bR\
y6MAVDtgL/0X7hF1oJP7uElLlPvsBgR/Da/VLSR0trmpgq0BQXDrjttmFisruYjQ8OELtTwGvuTBGdCUKrjWZZU1fJMxSA2jSYXAhZUx7lWB8JbV5qDnCtDsAejki0QnshrTwKgEQcM3Sy3cI7S6FTsi/245srUA\
C4Zv1nAA+wyfA1VsQ7TEHfbyRchduDfAe5GKwSZqWpLM+q+xoL5x/IfaoboF9dobioX1gEHxhoN4dUN9hs6jjUBhvfMaFC6WDImeS02vt5kLgCNT+nAWX8F5vCUKXDv2ZOBnlPoZ9iw09n61wa274NuNXp2D42tU\
B1xg4xStcrsOSGL4ilWuK3BmLjMl0WLK1RrKYnB87YLxCFSgp6TtYXDF9WU9xwo4XPyaknnQ1BnAyq/3bX/xfXrKCZ11Li2XOLusfACc0nUspiH6BpzhAoLdW9AENrdrSV19ZFW/Lh/pE9eAV3O+/ZJalW9At+KG\
ys0acLMhm6h9txBl4FsMH3iKlSwHBFH9GzPZC0lj3xJUYza0gfbwIVCWTn6jbJS+6ktfTFo93wb+P+Hyn5log9eSTdhe5TQNqP4TlYWG/IpQAo7d1MEY1t+VNBISfOq7zeDJboi5gs2D2DFQV7G8sEP2nQIW7bLJ\
sX2ymfJTFVqyowu+bOee7AVkbMrtNgkzIaRajLVN/NdHOHefRuLj+R267ywLOYwZLU0DI2E5LxmLa2svwb8O+/lfi/Ya/kZMZXVZFKoqtfvSXy2uv/nBIstLN9i1i5b/mCxq/m7xl5hQMZlUhda//gfTQr5T\
""")))
def _main():
try:
main()
except FatalError as e:
print('\nA fatal error occurred: %s' % e)
sys.exit(2)
if __name__ == '__main__':
_main()
| {
"content_hash": "8e97b6397b9b4125df70dd8f9ade89e6",
"timestamp": "",
"source": "github",
"line_count": 2394,
"max_line_length": 161,
"avg_line_length": 45.0187969924812,
"alnum_prop": 0.6440176293203433,
"repo_name": "husigeza/pycom-micropython-sigfox",
"id": "590b920fbda11ad3ffe861ce8946740dbc146f4c",
"size": "108706",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "esp32/tools/esptool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "32133098"
},
{
"name": "C++",
"bytes": "642137"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "104211"
},
{
"name": "Objective-C",
"bytes": "10903"
},
{
"name": "Python",
"bytes": "1000724"
},
{
"name": "Shell",
"bytes": "13441"
}
],
"symlink_target": ""
} |
from pywikibot import family
from pywikibot.tools import deprecated
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'kontu'
self.langs = {
'fi': 'kontu.wiki',
# A list of valid language codes is needed for the
# -exceptinsidetag:interwiki parameter of the replace script to
# work. The category script does not allow the values to be empty.
'de': 'localhost',
'en': 'localhost',
'fa': 'localhost',
'fr': 'localhost'
}
def scriptpath(self, code):
return {
'fi': '/w',
'de': '/dummy',
'en': '/dummy',
'fa': '/dummy',
'fr': '/dummy'
}[code]
@deprecated('APISite.version()')
def version(self, code):
return {
'fi': u'1.24.1',
}[code]
| {
"content_hash": "a80aa3ab2f000bbfbf007f567423c7c9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 28.060606060606062,
"alnum_prop": 0.490280777537797,
"repo_name": "hperala/kontuwikibot",
"id": "f2104ea63e3c15626046468771bf830ba0e24738",
"size": "951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywikibot/families/kontu_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "C",
"bytes": "137889"
},
{
"name": "C++",
"bytes": "4113"
},
{
"name": "Python",
"bytes": "3758566"
}
],
"symlink_target": ""
} |
from datetime import *
from flask.ext.restful import Resource, marshal, reqparse
from flask import request, abort, g, url_for
from ..services import user_papers
from ..core import auth
from ..helpers.linkheader import composeLinkHeader
from .fields import paper_fields, full_paper_fields
class PaperListAPI(Resource):
"""
API :class:`Resource` for a list of papers for the user in the request.
"""
decorators = [auth.login_required]
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument('page', type=int, default=1)
self.parser.add_argument('per_page', type=int, default=50)
self.parser.add_argument('unread', type=bool, default=False)
self.parser.add_argument('since', type=str)
self.parser.add_argument('ids', type=str)
super(PaperListAPI, self).__init__()
def get(self, journal_id=None):
args = self.parser.parse_args()
user = g.user
paperList = user_papers.grab_papers(user, journal_id, args['unread'],
args['ids'], args['since'])
paperList = paperList.order_by(user_papers.model().created.desc())
paperList = paperList.paginate(args['page'], per_page=args['per_page'])
numberOfPages = paperList.pages
pageNavigationLinks = {}
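        # Collect page navigation URLs; composeLinkHeader() turns them into a
        # single HTTP Link header. 'next' and 'prev' are only added when they
        # would differ from the 'last' and 'first' links.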
if args['page'] < numberOfPages:
pageNavigationLinks['last'] = url_for('papers', _external=True,
per_page=args['per_page'], page=numberOfPages)
if args['page'] < numberOfPages-1:
pageNavigationLinks['next'] = url_for('papers', _external=True,
per_page=args['per_page'], page=args['page']+1)
if args['page'] > 1:
pageNavigationLinks['first'] = url_for('papers', _external=True,
per_page=args['per_page'], page=1)
if args['page'] > 2:
pageNavigationLinks['prev'] = url_for('papers', _external=True,
per_page=args['per_page'], page=args['page']-1)
return map(lambda p: marshal(p, paper_fields), paperList.items), 200, \
{'X-Total-Count': str(paperList.pages), 'Link': composeLinkHeader(pageNavigationLinks)}
class PaperAPI(Resource):
"""API :class:`Resource` for a single paper."""
decorators = [auth.login_required]
def get(self, id):
user = g.user
paper = user.papers.filter_by(paper_id=id).first_or_404()
return marshal(paper, full_paper_fields)
class UnreadPapersAPI(Resource):
"""API :class:`Resource` to retrieve or mark papers as unread"""
decorators = [auth.login_required]
def get(self):
"""Get the list of the unread paper ids"""
user = g.user
return user_papers.unreadList(user)
def put(self):
"""Grab the ids from the request to mark read papers as unread"""
unread_ids = request.json['unread_papers']
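        # Reject oversized batches with 413 (request entity too large).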
if len(unread_ids) > 1000:
abort(413)
user = g.user
marked_ids = user_papers.markUnread(user, unread_ids)
return marked_ids
class ReadPapersAPI(Resource):
"""API :class:`Resource` to mark papers as read"""
decorators = [auth.login_required]
def put(self):
"""Put papers in the read list equivalent to marking them as read"""
read_ids = request.json['readPapers']
if len(read_ids) > 1000:
abort(413)
user = g.user
marked_ids = user_papers.markRead(user, read_ids)
return marked_ids
class MarkAllPapersAPI(Resource):
"""API :class:`Resource` to mark all papers as read"""
decorators = [auth.login_required]
def put(self):
"""
        Delete papers from the unread list, equivalent to marking them as read.
"""
user = g.user
user_papers.markAllRead(user)
return ''
| {
"content_hash": "77c4788800f9dea847951e887479587f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 99,
"avg_line_length": 33.940677966101696,
"alnum_prop": 0.5915106117353308,
"repo_name": "dedalusj/PaperChase",
"id": "c937d5fbde5ccf932b53340fc033583949f9c855",
"size": "4005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/paperchase/api/papers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24393"
},
{
"name": "JavaScript",
"bytes": "54250"
},
{
"name": "Python",
"bytes": "83893"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import contextlib
import time
import unittest
from astroid import builder
from astroid import nodes
from astroid import parse
from astroid import transforms
@contextlib.contextmanager
def add_transform(manager, node, transform, predicate=None):
manager.register_transform(node, transform, predicate)
try:
yield
finally:
manager.unregister_transform(node, transform, predicate)
class TestTransforms(unittest.TestCase):
def setUp(self):
self.transformer = transforms.TransformVisitor()
def parse_transform(self, code):
module = parse(code, apply_transforms=False)
return self.transformer.visit(module)
def test_function_inlining_transform(self):
def transform_call(node):
# Let's do some function inlining
inferred = next(node.infer())
return inferred
self.transformer.register_transform(nodes.Call,
transform_call)
module = self.parse_transform('''
def test(): return 42
test() #@
''')
self.assertIsInstance(module.body[1], nodes.Expr)
self.assertIsInstance(module.body[1].value, nodes.Const)
self.assertEqual(module.body[1].value.value, 42)
def test_recursive_transforms_into_astroid_fields(self):
# Test that the transformer walks properly the tree
# by going recursively into the _astroid_fields per each node.
def transform_compare(node):
# Let's check the values of the ops
_, right = node.ops[0]
# Assume they are Consts and they were transformed before
# us.
return nodes.const_factory(node.left.value < right.value)
def transform_name(node):
# Should be Consts
return next(node.infer())
self.transformer.register_transform(nodes.Compare, transform_compare)
self.transformer.register_transform(nodes.Name, transform_name)
module = self.parse_transform('''
a = 42
b = 24
a < b
''')
self.assertIsInstance(module.body[2], nodes.Expr)
self.assertIsInstance(module.body[2].value, nodes.Const)
self.assertFalse(module.body[2].value.value)
def test_transform_patches_locals(self):
def transform_function(node):
assign = nodes.Assign()
name = nodes.AssignName()
name.name = 'value'
assign.targets = [name]
assign.value = nodes.const_factory(42)
node.body.append(assign)
self.transformer.register_transform(nodes.FunctionDef,
transform_function)
module = self.parse_transform('''
def test():
pass
''')
func = module.body[0]
self.assertEqual(len(func.body), 2)
self.assertIsInstance(func.body[1], nodes.Assign)
self.assertEqual(func.body[1].as_string(), 'value = 42')
def test_predicates(self):
def transform_call(node):
inferred = next(node.infer())
return inferred
def should_inline(node):
return node.func.name.startswith('inlineme')
self.transformer.register_transform(nodes.Call,
transform_call,
should_inline)
module = self.parse_transform('''
def inlineme_1():
return 24
def dont_inline_me():
return 42
def inlineme_2():
return 2
inlineme_1()
dont_inline_me()
inlineme_2()
''')
values = module.body[-3:]
self.assertIsInstance(values[0], nodes.Expr)
self.assertIsInstance(values[0].value, nodes.Const)
self.assertEqual(values[0].value.value, 24)
self.assertIsInstance(values[1], nodes.Expr)
self.assertIsInstance(values[1].value, nodes.Call)
self.assertIsInstance(values[2], nodes.Expr)
self.assertIsInstance(values[2].value, nodes.Const)
self.assertEqual(values[2].value.value, 2)
def test_transforms_are_separated(self):
# Test that the transforming is done at a separate
# step, which means that we are not doing inference
# on a partially constructed tree anymore, which was the
# source of crashes in the past when certain inference rules
# were used in a transform.
def transform_function(node):
if node.decorators:
for decorator in node.decorators.nodes:
inferred = next(decorator.infer())
if inferred.qname() == 'abc.abstractmethod':
return next(node.infer_call_result(node))
return None
manager = builder.MANAGER
with add_transform(manager, nodes.FunctionDef, transform_function):
module = builder.parse('''
import abc
from abc import abstractmethod
class A(object):
@abc.abstractmethod
def ala(self):
return 24
@abstractmethod
def bala(self):
return 42
''')
cls = module['A']
ala = cls.body[0]
bala = cls.body[1]
self.assertIsInstance(ala, nodes.Const)
self.assertEqual(ala.value, 24)
self.assertIsInstance(bala, nodes.Const)
self.assertEqual(bala.value, 42)
def test_transforms_are_called_for_builtin_modules(self):
# Test that transforms are called for builtin modules.
def transform_function(node):
name = nodes.AssignName()
name.name = 'value'
node.args.args = [name]
return node
manager = builder.MANAGER
predicate = lambda node: node.root().name == 'time'
with add_transform(manager, nodes.FunctionDef,
transform_function, predicate):
builder_instance = builder.AstroidBuilder()
module = builder_instance.module_build(time)
asctime = module['asctime']
self.assertEqual(len(asctime.args.args), 1)
self.assertIsInstance(asctime.args.args[0], nodes.AssignName)
self.assertEqual(asctime.args.args[0].name, 'value')
def test_builder_apply_transforms(self):
def transform_function(node):
return nodes.const_factory(42)
manager = builder.MANAGER
with add_transform(manager, nodes.FunctionDef, transform_function):
astroid_builder = builder.AstroidBuilder(apply_transforms=False)
module = astroid_builder.string_build('''def test(): pass''')
# The transform wasn't applied.
self.assertIsInstance(module.body[0], nodes.FunctionDef)
def test_transform_crashes_on_is_subtype_of(self):
# Test that we don't crash when having is_subtype_of
# in a transform, as per issue #188. This happened
# before, when the transforms weren't in their own step.
def transform_class(cls):
if cls.is_subtype_of('django.db.models.base.Model'):
return cls
return cls
self.transformer.register_transform(nodes.ClassDef,
transform_class)
self.parse_transform('''
# Change environ to automatically call putenv() if it exists
import os
putenv = os.putenv
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
''')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "9e3b21a412bcaa66fc39121f833d1d7e",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 77,
"avg_line_length": 34.3640350877193,
"alnum_prop": 0.585067007019783,
"repo_name": "lucidmotifs/auto-aoc",
"id": "b6b7c9d86d147b9a457f247aeb392881df575fbf",
"size": "8122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".venv/lib/python3.5/site-packages/astroid/tests/unittest_transforms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "C",
"bytes": "41695"
},
{
"name": "C++",
"bytes": "35306"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "48431"
},
{
"name": "JavaScript",
"bytes": "2043"
},
{
"name": "Python",
"bytes": "4850280"
},
{
"name": "Shell",
"bytes": "3778"
},
{
"name": "Visual Basic",
"bytes": "820"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
} |
"""Support for Arlo Alarm Control Panels."""
import logging
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
PLATFORM_SCHEMA,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ATTRIBUTION, DATA_ARLO, SIGNAL_UPDATE_ARLO
_LOGGER = logging.getLogger(__name__)
ARMED = "armed"
CONF_HOME_MODE_NAME = "home_mode_name"
CONF_AWAY_MODE_NAME = "away_mode_name"
CONF_NIGHT_MODE_NAME = "night_mode_name"
DISARMED = "disarmed"
ICON = "mdi:security"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOME_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_AWAY_MODE_NAME, default=ARMED): cv.string,
vol.Optional(CONF_NIGHT_MODE_NAME, default=ARMED): cv.string,
}
)
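# By default all three custom mode names map to Arlo's generic "armed" mode;
# override them in the platform configuration to use custom Arlo modes.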
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arlo Alarm Control Panels."""
arlo = hass.data[DATA_ARLO]
if not arlo.base_stations:
return
home_mode_name = config[CONF_HOME_MODE_NAME]
away_mode_name = config[CONF_AWAY_MODE_NAME]
night_mode_name = config[CONF_NIGHT_MODE_NAME]
base_stations = []
for base_station in arlo.base_stations:
base_stations.append(
ArloBaseStation(
base_station, home_mode_name, away_mode_name, night_mode_name
)
)
add_entities(base_stations, True)
class ArloBaseStation(AlarmControlPanelEntity):
"""Representation of an Arlo Alarm Control Panel."""
def __init__(self, data, home_mode_name, away_mode_name, night_mode_name):
"""Initialize the alarm control panel."""
self._base_station = data
self._home_mode_name = home_mode_name
self._away_mode_name = away_mode_name
self._night_mode_name = night_mode_name
self._state = None
@property
def icon(self):
"""Return icon."""
return ICON
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ARLO, self._update_callback
)
)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
def update(self):
"""Update the state of the device."""
_LOGGER.debug("Updating Arlo Alarm Control Panel %s", self.name)
mode = self._base_station.mode
if mode:
self._state = self._get_state_from_mode(mode)
else:
self._state = None
def alarm_disarm(self, code=None):
"""Send disarm command."""
self._base_station.mode = DISARMED
def alarm_arm_away(self, code=None):
"""Send arm away command. Uses custom mode."""
self._base_station.mode = self._away_mode_name
def alarm_arm_home(self, code=None):
"""Send arm home command. Uses custom mode."""
self._base_station.mode = self._home_mode_name
def alarm_arm_night(self, code=None):
"""Send arm night command. Uses custom mode."""
self._base_station.mode = self._night_mode_name
@property
def name(self):
"""Return the name of the base station."""
return self._base_station.name
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"device_id": self._base_station.device_id,
}
def _get_state_from_mode(self, mode):
"""Convert Arlo mode to Home Assistant state."""
if mode == ARMED:
return STATE_ALARM_ARMED_AWAY
if mode == DISARMED:
return STATE_ALARM_DISARMED
if mode == self._home_mode_name:
return STATE_ALARM_ARMED_HOME
if mode == self._away_mode_name:
return STATE_ALARM_ARMED_AWAY
if mode == self._night_mode_name:
return STATE_ALARM_ARMED_NIGHT
return mode
| {
"content_hash": "70c30a8b9b0a86d26f7111c9edf03805",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 88,
"avg_line_length": 30.17721518987342,
"alnum_prop": 0.631501677852349,
"repo_name": "adrienbrault/home-assistant",
"id": "dd899cbd04ff279e67ed2fd06e4521b0cab04365",
"size": "4768",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/arlo/alarm_control_panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
import sys
import json
import errno
import select
import threading
import traceback
import multiprocessing
# Python 2/3 compatibility layer.
# Six is not available out of the box on GDB 7.4 so we roll our own.
if sys.version_info[0] == 3:
import socketserver
def iteritems(d, **kw):
return iter(d.items(**kw))
def to_bytes(s):
return s.encode("latin-1")
def exec_file(filename, context):
with open("parsers.py") as file:
exec(file.read(), context)
else:
import SocketServer as socketserver
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def to_bytes(s):
return s
def exec_file(filename, context):
with open("parsers.py") as file:
exec(file.read()) in context
class DebugServer(object):
    def parser_for_type(self, type):
return self.parsers.get(str(type.unqualified()))
def parse_value(self, value, name):
try:
parser = self.parsers.get(str(value.type.unqualified()))
if parser:
return {
"n": name,
"t": str(value.type),
"p": parser(value)
}
except Exception as e:
            # There are a lot of exceptions thrown around when variables aren't
            # initialized or contain garbage.
            # TODO: Try not to swallow all the exceptions, just the relevant ones.
print(e)
return None
def evaluate_expression(self, expression):
raise NotImplementedError()
def parse_expression(self, expression):
value = self.evaluate_expression(expression)
return self.parse_value(value, expression) if value else None
def serialize(self, local_variables, member_variables, watches):
return json.dumps({"locals": local_variables, "members": member_variables, "watches": watches})
def local_symbols(self):
raise NotImplementedError()
def member_symbols(self):
raise NotImplementedError()
def store_locals(self):
try:
# Try to parse every variable, store what works.
local_variables = {}
for (name, value) in iteritems(self.local_symbols()):
parsed = self.parse_value(value, name)
if parsed:
local_variables[name] = parsed
# Do the same for the member variables.
member_variables = {}
for (name, value) in iteritems(self.member_symbols()):
parsed = self.parse_value(value, name)
if parsed:
member_variables[name] = parsed
# Add watches to the variables.
watches = {}
for i in self.watches:
parsed = self.parse_expression(i)
if parsed:
watches[i] = parsed
self.send_to_server(self.serialize(local_variables, member_variables, watches))
except Exception as e:
traceback.print_exc()
print(e)
def add_watch(self, expression):
self.watches.add(expression)
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def read_file(self, filename):
with open(filename, "r") as file:
return file.read()
def send(self, data, format):
self.request.sendall(to_bytes("HTTP/1.1 200 OK\n"
"Content-Type: {0}; charset=UTF-8\n"
"Content-Length: {1}\n"
"\n"
"{2}".format(format, len(data), data)))
def handle(self):
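            # Minimal GET routing: "/" serves the bundled client page and
            # "/lv" returns the latest variable snapshot as JSON; anything
            # else gets a 404.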
data = self.request.recv(4096)
match = re.match(b"GET (.*?) HTTP/1\.1", data)
if match:
url = match.group(1)
if url == b"/":
self.send(self.read_file("client.html"), "text/html")
elif url == b"/lv":
self.send(self.server.content_to_serve(), "application/json")
else:
self.request.sendall(b"HTTP/1.1 404 Not Found\n")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
# Monkey patch select.select to ignore signals and retry
original_select = select.select
def signal_resistant_select(*args, **kwargs):
while True:
try:
return DebugServer.ThreadedTCPServer.original_select(*args, **kwargs)
except select.error as e:
if e[0] != errno.EINTR:
raise
select.select = signal_resistant_select
def __init__(self, server_address, RequestHandlerClass, json_debug_server):
self.json_debug_server = json_debug_server
self.allow_reuse_address = True
socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
def content_to_serve(self):
return self.json_debug_server.content_to_serve
def send_to_server(self, content):
self.parent_end.send(content)
def start_server(self, our_end, parent_end):
# Close parent's end of the pipe, otherwise we don't get notified when the parent dies
parent_end.close()
# Create the server thread
server = DebugServer.ThreadedTCPServer(("localhost", 4000), DebugServer.ThreadedTCPRequestHandler, self)
server_thread = threading.Thread(target = server.serve_forever)
server_thread.daemon = True
server_thread.start()
# Wait for data from the parent
while True:
try:
self.content_to_serve = our_end.recv()
except IOError as e:
if e.errno != errno.EINTR:
raise
except EOFError as e:
# This should happen when the parent's terminated and the pipe got closed
server.shutdown()
                server_thread.join()
return
#
# Commands
#
def command_add_watch(self, argument):
if len(argument) > 0:
self.add_watch(argument)
self.store_locals()
def command_list_watches(self, argument):
for index, watch in enumerate(sorted(self.watches)):
print("#%d: %s" % (index, watch))
def command_remove_watch(self, argument):
if len(argument) > 0:
original_size = len(self.watches)
if argument[0] == "#":
try:
index = int(argument[1:])
if index < original_size:
self.watches.remove(sorted(self.watches)[index])
else:
print("Index '#%d' is too big" % index)
except ValueError as e:
print("Invalid index '%s'" % argument)
elif argument == "*":
self.watches.clear()
elif argument in self.watches:
self.watches.remove(argument)
else:
print("Map watch '%s' doesn't exist" % argument)
if len(self.watches) != original_size:
self.store_locals()
#
# Initialization
#
def load_parsers(self):
# Decorator used in parser declaration
#
# @parsed_type("ns::Type")
# def parse_type_ns_Type(value):
# return float(value["member"])
def parsed_type(name):
def parsed_type_decorator(function):
function.parsed_type = name
return function
return parsed_type_decorator
parsers = {"parsed_type": parsed_type}
exec_file("parsers.py", parsers)
parsers = {
parsers[i].parsed_type: parsers[i]
for i in parsers
if "parsed_type" in dir(parsers[i])
}
# Add reference and pointer equivalents
for i in list(parsers.keys()):
# References
parsers["%s &" % i] = parsers[i]
parsers["const %s &" % i] = parsers[i]
# Pointers
dereference_and_parse = lambda value: parsers[i](value.dereference())
parsers["%s *" % i] = dereference_and_parse
parsers["const %s *" % i] = dereference_and_parse
self.parsers = parsers
def add_commands(self):
self.add_command("jds-add-watch", self.command_add_watch)
self.add_command("jds-list-watches", self.command_list_watches)
self.add_command("jds-remove-watch", self.command_remove_watch)
def add_command(self, name, handler):
raise NotImplementedError()
def install_stop_hook(self, hook):
raise NotImplementedError()
def __init__(self):
self.content_to_serve = self.serialize({}, {}, {})
self.parent_end, child_end = multiprocessing.Pipe()
self.watches = set()
self.parsers = {}
# Create a server
server_process = multiprocessing.Process(target = self.start_server, args = (child_end, self.parent_end))
server_process.daemon = True
server_process.start()
# Close child's end of the pipe, otherwise the child doesn't receive an error when the parent dies
child_end.close()
self.load_parsers()
self.add_commands()
self.install_stop_hook(self.store_locals)
print("Supported types:")
for i in self.parsers:
print(" - %s" % i)
class GdbDebugServer(DebugServer):
class Command(gdb.Command):
def __init__(self, name, handler):
super(self.__class__, self).__init__(name, gdb.COMMAND_DATA)
self.handler = handler
def invoke(self, argument, from_tty):
self.handler(argument)
def add_command(self, name, handler):
self.Command(name, handler)
def install_stop_hook(self, hook):
gdb.events.stop.connect(lambda event: hook())
def local_symbols(self):
# Collect blocks up to the function level.
blocks = []
block = gdb.selected_frame().block()
while not (block == None or block.is_static or block.is_global):
blocks.append(block)
block = block.superblock
# Go though all the blocks from the most outer to the most inner one and
# collect all local variable names.
symbols = {}
for index, block in enumerate(reversed(blocks)):
for i in block:
symbols[i.name] = i
# Evaluate: convert symbols to values.
# For GDB 7.5 it should be: symbol.value(gdb.selected_frame())
# For now use version that works on both 7.4 and 7.5+
return {name: gdb.selected_frame().read_var(symbol.name) for (name, symbol) in iteritems(symbols)}
def member_symbols(self):
members = []
try:
members = gdb.parse_and_eval("this").dereference().type.fields()
except gdb.error:
# Not in a class context
pass
return {i.name: gdb.parse_and_eval("(*this).%s" % i.name) for i in members if not i.is_base_class}
def evaluate_expression(self, expression):
try:
return gdb.parse_and_eval(expression)
except gdb.error:
pass
return None
__jds__ = GdbDebugServer()
| {
"content_hash": "08d48aba168a7ff1b14d448e36244089",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 113,
"avg_line_length": 33.65395894428153,
"alnum_prop": 0.5588184036249564,
"repo_name": "detunized/Judas",
"id": "ac4b862e5d39a2db1fb549df9dc454e749134594",
"size": "11476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "judas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6450"
},
{
"name": "Python",
"bytes": "16397"
}
],
"symlink_target": ""
} |
from gchaos.config.datastore import CONFIG as DATASTORE_CONFIG
DATASTORE = 'datastore'
DEFAULT_CONFIG = {
DATASTORE: DATASTORE_CONFIG
}
__all__ = ["DEFAULT_CONFIG"]
| {
"content_hash": "c4328a7d9a7df20e5c37f27f0889ce52",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 15.636363636363637,
"alnum_prop": 0.7267441860465116,
"repo_name": "RealKinetic/gchaos",
"id": "a72352576d8b3271068c69b4d1149cd825cf08bd",
"size": "1277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gchaos/config/memory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1524"
},
{
"name": "Python",
"bytes": "73185"
},
{
"name": "Shell",
"bytes": "1450"
}
],
"symlink_target": ""
} |
import collections
import json
import os
import sys
import hashlib
import logging
from .utils import cached_property, get_resource
from .graphics import Image
def load_json(filename):
try:
with open(filename, 'r') as fp:
result = json.load(fp)
if not isinstance(result, dict):
raise ValueError('Failed to load %s because it should contain a dictionary object, not an array.' % filename)
return result
except ValueError:
raise ValueError('Failed to load %s because it\'s not a valid JSON file' % filename)
except IOError:
#either non-existent file or empty filename
return {}
def save_json(filename, obj):
data = json.dumps(obj)
with open(filename, 'w') as fp:
fp.write(data)
class SettingsDict(collections.MutableMapping):
'''
Represents the tingapp.settings dict-like object.
The settings are loaded from three files in the app bundle
- default_settings.json
This file contains default settings as defined by the app creator
- settings.json
This file contains settings as set by a user when installing the app
(via Tide, for example)
- local_settings.json
This file contains settings written by the app itself.
Settings can be overridden by later files.
Changes are always saved to the local_settings.json file.
'''
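    # Minimal usage sketch (the path below is hypothetical):
    #
    #     settings = SettingsDict('/apps/example')
    #     volume = settings.get('volume', 5)   # falls back through the three files
    #     settings['volume'] = 7               # written to local_settings.json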
def __init__(self, path):
        # Note: we do NOT initialise self.dct or self.local_settings here - this ensures we
        # raise an error in the event that they are accessed before self.load
self.loaded = False
self.path = path
def __contains__(self, item):
if not self.loaded:
self.load()
return item in self.dct
def __len__(self):
if not self.loaded:
self.load()
return len(self.dct)
def __getitem__(self, key):
if not self.loaded:
self.load()
return self.dct[key]
def __setitem__(self, key, value):
if not self.loaded:
self.load()
self.dct[key] = value
self.local_settings[key] = value
self.save()
def __delitem__(self, key):
if not self.loaded:
self.load()
del self.local_settings[key]
def __iter__(self):
if not self.loaded:
self.load()
return iter(self.dct)
def load(self):
self.dct = load_json(os.path.join(self.path, 'default_settings.json'))
self.dct.update(load_json(os.path.join(self.path, 'settings.json')))
self.local_settings = load_json(os.path.join(self.path, 'local_settings.json'))
self.dct.update(self.local_settings)
self.loaded = True
def save(self):
save_json(os.path.join(self.path, 'local_settings.json'), self.local_settings)
def generic_icon(name):
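    # Deterministic placeholder icon: the app name hashes to a fixed
    # background colour and its first letter is drawn on top.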
name_hash = int(hashlib.md5(name).hexdigest(), 16)
color_options = [
'blue', 'teal', 'green', 'olive', 'yellow', 'orange', 'red',
'fuchsia', 'purple', 'maroon'
]
color = color_options[name_hash % len(color_options)]
letter = name[0].lower()
icon = Image(size=(96, 96))
icon.fill(color=color)
image = get_resource('default-icon-texture-96.png')
icon.image(image)
font = get_resource('MiniSet2.ttf')
descenders = ['g', 'p', 'q', 'y']
ascenders = ['b', 'd', 'f', 'h', 'k', 'l', 't']
y_offset = 0
if letter in descenders:
y_offset -= 8
if letter in ascenders:
y_offset += 6
icon.text(letter,
xy=(52, 41 + y_offset),
color='white',
font=font,
font_size=70)
# they're a little large compared to the real icons, let's size them down a bit
resized_icon = Image(size=(96,96))
resized_icon.image(icon, scale=0.9)
return resized_icon
class TingApp(object):
def __init__(self, path=None):
"""path is the root path of the app you want to inspect
if path is None, then will let you inspect the current app"""
if path is None:
path = os.path.dirname(os.path.abspath(sys.argv[0]))
self.path = path
self.settings = SettingsDict(path)
@cached_property
def info(self):
return load_json(os.path.join(self.path, 'app.tbinfo'))
@property
def name(self):
if 'name' in self.info and self.info['name'] != '':
return self.info['name']
else:
return os.path.basename(self.path)
@cached_property
def icon(self):
icon_path = os.path.join(self.path, 'icon.png')
if not os.path.isfile(icon_path):
return generic_icon(self.name)
try:
icon = Image.load(icon_path)
except:
logging.exception('Failed to load icon at %s', icon_path)
return generic_icon(self.name)
if icon.size != (96, 96):
            # resize the icon by redrawing it at the correct size
resized_icon = Image(size=(96, 96))
resized_icon.image(icon, scale='shrinkToFit')
return resized_icon
return icon
app = TingApp()
| {
"content_hash": "86ce9d486109b4e530d89a49cabf2f88",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 125,
"avg_line_length": 29.231638418079097,
"alnum_prop": 0.5947042906841902,
"repo_name": "furbrain/tingbot-python",
"id": "c8cd0ca301fcc495d44e5f87e6900eb6cb14c9d9",
"size": "5174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tingbot/tingapp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1345"
},
{
"name": "Python",
"bytes": "131709"
}
],
"symlink_target": ""
} |
"""
Views for managing Neutron Subnets.
"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets \
import tables as project_tables
from openstack_dashboard.dashboards.project.networks.subnets \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.networks.subnets import utils
from openstack_dashboard.dashboards.project.networks.subnets \
import workflows as project_workflows
from openstack_dashboard.dashboards.project.networks.views \
import DefaultSubnetWorkflowMixin
class CreateView(DefaultSubnetWorkflowMixin, workflows.WorkflowView):
workflow_class = project_workflows.CreateSubnet
@memoized.memoized_method
def get_object(self):
try:
network_id = self.kwargs["network_id"]
network = api.neutron.network_get(self.request, network_id)
return network
except Exception:
redirect = reverse('horizon:project:networks:index')
msg = _("Unable to retrieve network.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
network = self.get_object()
return {"network_id": self.kwargs['network_id'],
"network_name": network.name_or_id,
"dns_nameservers": self.get_default_dns_servers()}
class UpdateView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateSubnet
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
subnet_id = self.kwargs['subnet_id']
try:
return api.neutron.subnet_get(self.request, subnet_id)
except Exception:
redirect = reverse("horizon:project:networks:index")
msg = _('Unable to retrieve subnet details')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
initial = super(UpdateView, self).get_initial()
subnet = self._get_object()
initial['network_id'] = self.kwargs['network_id']
initial['subnet_id'] = subnet['id']
initial['subnet_name'] = subnet['name']
for key in ('cidr', 'ip_version', 'enable_dhcp'):
initial[key] = subnet[key]
initial['gateway_ip'] = subnet['gateway_ip'] or ''
initial['no_gateway'] = (subnet['gateway_ip'] is None)
if initial['ip_version'] == 6:
initial['ipv6_modes'] = utils.get_ipv6_modes_menu_from_attrs(
subnet['ipv6_ra_mode'], subnet['ipv6_address_mode'])
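        # The update workflow edits multi-value fields as newline-separated
        # text: one nameserver, "start,end" pool or "destination,nexthop"
        # route per line.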
initial['dns_nameservers'] = '\n'.join(subnet['dns_nameservers'])
pools = ['%s,%s' % (p['start'], p['end'])
for p in subnet['allocation_pools']]
initial['allocation_pools'] = '\n'.join(pools)
routes = ['%s,%s' % (r['destination'], r['nexthop'])
for r in subnet['host_routes']]
initial['host_routes'] = '\n'.join(routes)
return initial
class DetailView(tabs.TabView):
tab_group_class = project_tabs.SubnetDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ subnet.name|default:subnet.id }}"
@memoized.memoized_method
def get_data(self):
subnet_id = self.kwargs['subnet_id']
try:
subnet = api.neutron.subnet_get(self.request, subnet_id)
except Exception:
subnet = []
msg = _('Unable to retrieve subnet details.')
exceptions.handle(self.request, msg,
redirect=self.get_redirect_url())
else:
if subnet.ip_version == 6:
ipv6_modes = utils.get_ipv6_modes_menu_from_attrs(
subnet.ipv6_ra_mode, subnet.ipv6_address_mode)
subnet.ipv6_modes_desc = utils.IPV6_MODE_MAP.get(ipv6_modes)
if ('subnetpool_id' in subnet and
subnet.subnetpool_id and
api.neutron.is_extension_supported(self.request,
'subnet_allocation')):
subnetpool = api.neutron.subnetpool_get(self.request,
subnet.subnetpool_id)
subnet.subnetpool_name = subnetpool.name
return subnet
@memoized.memoized_method
def get_network(self, network_id):
try:
network = api.neutron.network_get(self.request, network_id)
except Exception:
network = {}
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg)
return network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
subnet = self.get_data()
network = self.get_network(subnet.network_id)
subnet.network_name = network.get('name')
subnet.network_url = self.get_network_detail_url(subnet.network_id)
network_nav = subnet.network_name or subnet.network_id
table = project_tables.SubnetsTable(self.request,
network_id=subnet.network_id)
# TODO(robcresswell) Add URL for "Subnets" crumb after bug/1416838
breadcrumb = [
(network_nav, subnet.network_url),
(_("Subnets"),), ]
context["custom_breadcrumb"] = breadcrumb
context["subnet"] = subnet
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(subnet)
return context
def get_tabs(self, request, *args, **kwargs):
subnet = self.get_data()
return self.tab_group_class(request, subnet=subnet, **kwargs)
@staticmethod
def get_network_detail_url(network_id):
return reverse('horizon:project:networks:detail',
args=(network_id,))
@staticmethod
def get_redirect_url():
return reverse('horizon:project:networks:index')
| {
"content_hash": "d47e881db48889bac0b69c463048becd",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 77,
"avg_line_length": 38.30434782608695,
"alnum_prop": 0.6131019944867845,
"repo_name": "coreycb/horizon",
"id": "4066e96673e29818bda14be8f2802a100ab09167",
"size": "6775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/networks/subnets/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "125003"
},
{
"name": "HTML",
"bytes": "570845"
},
{
"name": "JavaScript",
"bytes": "1890885"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5337537"
},
{
"name": "Shell",
"bytes": "19049"
}
],
"symlink_target": ""
} |
from growcut import growcut_python
from numba import autojit
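# Each benchmark entry pairs a display name with a numba autojit-compiled
# version of the pure-Python growcut reference implementation.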
benchmarks = (
("growcut_numba",
autojit(growcut_python.growcut_python)),
)
| {
"content_hash": "27bef9b7d4de1a4b651862c66c985f8e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 45,
"avg_line_length": 18.5,
"alnum_prop": 0.722972972972973,
"repo_name": "numfocus/python-benchmarks",
"id": "31e9e1a295df5c8184ce463517222d3229503332",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "growcut/growcut_numba.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "150278"
},
{
"name": "JavaScript",
"bytes": "61962"
},
{
"name": "Python",
"bytes": "44572"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import overload
import warnings
import numpy as np
from pandas._libs import (
iNaT,
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
AstypeArg,
Dtype,
DtypeObj,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.base import (
ExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.common import (
is_bool_dtype,
is_datetime64_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.masked import (
BaseMaskedArray,
BaseMaskedDtype,
)
from pandas.core.arrays.numeric import (
NumericArray,
NumericDtype,
)
from pandas.core.ops import invalid_comparison
from pandas.core.tools.numeric import to_numeric
class _IntegerDtype(NumericDtype):
"""
An ExtensionDtype to hold a single size & kind of integer dtype.
These specific implementations are subclasses of the non-public
_IntegerDtype. For example we have Int8Dtype to represent signed int 8s.
The attributes name & type are set when these subclasses are created.
"""
def __repr__(self) -> str:
sign = "U" if self.is_unsigned_integer else ""
return f"{sign}Int{8 * self.itemsize}Dtype()"
@cache_readonly
def is_signed_integer(self) -> bool:
return self.kind == "i"
@cache_readonly
def is_unsigned_integer(self) -> bool:
return self.kind == "u"
@property
def _is_numeric(self) -> bool:
return True
@classmethod
def construct_array_type(cls) -> type[IntegerArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return IntegerArray
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# we only handle nullable EA dtypes and numeric numpy dtypes
if not all(
isinstance(t, BaseMaskedDtype)
or (
isinstance(t, np.dtype)
and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_))
)
for t in dtypes
):
return None
np_dtype = np.find_common_type(
# error: List comprehension has incompatible type List[Union[Any,
# dtype, ExtensionDtype]]; expected List[Union[dtype, None, type,
# _SupportsDtype, str, Tuple[Any, Union[int, Sequence[int]]],
# List[Any], _DtypeDict, Tuple[Any, Any]]]
[
t.numpy_dtype # type: ignore[misc]
if isinstance(t, BaseMaskedDtype)
else t
for t in dtypes
],
[],
)
if np.issubdtype(np_dtype, np.integer):
return INT_STR_TO_DTYPE[str(np_dtype)]
elif np.issubdtype(np_dtype, np.floating):
from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE
return FLOAT_STR_TO_DTYPE[str(np_dtype)]
return None
def safe_cast(values, dtype, copy: bool):
"""
Safely cast the values to the dtype if they
are equivalent, meaning floats must be equivalent to the
ints.
"""
try:
return values.astype(dtype, casting="safe", copy=copy)
except TypeError as err:
casted = values.astype(dtype, copy=copy)
if (casted == values).all():
return casted
raise TypeError(
f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
) from err
def coerce_to_array(
values, dtype, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is integer numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and (
dtype.startswith("Int") or dtype.startswith("UInt")
):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = INT_STR_TO_DTYPE[str(np.dtype(dtype))]
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"integer-na",
"mixed-integer-float",
]:
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
elif is_bool_dtype(values) and is_integer_dtype(dtype):
values = np.array(values, dtype=int, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(f"{values.dtype} cannot be converted to an IntegerDtype")
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("int64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
class IntegerArray(NumericArray):
"""
Array of integer (optional missing) values.
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as the missing value rather
than :attr:`numpy.nan`.
.. warning::
IntegerArray is currently experimental, and its API or internal
implementation may change without warning.
We represent an IntegerArray with 2 numpy arrays:
- data: contains a numpy integer array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an IntegerArray from generic array-like input, use
:func:`pandas.array` with one of the integer dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d integer-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
IntegerArray
Examples
--------
Create an IntegerArray with :func:`pandas.array`.
>>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
>>> int_array
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([1, None, 3], dtype='Int32')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: Int32
>>> pd.array([1, None, 3], dtype='UInt16')
<IntegerArray>
[1, <NA>, 3]
Length: 3, dtype: UInt16
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 1
@cache_readonly
def dtype(self) -> _IntegerDtype:
return INT_STR_TO_DTYPE[str(self._data.dtype)]
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]):
raise TypeError(
"values should be integer numpy array. Use "
"the 'pd.array' function instead"
)
super().__init__(values, mask, copy=copy)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> IntegerArray:
values, mask = coerce_to_array(scalars, dtype=dtype, copy=copy)
return IntegerArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls, strings, *, dtype: Dtype | None = None, copy: bool = False
) -> IntegerArray:
scalars = to_numeric(strings, errors="raise")
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=self.dtype)
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
@overload
def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
...
@overload
def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
dtype = pandas_dtype(dtype)
if isinstance(dtype, ExtensionDtype):
return super().astype(dtype, copy=copy)
na_value: float | np.datetime64 | lib.NoDefault
# coerce
if is_float_dtype(dtype):
# In astype, we consider dtype=float to also mean na_value=np.nan
na_value = np.nan
elif is_datetime64_dtype(dtype):
na_value = np.datetime64("NaT")
else:
na_value = lib.no_default
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort : Return the indices that would sort this array.
"""
data = self._data.copy()
if self._mask.any():
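            # Fill masked (NA) positions with a value below the current
            # minimum so they sort before every real value.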
data[self._mask] = data.min() - 1
return data
def _cmp_method(self, other, op):
from pandas.core.arrays import BooleanArray
mask = None
if isinstance(other, BaseMaskedArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
# This may be fixed by NA.__array_ufunc__. Revisit this check
# once that's implemented.
result = np.zeros(self._data.shape, dtype="bool")
mask = np.ones(self._data.shape, dtype="bool")
else:
with warnings.catch_warnings():
# numpy may show a FutureWarning:
# elementwise comparison failed; returning scalar instead,
# but in the future will perform elementwise comparison
# before returning NotImplemented. We fall back to the correct
# behavior today, so that should be fine to ignore.
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
method = getattr(self._data, f"__{op.__name__}__")
result = method(other)
if result is NotImplemented:
result = invalid_comparison(self._data, other, op)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask)
def sum(self, *, skipna=True, min_count=0, **kwargs):
nv.validate_sum((), kwargs)
return super()._reduce("sum", skipna=skipna, min_count=min_count)
def prod(self, *, skipna=True, min_count=0, **kwargs):
nv.validate_prod((), kwargs)
return super()._reduce("prod", skipna=skipna, min_count=min_count)
def min(self, *, skipna=True, **kwargs):
nv.validate_min((), kwargs)
return super()._reduce("min", skipna=skipna)
def max(self, *, skipna=True, **kwargs):
nv.validate_max((), kwargs)
return super()._reduce("max", skipna=skipna)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
        # if we have a float operand, or our op is a divide,
        # the result is by definition a float
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
if result.dtype == "timedelta64[ns]":
from pandas.core.arrays import TimedeltaArray
result[mask] = iNaT
return TimedeltaArray._simple_new(result)
return type(self)(result, mask, copy=False)
_dtype_docstring = """
An ExtensionDtype for {dtype} integer data.
.. versionchanged:: 1.0.0
Now uses :attr:`pandas.NA` as its missing value,
rather than :attr:`numpy.nan`.
Attributes
----------
None
Methods
-------
None
"""
# create the Dtype
@register_extension_dtype
class Int8Dtype(_IntegerDtype):
type = np.int8
name = "Int8"
__doc__ = _dtype_docstring.format(dtype="int8")
@register_extension_dtype
class Int16Dtype(_IntegerDtype):
type = np.int16
name = "Int16"
__doc__ = _dtype_docstring.format(dtype="int16")
@register_extension_dtype
class Int32Dtype(_IntegerDtype):
type = np.int32
name = "Int32"
__doc__ = _dtype_docstring.format(dtype="int32")
@register_extension_dtype
class Int64Dtype(_IntegerDtype):
type = np.int64
name = "Int64"
__doc__ = _dtype_docstring.format(dtype="int64")
@register_extension_dtype
class UInt8Dtype(_IntegerDtype):
type = np.uint8
name = "UInt8"
__doc__ = _dtype_docstring.format(dtype="uint8")
@register_extension_dtype
class UInt16Dtype(_IntegerDtype):
type = np.uint16
name = "UInt16"
__doc__ = _dtype_docstring.format(dtype="uint16")
@register_extension_dtype
class UInt32Dtype(_IntegerDtype):
type = np.uint32
name = "UInt32"
__doc__ = _dtype_docstring.format(dtype="uint32")
@register_extension_dtype
class UInt64Dtype(_IntegerDtype):
type = np.uint64
name = "UInt64"
__doc__ = _dtype_docstring.format(dtype="uint64")
INT_STR_TO_DTYPE: dict[str, _IntegerDtype] = {
"int8": Int8Dtype(),
"int16": Int16Dtype(),
"int32": Int32Dtype(),
"int64": Int64Dtype(),
"uint8": UInt8Dtype(),
"uint16": UInt16Dtype(),
"uint32": UInt32Dtype(),
"uint64": UInt64Dtype(),
}
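# Illustrative note (added comment, not part of the original module):
# INT_STR_TO_DTYPE["uint8"] returns the singleton UInt8Dtype() instance, mapping
# lowercase numpy dtype names to the corresponding nullable extension dtypes.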
| {
"content_hash": "b9c9f6c09d7d6b96ef192b19a999a0cc",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 85,
"avg_line_length": 28.66609880749574,
"alnum_prop": 0.5931538598680691,
"repo_name": "dsm054/pandas",
"id": "e62a2f95b0340264142d9f2f58a03d3d9506656c",
"size": "16827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/arrays/integer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "405762"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14362074"
},
{
"name": "Shell",
"bytes": "29904"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
'''Public section, including homepage and signup.'''
import time
import datetime
import urllib2
import json
from flask import (
render_template, jsonify, current_app, send_from_directory, request
)
from purchasing.extensions import cache
from purchasing.users.models import User
from purchasing.public.models import AppStatus
from purchasing.public import blueprint
@blueprint.route("/", methods=["GET", "POST"])
def home():
return render_template("public/home.html")
@blueprint.route("/about/")
def about():
return render_template("public/about.html")
@blueprint.route('/robots.txt')
def static_from_root():
return send_from_directory(current_app.static_folder, request.path[1:])
@blueprint.route('/_status')
def status():
'''Reports about App Status
'''
response = {
'status': 'ok',
'dependencies': ['Celery', 'Postgres', 'Redis', 'S3', 'Sendgrid'],
'resources': {}
}
# order the try/except blocks in the reverse order of seriousness
# in terms of an outage
try:
url = 'https://sendgrid.com/api/stats.get.json?api_user={user}&api_key={_pass}&days={days}'.format(
user=current_app.config['MAIL_USERNAME'],
_pass=current_app.config['MAIL_PASSWORD'],
days=datetime.date.today().day
)
sendgrid = json.loads(urllib2.urlopen(url).read())
sent = sum([m['delivered'] + m['repeat_bounces'] for m in sendgrid])
response['resources']['Sendgrid'] = '{}% used'.format((100 * float(sent)) / int(
current_app.config.get('SENDGRID_MONTHLY_LIMIT', 12000)
))
except Exception, e:
response['status'] = 'Sendgrid is unavailable: {}'.format(e)
try:
# TODO: figure out some way to figure out if s3 is down
pass
except Exception, e:
response['status'] = 'S3 is unavailable: {}'.format(e)
try:
redis_up = cache.cache._client.ping()
if not redis_up:
response['status'] = 'Redis is down or unavailable'
except Exception, e:
response['status'] = 'Redis is down or unavailable'
try:
status = AppStatus.query.first()
if status.status != 'ok':
if response['status'] != 'ok':
response['status'] += ' || {}: {}'.format(status.status, status.message)
else:
response['status'] = '{}: {}'.format(status.status, status.message)
except Exception, e:
response['status'] = 'Database is unavailable: {}'.format(e)
response['updated'] = int(time.time())
return jsonify(response)
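# Example /_status payload (illustrative only, values are made up):
# {"status": "ok",
#  "dependencies": ["Celery", "Postgres", "Redis", "S3", "Sendgrid"],
#  "resources": {"Sendgrid": "12.5% used"},
#  "updated": 1438012800}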
| {
"content_hash": "22d9c4d65eb297b8709b1fdf0c801730",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 107,
"avg_line_length": 32.123456790123456,
"alnum_prop": 0.617217524980784,
"repo_name": "CityofPittsburgh/pittsburgh-purchasing-suite",
"id": "51d9a04a517fd4175c828735f566d9ed8172f8eb",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "purchasing/public/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24438"
},
{
"name": "HTML",
"bytes": "316264"
},
{
"name": "JavaScript",
"bytes": "25700"
},
{
"name": "Makefile",
"bytes": "613"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "685542"
},
{
"name": "Shell",
"bytes": "3379"
}
],
"symlink_target": ""
} |
my_name = "Josh Nemmers"
print "Let's talk about %s" % my_name
my_age = 16
print "I am %d years old" % my_age
| {
"content_hash": "90af8d751354aedfe5c1051726f8bb28",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 37,
"avg_line_length": 22.2,
"alnum_prop": 0.6396396396396397,
"repo_name": "chrisortman/CIS-121",
"id": "3f5aeff31d85e1deffb79edcecce983c202db463",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k0764898/lpthw/ex5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "413801"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms import IntegerField, StringField, validators, FileField, BooleanField, SelectField
from .models import Team
class TeamForm(Form):
number = IntegerField("Number", [validators.DataRequired(),
validators.NumberRange(min=1, max=99999)])
name = StringField("Name", [validators.DataRequired(),
validators.Length(min=1, max=50)])
affiliation = StringField("Affiliation", [validators.Length(min=1, max=200)])
city = StringField("City", [validators.Length(min=1, max=50)])
state = StringField("State", [validators.Length(min=2, max=2)])
is_rookie = BooleanField("Rookie?")
def validate(self):
# Base validation
rv = Form.validate(self)
if not rv:
return False
if not self.number:
return True
# validate that the team number isn't already being used
t = Team.query.filter_by(number=self.number.data).first()
if t is not None:
self.number.errors.append("Team with this number already exists")
return False
        # TODO: validate that, if the 'highest round reached' value is decreased, there aren't existing scores for that round
return True
class UploadForm(Form):
file = FileField(u'', [validators.regexp(u'^.*\.csv$')])
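# Usage sketch (illustrative, not part of the original module; assumes a request
# context so CSRF can be handled):
#   form = TeamForm(number=42, name="Lightning", state="PA")
#   form.validate()  # False if a Team with number 42 already exists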
| {
"content_hash": "fc4ff77703ce066956a7c9e39850d03f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 133,
"avg_line_length": 37.513513513513516,
"alnum_prop": 0.6347262247838616,
"repo_name": "rtfoley/scorepy",
"id": "27cf5f5faee9cb22cdaa7dc19dfa1701f231ec74",
"size": "1388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/teams/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19816"
},
{
"name": "HTML",
"bytes": "50116"
},
{
"name": "JavaScript",
"bytes": "100677"
},
{
"name": "Python",
"bytes": "72580"
}
],
"symlink_target": ""
} |
from module import Module
from PyQt5 import QtWidgets, QtGui
class SubMod0(Module):
#Constructor
def __init__(self, frame):
Module.__init__(self, frame, 'submod0')
self.label = QtWidgets.QLabel()
#self.label.setGeometry(10,10,100,200)
grid = QtWidgets.QGridLayout()
self.setLayout(grid)
grid.addWidget(self.label, 0, 0, 1, 1)
reader = QtGui.QImageReader("Mod2Line.png")
image = reader.read()
qpixmap = QtGui.QPixmap()
qpixmap.convertFromImage(image)
self.label.setPixmap(qpixmap)
self.changeStateComplete()
def main(self):
pass
| {
"content_hash": "51b00105d9d12885432ae726ec86d3c6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 45,
"avg_line_length": 25.454545454545453,
"alnum_prop": 0.7178571428571429,
"repo_name": "Saucyz/explode",
"id": "93ab5ff994155c253dfca8002944bb54e55c97a7",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/submod0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "72479"
},
{
"name": "C",
"bytes": "758259"
},
{
"name": "C++",
"bytes": "108303"
},
{
"name": "HTML",
"bytes": "263059"
},
{
"name": "Logos",
"bytes": "13573"
},
{
"name": "Makefile",
"bytes": "191151"
},
{
"name": "Mathematica",
"bytes": "346"
},
{
"name": "Python",
"bytes": "87135"
},
{
"name": "Shell",
"bytes": "4813"
},
{
"name": "Standard ML",
"bytes": "32"
},
{
"name": "SystemVerilog",
"bytes": "379091"
},
{
"name": "VHDL",
"bytes": "16752591"
},
{
"name": "Verilog",
"bytes": "1247774"
}
],
"symlink_target": ""
} |
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class ShopifyAccount(ProviderAccount):
pass
class ShopifyProvider(OAuth2Provider):
id = 'shopify'
name = 'Shopify'
account_class = ShopifyAccount
def get_auth_params(self, request, action):
ret = super(ShopifyProvider, self).get_auth_params(request, action)
shop = request.GET.get('shop', None)
if shop:
ret.update({'shop': shop})
return ret
def get_default_scope(self):
return ['read_orders', 'read_products']
def extract_uid(self, data):
return str(data['shop']['id'])
def extract_common_fields(self, data):
# See: https://docs.shopify.com/api/shop
# User is only available with Shopify Plus, email is the only
# common field
return dict(email=data['shop']['email'])
providers.registry.register(ShopifyProvider)
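# Illustrative example (added comment, not original code): for shop data
# {"shop": {"id": 123, "email": "[email protected]"}}, extract_uid returns "123"
# and extract_common_fields returns {"email": "[email protected]"}.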
| {
"content_hash": "27c29b0a5c51cafac62c98d7031168b8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 29.485714285714284,
"alnum_prop": 0.6792635658914729,
"repo_name": "wli/django-allauth",
"id": "6d4999a3901ef3a5be82b95c4d561a1e4733f9bb",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/shopify/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42101"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "588192"
}
],
"symlink_target": ""
} |
"""Setup file for data structures/ linkedlist file."""
from setuptools import setup
setup(
name="linked_list",
description="A project to create and instantiate linked lists.",
version=0.1,
author="Reggie & Rachael",
author_email="[email protected]",
license="MIT",
py_modules=['linked_list'],
package_dir={'': 'src'},
install_requires=[],
extras_require={'test': ['pytest', 'pytest-watch', 'pytest-cov', 'tox']},
# entry_points={
# 'console_scripts': [
# "mailroom = mailroom:main"
# ]
# }
)
| {
"content_hash": "438e06476618b3b42778e6bf39a1f384",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 26.454545454545453,
"alnum_prop": 0.5979381443298969,
"repo_name": "rwisecar/data-structures",
"id": "129a616a3ac5be67b1af37235d5750da06f3b776",
"size": "582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111625"
}
],
"symlink_target": ""
} |
import numpy
import logging
import pprint
import operator
import itertools
import theano
import theano.tensor as TT
from theano import pp
from theano.ifelse import ifelse
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.layers import\
Layer,\
MultiLayer,\
SoftmaxLayer,\
HierarchicalSoftmaxLayer,\
LSTMLayer, \
RecurrentLayer,\
RecursiveConvolutionalLayer,\
UnaryOp,\
Shift,\
LastState,\
DropOp,\
Concatenate
from groundhog.models import LM_Model
from groundhog.datasets import PytablesBitextIterator
from groundhog.utils import sample_zeros, sample_weights_orth, init_bias, sample_weights_classic
import groundhog.utils as utils
logger = logging.getLogger(__name__)
def create_padded_batch(state, x, y, return_dict=False):
"""A callback given to the iterator to transform data in suitable format
:type x: list
:param x: list of numpy.array's, each array is a batch of phrases
in some of source languages
:type y: list
:param y: same as x but for target languages
:param new_format: a wrapper to be applied on top of returned value
:returns: a tuple (X, Xmask, Y, Ymask) where
- X is a matrix, each column contains a source sequence
- Xmask is 0-1 matrix, each column marks the sequence positions in X
- Y and Ymask are matrices of the same format for target sequences
OR new_format applied to the tuple
Notes:
* actually works only with x[0] and y[0]
* len(x[0]) thus is just the minibatch size
* len(x[0][idx]) is the size of sequence idx
"""
mx = state['seqlen']
my = state['seqlen']
if state['trim_batches']:
# Similar length for all source sequences
mx = numpy.minimum(state['seqlen'], max([len(xx) for xx in x[0]]))+1
# Similar length for all target sequences
my = numpy.minimum(state['seqlen'], max([len(xx) for xx in y[0]]))+1
# Batch size
n = x[0].shape[0]
X = numpy.zeros((mx, n), dtype='int64')
Y = numpy.zeros((my, n), dtype='int64')
Xmask = numpy.zeros((mx, n), dtype='float32')
Ymask = numpy.zeros((my, n), dtype='float32')
# Fill X and Xmask
for idx in xrange(len(x[0])):
# Insert sequence idx in a column of matrix X
if mx < len(x[0][idx]):
X[:mx, idx] = x[0][idx][:mx]
else:
X[:len(x[0][idx]), idx] = x[0][idx][:mx]
# Mark the end of phrase
if len(x[0][idx]) < mx:
X[len(x[0][idx]):, idx] = state['null_sym_source']
# Initialize Xmask column with ones in all positions that
# were just set in X
Xmask[:len(x[0][idx]), idx] = 1.
if len(x[0][idx]) < mx:
Xmask[len(x[0][idx]), idx] = 1.
# Fill Y and Ymask in the same way as X and Xmask in the previous loop
for idx in xrange(len(y[0])):
Y[:len(y[0][idx]), idx] = y[0][idx][:my]
if len(y[0][idx]) < my:
Y[len(y[0][idx]):, idx] = state['null_sym_target']
Ymask[:len(y[0][idx]), idx] = 1.
if len(y[0][idx]) < my:
Ymask[len(y[0][idx]), idx] = 1.
null_inputs = numpy.zeros(X.shape[1])
# We say that an input pair is valid if both:
# - either source sequence or target sequence is non-empty
# - source sequence and target sequence have null_sym ending
    # Why didn't we filter them out earlier?
for idx in xrange(X.shape[1]):
if numpy.sum(Xmask[:,idx]) == 0 and numpy.sum(Ymask[:,idx]) == 0:
null_inputs[idx] = 1
if Xmask[-1,idx] and X[-1,idx] != state['null_sym_source']:
null_inputs[idx] = 1
if Ymask[-1,idx] and Y[-1,idx] != state['null_sym_target']:
null_inputs[idx] = 1
valid_inputs = 1. - null_inputs
# Leave only valid inputs
X = X[:,valid_inputs.nonzero()[0]]
Y = Y[:,valid_inputs.nonzero()[0]]
Xmask = Xmask[:,valid_inputs.nonzero()[0]]
Ymask = Ymask[:,valid_inputs.nonzero()[0]]
if len(valid_inputs.nonzero()[0]) <= 0:
return None
# Unknown words
X[X >= state['n_sym_source']] = state['unk_sym_source']
Y[Y >= state['n_sym_target']] = state['unk_sym_target']
if return_dict:
return {'x' : X, 'x_mask' : Xmask, 'y': Y, 'y_mask' : Ymask}
else:
return X, Xmask, Y, Ymask
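# Illustrative example (added comment, not original code): with state['seqlen']=10,
# trim_batches enabled and two source phrases of lengths 3 and 5, mx becomes
# min(10, 5) + 1 = 6, X has shape (6, 2), and Xmask marks the real tokens plus
# the trailing null_sym position of each column.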
def get_batch_iterator(state):
class Iterator(PytablesBitextIterator):
def __init__(self, *args, **kwargs):
PytablesBitextIterator.__init__(self, *args, **kwargs)
self.batch_iter = None
self.peeked_batch = None
def get_homogenous_batch_iter(self):
while True:
k_batches = state['sort_k_batches']
batch_size = state['bs']
data = [PytablesBitextIterator.next(self) for k in range(k_batches)]
x = numpy.asarray(list(itertools.chain(*map(operator.itemgetter(0), data))))
y = numpy.asarray(list(itertools.chain(*map(operator.itemgetter(1), data))))
lens = numpy.asarray([map(len, x), map(len, y)])
order = numpy.argsort(lens.max(axis=0)) if state['sort_k_batches'] > 1 \
else numpy.arange(len(x))
for k in range(k_batches):
indices = order[k * batch_size:(k + 1) * batch_size]
batch = create_padded_batch(state, [x[indices]], [y[indices]],
return_dict=True)
if batch:
yield batch
def next(self, peek=False):
if not self.batch_iter:
self.batch_iter = self.get_homogenous_batch_iter()
if self.peeked_batch:
# Only allow to peek one batch
assert not peek
logger.debug("Use peeked batch")
batch = self.peeked_batch
self.peeked_batch = None
return batch
if not self.batch_iter:
raise StopIteration
batch = next(self.batch_iter)
if peek:
self.peeked_batch = batch
return batch
train_data = Iterator(
batch_size=int(state['bs']),
target_file=state['target'][0],
source_file=state['source'][0],
can_fit=False,
queue_size=1000,
shuffle=state['shuffle'],
use_infinite_loop=state['use_infinite_loop'],
max_len=state['seqlen'])
valid_data = Iterator(
batch_size=int(state['bs']),
target_file=state['valid_target'][0],
source_file=state['valid_source'][0],
can_fit=False,
queue_size=1000,
shuffle=state['shuffle'],
use_infinite_loop=state['use_infinite_loop'],
max_len=state['seqlen'])
return train_data, valid_data
class RecurrentLayerWithSearch(Layer):
"""A copy of RecurrentLayer from groundhog"""
def __init__(self, rng,
n_hids,
c_dim=None,
scale=.01,
activation=TT.tanh,
bias_fn='init_bias',
bias_scale=0.,
init_fn='sample_weights',
gating=False,
reseting=False,
dropout=1.,
gater_activation=TT.nnet.sigmoid,
reseter_activation=TT.nnet.sigmoid,
weight_noise=False,
name=None):
logger.debug("RecurrentLayerWithSearch is used")
self.grad_scale = 1
assert gating == True
assert reseting == True
assert dropout == 1.
assert weight_noise == False
updater_activation = gater_activation
if type(init_fn) is str or type(init_fn) is unicode:
init_fn = eval(init_fn)
if type(bias_fn) is str or type(bias_fn) is unicode:
bias_fn = eval(bias_fn)
if type(activation) is str or type(activation) is unicode:
activation = eval(activation)
if type(updater_activation) is str or type(updater_activation) is unicode:
updater_activation = eval(updater_activation)
if type(reseter_activation) is str or type(reseter_activation) is unicode:
reseter_activation = eval(reseter_activation)
self.scale = scale
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.updater_activation = updater_activation
self.reseter_activation = reseter_activation
self.c_dim = c_dim
assert rng is not None, "random number generator should not be empty!"
super(RecurrentLayerWithSearch, self).__init__(self.n_hids,
self.n_hids, rng, name)
self.params = []
self._init_params()
def _init_params(self):
self.W_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
-1,
self.scale,
rng=self.rng),
name="W_%s"%self.name)
self.params = [self.W_hh]
self.G_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
-1,
self.scale,
rng=self.rng),
name="G_%s"%self.name)
self.params.append(self.G_hh)
self.R_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
-1,
self.scale,
rng=self.rng),
name="R_%s"%self.name)
self.params.append(self.R_hh)
self.A_cp = theano.shared(
sample_weights_classic(self.c_dim,
self.n_hids,
-1,
10 ** (-3),
rng=self.rng),
name="A_%s"%self.name)
self.params.append(self.A_cp)
self.B_hp = theano.shared(
sample_weights_classic(self.n_hids,
self.n_hids,
-1,
10 ** (-3),
rng=self.rng),
name="B_%s"%self.name)
self.params.append(self.B_hp)
self.D_pe = theano.shared(
numpy.zeros((self.n_hids, 1), dtype="float32"),
name="D_%s"%self.name)
self.params.append(self.D_pe)
self.params_grad_scale = [self.grad_scale for x in self.params]
def set_decoding_layers(self, c_inputer, c_reseter, c_updater):
self.c_inputer = c_inputer
self.c_reseter = c_reseter
self.c_updater = c_updater
for layer in [c_inputer, c_reseter, c_updater]:
self.params += layer.params
self.params_grad_scale += layer.params_grad_scale
def step_fprop(self,
state_below,
state_before,
gater_below=None,
reseter_below=None,
mask=None,
c=None,
c_mask=None,
p_from_c=None,
use_noise=True,
no_noise_bias=False,
step_num=None,
return_alignment=False):
"""
Constructs the computational graph of this layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type updater_below: theano variable
:param updater_below: the input to the update gate
:type reseter_below: theano variable
:param reseter_below: the input to the reset gate
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
updater_below = gater_below
W_hh = self.W_hh
G_hh = self.G_hh
R_hh = self.R_hh
A_cp = self.A_cp
B_hp = self.B_hp
D_pe = self.D_pe
# The code works only with 3D tensors
cndim = c.ndim
if cndim == 2:
c = c[:, None, :]
        # Warning: source_num and target_num should be equal,
        # or one of them should be 1 (they have to broadcast)
        # for the following code to make any sense.
source_len = c.shape[0]
source_num = c.shape[1]
target_num = state_before.shape[0]
dim = self.n_hids
# Form projection to the tanh layer from the previous hidden state
# Shape: (source_len, target_num, dim)
p_from_h = ReplicateLayer(source_len)(utils.dot(state_before, B_hp)).out
# Form projection to the tanh layer from the source annotation.
if not p_from_c:
p_from_c = utils.dot(c, A_cp).reshape((source_len, source_num, dim))
# Sum projections - broadcasting happens at the dimension 1.
p = p_from_h + p_from_c
# Apply non-linearity and project to energy.
energy = TT.exp(utils.dot(TT.tanh(p), D_pe)).reshape((source_len, target_num))
if c_mask:
# This is used for batches only, that is target_num == source_num
energy *= c_mask
# Calculate energy sums.
normalizer = energy.sum(axis=0)
# Get probabilities.
probs = energy / normalizer
# Calculate weighted sums of source annotations.
        # If target_num == 1, c should be broadcast along the 1st dimension.
# Probabilities are broadcasted at the 2nd dimension.
ctx = (c * probs.dimshuffle(0, 1, 'x')).sum(axis=0)
state_below += self.c_inputer(ctx).out
reseter_below += self.c_reseter(ctx).out
updater_below += self.c_updater(ctx).out
# Reset gate:
# optionally reset the hidden state.
reseter = self.reseter_activation(TT.dot(state_before, R_hh) +
reseter_below)
reseted_state_before = reseter * state_before
# Feed the input to obtain potential new state.
preactiv = TT.dot(reseted_state_before, W_hh) + state_below
h = self.activation(preactiv)
# Update gate:
# optionally reject the potential new state and use the new one.
updater = self.updater_activation(TT.dot(state_before, G_hh) +
updater_below)
h = updater * h + (1-updater) * state_before
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
results = [h, ctx]
if return_alignment:
results += [probs]
return results
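    # Recap of step_fprop above (descriptive comment, not original code): the
    # attention energies are e_ij = exp(D_pe . tanh(B_hp h_{i-1} + A_cp c_j)),
    # probs_ij = e_ij / sum_j e_ij, and ctx_i = sum_j probs_ij * c_j; ctx then
    # feeds the input, reset and update signals of the gated recurrent update.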
def fprop(self,
state_below,
mask=None,
init_state=None,
gater_below=None,
reseter_below=None,
c=None,
c_mask=None,
nsteps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias=False,
return_alignment=False):
updater_below = gater_below
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if nsteps is None:
nsteps = state_below.shape[0]
if batch_size and batch_size != 1:
nsteps = nsteps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((nsteps, batch_size, self.n_in))
if updater_below:
updater_below = updater_below.reshape((nsteps, batch_size, self.n_in))
if reseter_below:
reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
else:
init_state = TT.alloc(floatX(0), self.n_hids)
p_from_c = utils.dot(c, self.A_cp).reshape(
(c.shape[0], c.shape[1], self.n_hids))
if mask:
sequences = [state_below, mask, updater_below, reseter_below]
non_sequences = [c, c_mask, p_from_c]
# seqs | out | non_seqs
fn = lambda x, m, g, r, h, c1, cm, pc : self.step_fprop(x, h, mask=m,
gater_below=g, reseter_below=r,
c=c1, p_from_c=pc, c_mask=cm,
use_noise=use_noise, no_noise_bias=no_noise_bias,
return_alignment=return_alignment)
else:
sequences = [state_below, updater_below, reseter_below]
non_sequences = [c, p_from_c]
# seqs | out | non_seqs
fn = lambda x, g, r, h, c1, pc : self.step_fprop(x, h,
gater_below=g, reseter_below=r,
c=c1, p_from_c=pc,
use_noise=use_noise, no_noise_bias=no_noise_bias,
return_alignment=return_alignment)
outputs_info = [init_state, None]
if return_alignment:
outputs_info.append(None)
rval, updates = theano.scan(fn,
sequences=sequences,
non_sequences=non_sequences,
outputs_info=outputs_info,
name='layer_%s'%self.name,
truncate_gradient=truncate_gradient,
n_steps=nsteps)
self.out = rval
self.rval = rval
self.updates = updates
return self.out
class ReplicateLayer(Layer):
def __init__(self, n_times):
self.n_times = n_times
super(ReplicateLayer, self).__init__(0, 0, None)
def fprop(self, x):
# This is black magic based on broadcasting,
# that's why variable names don't make any sense.
# copy x n_times times
a = TT.shape_padleft(x)
padding = [1] * x.ndim
b = TT.alloc(numpy.float32(1), self.n_times, *padding)
self.out = a * b
return self.out
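    # Illustrative note (added comment): for x of shape (batch, dim) and
    # n_times=5, shape_padleft gives (1, batch, dim); multiplying by the
    # (5, 1, 1) ones tensor broadcasts to (5, batch, dim), i.e. x stacked 5 times.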
class PadLayer(Layer):
def __init__(self, required):
self.required = required
Layer.__init__(self, 0, 0, None)
def fprop(self, x):
if_longer = x[:self.required]
padding = ReplicateLayer(TT.max([1, self.required - x.shape[0]]))(x[-1]).out
if_shorter = TT.concatenate([x, padding])
diff = x.shape[0] - self.required
self.out = ifelse(diff < 0, if_shorter, if_longer)
return self.out
class ZeroLayer(Layer):
def fprop(self, x):
self.out = TT.zeros(x.shape)
return self.out
def none_if_zero(x):
if x == 0:
return None
return x
class Maxout(object):
def __init__(self, maxout_part):
self.maxout_part = maxout_part
def __call__(self, x):
shape = x.shape
if x.ndim == 1:
shape1 = TT.cast(shape[0] / self.maxout_part, 'int64')
shape2 = TT.cast(self.maxout_part, 'int64')
x = x.reshape([shape1, shape2])
x = x.max(1)
else:
shape1 = TT.cast(shape[1] / self.maxout_part, 'int64')
shape2 = TT.cast(self.maxout_part, 'int64')
x = x.reshape([shape[0], shape1, shape2])
x = x.max(2)
return x
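    # Illustrative example (added comment): with maxout_part=2 a 1-d input
    # [1, 4, 3, 2] is reshaped to [[1, 4], [3, 2]] and max(1) yields [4, 3].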
def prefix_lookup(state, p, s):
if '%s_%s'%(p,s) in state:
return state['%s_%s'%(p, s)]
return state[s]
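# Illustrative note (added comment): prefix_lookup(state, 'enc', 'rec_layer')
# prefers state['enc_rec_layer'] and falls back to state['rec_layer'].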
class EncoderDecoderBase(object):
def _create_embedding_layers(self):
logger.debug("_create_embedding_layers")
self.approx_embedder = MultiLayer(
self.rng,
n_in=self.state['n_sym_source']
if self.prefix.find("enc") >= 0
else self.state['n_sym_target'],
n_hids=[self.state['rank_n_approx']],
activation=[self.state['rank_n_activ']],
name='{}_approx_embdr'.format(self.prefix),
**self.default_kwargs)
# We have 3 embeddings for each word in each level,
# the one used as input,
# the one used to control resetting gate,
# the one used to control update gate.
self.input_embedders = [lambda x : 0] * self.num_levels
self.reset_embedders = [lambda x : 0] * self.num_levels
self.update_embedders = [lambda x : 0] * self.num_levels
embedder_kwargs = dict(self.default_kwargs)
embedder_kwargs.update(dict(
n_in=self.state['rank_n_approx'],
n_hids=[self.state['dim'] * self.state['dim_mult']],
activation=['lambda x:x']))
for level in range(self.num_levels):
self.input_embedders[level] = MultiLayer(
self.rng,
name='{}_input_embdr_{}'.format(self.prefix, level),
**embedder_kwargs)
if prefix_lookup(self.state, self.prefix, 'rec_gating'):
self.update_embedders[level] = MultiLayer(
self.rng,
learn_bias=False,
name='{}_update_embdr_{}'.format(self.prefix, level),
**embedder_kwargs)
if prefix_lookup(self.state, self.prefix, 'rec_reseting'):
self.reset_embedders[level] = MultiLayer(
self.rng,
learn_bias=False,
name='{}_reset_embdr_{}'.format(self.prefix, level),
**embedder_kwargs)
def _create_inter_level_layers(self):
logger.debug("_create_inter_level_layers")
inter_level_kwargs = dict(self.default_kwargs)
inter_level_kwargs.update(
n_in=self.state['dim'],
n_hids=self.state['dim'] * self.state['dim_mult'],
activation=['lambda x:x'])
self.inputers = [0] * self.num_levels
self.reseters = [0] * self.num_levels
self.updaters = [0] * self.num_levels
for level in range(1, self.num_levels):
self.inputers[level] = MultiLayer(self.rng,
name="{}_inputer_{}".format(self.prefix, level),
**inter_level_kwargs)
if prefix_lookup(self.state, self.prefix, 'rec_reseting'):
self.reseters[level] = MultiLayer(self.rng,
name="{}_reseter_{}".format(self.prefix, level),
**inter_level_kwargs)
if prefix_lookup(self.state, self.prefix, 'rec_gating'):
self.updaters[level] = MultiLayer(self.rng,
name="{}_updater_{}".format(self.prefix, level),
**inter_level_kwargs)
def _create_transition_layers(self):
logger.debug("_create_transition_layers")
self.transitions = []
rec_layer = eval(prefix_lookup(self.state, self.prefix, 'rec_layer'))
add_args = dict()
if rec_layer == RecurrentLayerWithSearch:
add_args = dict(c_dim=self.state['c_dim'])
for level in range(self.num_levels):
self.transitions.append(rec_layer(
self.rng,
n_hids=self.state['dim'],
activation=prefix_lookup(self.state, self.prefix, 'activ'),
bias_scale=self.state['bias'],
init_fn=(self.state['rec_weight_init_fn']
if not self.skip_init
else "sample_zeros"),
scale=prefix_lookup(self.state, self.prefix, 'rec_weight_scale'),
weight_noise=self.state['weight_noise_rec'],
dropout=self.state['dropout_rec'],
gating=prefix_lookup(self.state, self.prefix, 'rec_gating'),
gater_activation=prefix_lookup(self.state, self.prefix, 'rec_gater'),
reseting=prefix_lookup(self.state, self.prefix, 'rec_reseting'),
reseter_activation=prefix_lookup(self.state, self.prefix, 'rec_reseter'),
name='{}_transition_{}'.format(self.prefix, level),
**add_args))
class Encoder(EncoderDecoderBase):
def __init__(self, state, rng, prefix='enc', skip_init=False):
self.state = state
self.rng = rng
self.prefix = prefix
self.skip_init = skip_init
self.num_levels = self.state['encoder_stack']
# support multiple gating/memory units
if 'dim_mult' not in self.state:
self.state['dim_mult'] = 1.
if 'hid_mult' not in self.state:
self.state['hid_mult'] = 1.
def create_layers(self):
""" Create all elements of Encoder's computation graph"""
self.default_kwargs = dict(
init_fn=self.state['weight_init_fn'] if not self.skip_init else "sample_zeros",
weight_noise=self.state['weight_noise'],
scale=self.state['weight_scale'])
self._create_embedding_layers()
self._create_transition_layers()
self._create_inter_level_layers()
self._create_representation_layers()
def _create_representation_layers(self):
logger.debug("_create_representation_layers")
# If we have a stack of RNN, then their last hidden states
# are combined with a maxout layer.
self.repr_contributors = [None] * self.num_levels
for level in range(self.num_levels):
self.repr_contributors[level] = MultiLayer(
self.rng,
n_in=self.state['dim'],
n_hids=[self.state['dim'] * self.state['maxout_part']],
activation=['lambda x: x'],
name="{}_repr_contrib_{}".format(self.prefix, level),
**self.default_kwargs)
self.repr_calculator = UnaryOp(
activation=eval(self.state['unary_activ']),
name="{}_repr_calc".format(self.prefix))
def build_encoder(self, x,
x_mask=None,
use_noise=False,
approx_embeddings=None,
return_hidden_layers=False):
"""Create the computational graph of the RNN Encoder
:param x:
input variable, either vector of word indices or
matrix of word indices, where each column is a sentence
:param x_mask:
when x is a matrix and input sequences are
of variable length, this 1/0 matrix is used to specify
the matrix positions where the input actually is
:param use_noise:
turns on addition of noise to weights
(UNTESTED)
:param approx_embeddings:
forces encoder to use given embeddings instead of its own
:param return_hidden_layers:
if True, encoder returns all the activations of the hidden layer
(WORKS ONLY IN NON-HIERARCHICAL CASE)
"""
# Low rank embeddings of all the input words.
# Shape in case of matrix input:
# (max_seq_len * batch_size, rank_n_approx),
# where max_seq_len is the maximum length of batch sequences.
# Here and later n_words = max_seq_len * batch_size.
# Shape in case of vector input:
# (seq_len, rank_n_approx)
if not approx_embeddings:
approx_embeddings = self.approx_embedder(x)
# Low rank embeddings are projected to contribute
# to input, reset and update signals.
# All the shapes: (n_words, dim)
input_signals = []
reset_signals = []
update_signals = []
for level in range(self.num_levels):
input_signals.append(self.input_embedders[level](approx_embeddings))
update_signals.append(self.update_embedders[level](approx_embeddings))
reset_signals.append(self.reset_embedders[level](approx_embeddings))
# Hidden layers.
# Shape in case of matrix input: (max_seq_len, batch_size, dim)
# Shape in case of vector input: (seq_len, dim)
hidden_layers = []
for level in range(self.num_levels):
# Each hidden layer (except the bottom one) receives
# input, reset and update signals from below.
# FIXME above statement is not correct, should be:
# each input, reset and update gate,
# except for those at time 0,
# takes previous hidden layer as input
# All the shapes: (n_words, dim)
if level > 0:
# encoder
input_signals[level] += self.inputers[level](hidden_layers[-1])
update_signals[level] += self.updaters[level](hidden_layers[-1])
reset_signals[level] += self.reseters[level](hidden_layers[-1])
# transitions are RecurrentLayers
hidden_layers.append(self.transitions[level](
input_signals[level],
nsteps=x.shape[0],
batch_size=x.shape[1] if x.ndim == 2 else 1,
mask=x_mask,
gater_below=none_if_zero(update_signals[level]),
reseter_below=none_if_zero(reset_signals[level]),
use_noise=use_noise))
if return_hidden_layers:
assert self.state['encoder_stack'] == 1
return hidden_layers[0]
        # If we have no stack of RNNs but only a single one,
        # then the last hidden state is used as a representation.
# Return value shape in case of matrix input:
# (batch_size, dim)
# Return value shape in case of vector input:
# (dim,)
if self.num_levels == 1 or self.state['take_top']:
c = LastState()(hidden_layers[-1])
if c.out.ndim == 2:
c.out = c.out[:,:self.state['dim']]
else:
c.out = c.out[:self.state['dim']]
return c
# If we have a stack of RNN, then their last hidden states
# are combined with a maxout layer.
# Return value however has the same shape.
contributions = []
for level in range(self.num_levels):
contributions.append(self.repr_contributors[level](
LastState()(hidden_layers[level])))
# I do not know a good starting value for sum
# concat all num_levels last hidden layers (after repr_contributors)
c = self.repr_calculator(sum(contributions[1:], contributions[0]))
return c
class Decoder(EncoderDecoderBase):
EVALUATION = 0
SAMPLING = 1
BEAM_SEARCH = 2
def __init__(self, state, rng, prefix='dec',
skip_init=False, compute_alignment=False):
self.state = state
self.rng = rng
self.prefix = prefix
self.skip_init = skip_init
self.compute_alignment = compute_alignment
        # Actually there is a problem here -
        # we don't distinguish between the number of input layers
        # and the number of output layers.
self.num_levels = self.state['decoder_stack']
if 'dim_mult' not in self.state:
self.state['dim_mult'] = 1.
def create_layers(self):
""" Create all elements of Decoder's computation graph"""
self.default_kwargs = dict(
init_fn=self.state['weight_init_fn'] if not self.skip_init else "sample_zeros",
weight_noise=self.state['weight_noise'],
scale=self.state['weight_scale'])
self._create_embedding_layers()
self._create_transition_layers()
self._create_inter_level_layers()
self._create_initialization_layers()
self._create_decoding_layers()
self._create_readout_layers()
if self.state['search']:
assert self.num_levels == 1
self.transitions[0].set_decoding_layers(
self.decode_inputers[0],
self.decode_reseters[0],
self.decode_updaters[0])
def _create_initialization_layers(self):
logger.debug("_create_initialization_layers")
self.initializers = [ZeroLayer()] * self.num_levels
if self.state['bias_code']:
for level in range(self.num_levels):
self.initializers[level] = MultiLayer(
self.rng,
n_in=self.state['dim'],
n_hids=[self.state['dim'] * self.state['hid_mult']],
activation=[prefix_lookup(self.state, 'dec', 'activ')],
bias_scale=[self.state['bias']],
name='{}_initializer_{}'.format(self.prefix, level),
**self.default_kwargs)
def _create_decoding_layers(self):
logger.debug("_create_decoding_layers")
self.decode_inputers = [lambda x : 0] * self.num_levels
self.decode_reseters = [lambda x : 0] * self.num_levels
self.decode_updaters = [lambda x : 0] * self.num_levels
# self.back_decode_inputers = [lambda x : 0] * self.num_levels
# self.back_decode_reseters = [lambda x : 0] * self.num_levels
# self.back_decode_updaters = [lambda x : 0] * self.num_levels
decoding_kwargs = dict(self.default_kwargs)
decoding_kwargs.update(dict(
n_in=self.state['c_dim'],
n_hids=self.state['dim'] * self.state['dim_mult'],
activation=['lambda x:x'],
learn_bias=False))
if self.state['decoding_inputs']:
# use context from encoder
for level in range(self.num_levels):
# Input contributions
self.decode_inputers[level] = MultiLayer(
self.rng,
name='{}_dec_inputter_{}'.format(self.prefix, level),
**decoding_kwargs)
# Update gate contributions
if prefix_lookup(self.state, 'dec', 'rec_gating'):
self.decode_updaters[level] = MultiLayer(
self.rng,
name='{}_dec_updater_{}'.format(self.prefix, level),
**decoding_kwargs)
# Reset gate contributions
if prefix_lookup(self.state, 'dec', 'rec_reseting'):
self.decode_reseters[level] = MultiLayer(
self.rng,
name='{}_dec_reseter_{}'.format(self.prefix, level),
**decoding_kwargs)
def _create_readout_layers(self):
softmax_layer = self.state['softmax_layer'] if 'softmax_layer' in self.state \
else 'SoftmaxLayer'
logger.debug("_create_readout_layers")
readout_kwargs = dict(self.default_kwargs)
readout_kwargs.update(dict(
n_hids=self.state['dim'],
activation='lambda x: x',
))
self.repr_readout = MultiLayer(
self.rng,
n_in=self.state['c_dim'],
learn_bias=False,
name='{}_repr_readout'.format(self.prefix),
**readout_kwargs)
# Attention - this is the only readout layer
# with trainable bias. Should be careful with that.
self.hidden_readouts = [None] * self.num_levels
for level in range(self.num_levels):
self.hidden_readouts[level] = MultiLayer(
self.rng,
n_in=self.state['dim'],
name='{}_hid_readout_{}'.format(self.prefix, level),
**readout_kwargs)
self.prev_word_readout = 0
if self.state['bigram']:
self.prev_word_readout = MultiLayer(
self.rng,
n_in=self.state['rank_n_approx'],
n_hids=self.state['dim'],
activation=['lambda x:x'],
learn_bias=False,
name='{}_prev_readout_{}'.format(self.prefix, level),
**self.default_kwargs)
if self.state['deep_out']:
act_layer = UnaryOp(activation=eval(self.state['unary_activ']))
drop_layer = DropOp(rng=self.rng, dropout=self.state['dropout'])
self.output_nonlinearities = [act_layer, drop_layer]
self.output_layer = eval(softmax_layer)(
self.rng,
n_in = self.state['dim'] / self.state['maxout_part'],
n_out = self.state['n_sym_target'],
sparsity=-1,
rank_n_approx=self.state['rank_n_approx'],
name='{}_deep_softmax'.format(self.prefix),
use_nce=self.state['use_nce'] if 'use_nce' in self.state else False,
**self.default_kwargs)
else:
self.output_nonlinearities = []
self.output_layer = eval(softmax_layer)(
self.rng,
n_in = self.state['dim'],
n_out = self.state['n_sym_target'],
sparsity=-1,
rank_n_approx=self.state['rank_n_approx'],
name='dec_softmax',
sum_over_time=True,
use_nce=self.state['use_nce'] if 'use_nce' in self.state else False,
**self.default_kwargs)
def build_decoder(self, c, y,
c_mask=None,
y_mask=None,
step_num=None,
mode=EVALUATION,
given_init_states=None,
T=1):
"""Create the computational graph of the RNN Decoder.
:param c:
representations produced by an encoder.
(n_samples, dim) matrix if mode == sampling or
(max_seq_len, batch_size, dim) matrix if mode == evaluation
:param c_mask:
if mode == evaluation a 0/1 matrix identifying valid positions in c
:param y:
if mode == evaluation
target sequences, matrix of word indices of shape (max_seq_len, batch_size),
where each column is a sequence
if mode != evaluation
a vector of previous words of shape (n_samples,)
:param y_mask:
if mode == evaluation a 0/1 matrix determining lengths
of the target sequences, must be None otherwise
:param mode:
chooses on of three modes: evaluation, sampling and beam_search
:param given_init_states:
for sampling and beam_search. A list of hidden states
matrices for each layer, each matrix is (n_samples, dim)
:param T:
sampling temperature
"""
# Check parameter consistency
if mode == Decoder.EVALUATION:
assert not given_init_states
else:
assert not y_mask
assert given_init_states
if mode == Decoder.BEAM_SEARCH:
assert T == 1
        # For log-likelihood evaluation the representation is replicated for convenience
        # (not when the backward RNN is used)
# Shape if mode == evaluation
# (max_seq_len, batch_size, dim)
# Shape if mode != evaluation
# (n_samples, dim)
if not self.state['search']:
if mode == Decoder.EVALUATION:
c = PadLayer(y.shape[0])(c)
else:
assert step_num
c_pos = TT.minimum(step_num, c.shape[0] - 1)
# Low rank embeddings of all the input words.
# Shape if mode == evaluation
# (n_words, rank_n_approx),
# Shape if mode != evaluation
# (n_samples, rank_n_approx)
approx_embeddings = self.approx_embedder(y)
# Low rank embeddings are projected to contribute
# to input, reset and update signals.
# All the shapes if mode == evaluation:
# (n_words, dim)
# where: n_words = max_seq_len * batch_size
# All the shape if mode != evaluation:
# (n_samples, dim)
input_signals = []
reset_signals = []
update_signals = []
for level in range(self.num_levels):
# Contributions directly from input words.
input_signals.append(self.input_embedders[level](approx_embeddings))
update_signals.append(self.update_embedders[level](approx_embeddings))
reset_signals.append(self.reset_embedders[level](approx_embeddings))
# Contributions from the encoded source sentence.
if not self.state['search']:
current_c = c if mode == Decoder.EVALUATION else c[c_pos]
input_signals[level] += self.decode_inputers[level](current_c)
update_signals[level] += self.decode_updaters[level](current_c)
reset_signals[level] += self.decode_reseters[level](current_c)
# Hidden layers' initial states.
# Shapes if mode == evaluation:
# (batch_size, dim)
# Shape if mode != evaluation:
# (n_samples, dim)
init_states = given_init_states
if not init_states:
init_states = []
for level in range(self.num_levels):
init_c = c[0, :, -self.state['dim']:]
init_states.append(self.initializers[level](init_c))
# Hidden layers' states.
# Shapes if mode == evaluation:
# (seq_len, batch_size, dim)
# Shapes if mode != evaluation:
# (n_samples, dim)
hidden_layers = []
contexts = []
# Default value for alignment must be smth computable
alignment = TT.zeros((1,))
for level in range(self.num_levels):
if level > 0:
# decoder
input_signals[level] += self.inputers[level](hidden_layers[level - 1])
update_signals[level] += self.updaters[level](hidden_layers[level - 1])
reset_signals[level] += self.reseters[level](hidden_layers[level - 1])
add_kwargs = (dict(state_before=init_states[level])
if mode != Decoder.EVALUATION
else dict(init_state=init_states[level],
batch_size=y.shape[1] if y.ndim == 2 else 1,
nsteps=y.shape[0]))
if self.state['search']:
add_kwargs['c'] = c
add_kwargs['c_mask'] = c_mask
add_kwargs['return_alignment'] = self.compute_alignment
if mode != Decoder.EVALUATION:
add_kwargs['step_num'] = step_num
result = self.transitions[level](
input_signals[level],
mask=y_mask,
gater_below=none_if_zero(update_signals[level]),
reseter_below=none_if_zero(reset_signals[level]),
one_step=mode != Decoder.EVALUATION,
use_noise=mode == Decoder.EVALUATION,
**add_kwargs)
if self.state['search']:
if self.compute_alignment:
#This implicitly wraps each element of result.out with a Layer to keep track of the parameters.
#It is equivalent to h=result[0], ctx=result[1] etc.
h, ctx, alignment = result
if mode == Decoder.EVALUATION:
alignment = alignment.out
else:
#This implicitly wraps each element of result.out with a Layer to keep track of the parameters.
#It is equivalent to h=result[0], ctx=result[1]
h, ctx = result
else:
h = result
if mode == Decoder.EVALUATION:
ctx = c
else:
ctx = ReplicateLayer(given_init_states[0].shape[0])(c[c_pos]).out
hidden_layers.append(h)
contexts.append(ctx)
        # In hidden_layers we do not have the initial state, but we need it.
# Instead of it we have the last one, which we do not need.
# So what we do is discard the last one and prepend the initial one.
if mode == Decoder.EVALUATION:
for level in range(self.num_levels):
hidden_layers[level].out = TT.concatenate([
TT.shape_padleft(init_states[level].out),
hidden_layers[level].out])[:-1]
# The output representation to be fed in softmax.
# Shape if mode == evaluation
# (n_words, dim_r)
# Shape if mode != evaluation
# (n_samples, dim_r)
# ... where dim_r depends on 'deep_out' option.
readout = self.repr_readout(contexts[0])
for level in range(self.num_levels):
if mode != Decoder.EVALUATION:
read_from = init_states[level]
else:
read_from = hidden_layers[level]
read_from_var = read_from if type(read_from) == theano.tensor.TensorVariable else read_from.out
if read_from_var.ndim == 3:
read_from_var = read_from_var[:,:,:self.state['dim']]
else:
read_from_var = read_from_var[:,:self.state['dim']]
if type(read_from) != theano.tensor.TensorVariable:
read_from.out = read_from_var
else:
read_from = read_from_var
readout += self.hidden_readouts[level](read_from)
if self.state['bigram']:
if mode != Decoder.EVALUATION:
# state['check_first_word'] should always be true
check_first_word = (y > 0
if self.state['check_first_word']
else TT.ones((y.shape[0]), dtype="float32"))
# padright is necessary as we want to multiply each row with a certain scalar
readout += TT.shape_padright(check_first_word) * self.prev_word_readout(approx_embeddings).out
else:
if y.ndim == 1:
readout += Shift()(self.prev_word_readout(approx_embeddings).reshape(
(y.shape[0], 1, self.state['dim'])))
else:
# This place needs explanation. When prev_word_readout is applied to
# approx_embeddings the resulting shape is
# (n_batches * sequence_length, repr_dimensionality). We first
# transform it into 3D tensor to shift forward in time. Then
# reshape it back.
readout += Shift()(self.prev_word_readout(approx_embeddings).reshape(
(y.shape[0], y.shape[1], self.state['dim']))).reshape(
readout.out.shape)
for fun in self.output_nonlinearities:
readout = fun(readout)
if mode == Decoder.SAMPLING:
sample = self.output_layer.get_sample(
state_below=readout,
temp=T)
# Current SoftmaxLayer.get_cost is stupid,
# that's why we have to reshape a lot.
self.output_layer.get_cost(
state_below=readout.out,
temp=T,
target=sample)
log_prob = self.output_layer.cost_per_sample
return [sample] + [log_prob] + hidden_layers
elif mode == Decoder.BEAM_SEARCH:
return self.output_layer(
state_below=readout.out,
temp=T).out
elif mode == Decoder.EVALUATION:
return (self.output_layer.train(
state_below=readout,
target=y,
mask=y_mask,
reg=None),
alignment)
else:
raise Exception("Unknown mode for build_decoder")
def sampling_step(self, *args):
"""
Implements one step of sampling
"""
"""
Args are necessary since the number (and the order) of arguments can vary
"""
args = iter(args)
        # Arguments that correspond to scan's "sequences" parameter:
step_num = next(args)
assert step_num.ndim == 0
        # Arguments that correspond to scan's "outputs" parameter:
prev_word = next(args)
assert prev_word.ndim == 1
# skip the previous word log probability
assert next(args).ndim == 1
prev_hidden_states = [next(args) for k in range(self.num_levels)]
assert prev_hidden_states[0].ndim == 2
# Arguments that correspond to scan's "non_sequences":
c = next(args)
assert c.ndim == 2
T = next(args)
assert T.ndim == 0
decoder_args = dict(given_init_states=prev_hidden_states, T=T, c=c)
sample, log_prob = self.build_decoder(y=prev_word, step_num=step_num, mode=Decoder.SAMPLING, **decoder_args)[:2]
hidden_states = self.build_decoder(y=sample, step_num=step_num, mode=Decoder.SAMPLING, **decoder_args)[2:]
return [sample, log_prob] + hidden_states
def build_initializers(self, c):
return [init(c).out for init in self.initializers]
def build_sampler(self, n_samples, n_steps, T, c):
states = [TT.zeros(shape=(n_samples,), dtype='int64'),
TT.zeros(shape=(n_samples,), dtype='float32')]
init_c = c[0, -self.state['dim']:]
states += [ReplicateLayer(n_samples)(init(init_c).out).out for init in self.initializers]
if not self.state['search']:
c = PadLayer(n_steps)(c).out
# Pad with final states
non_sequences = [c, T]
outputs, updates = theano.scan(self.sampling_step,
outputs_info=states,
non_sequences=non_sequences,
sequences=[TT.arange(n_steps, dtype="int64")],
n_steps=n_steps,
name="{}_sampler_scan".format(self.prefix))
return (outputs[0], outputs[1]), updates
def build_next_probs_predictor(self, c, step_num, y, init_states):
return self.build_decoder(c, y, mode=Decoder.BEAM_SEARCH,
given_init_states=init_states, step_num=step_num)
def build_next_states_computer(self, c, step_num, y, init_states):
return self.build_decoder(c, y, mode=Decoder.SAMPLING,
given_init_states=init_states, step_num=step_num)[2:]
class RNNEncoderDecoder(object):
"""This class encapsulates the translation model.
The expected usage pattern is:
>>> encdec = RNNEncoderDecoder(...)
>>> encdec.build(...)
>>> useful_smth = encdec.create_useful_smth(...)
    Functions from the create_smth family (except create_lm_model),
    when called, compile and return functions that do useful stuff.
"""
def __init__(self, state, rng,
skip_init=False,
compute_alignment=False):
"""Constructor.
:param state:
A state in the usual groundhog sense.
:param rng:
Random number generator. Something like numpy.random.RandomState(seed).
:param skip_init:
If True, all the layers are initialized with zeros. Saves time spent on
parameter initialization if they are loaded later anyway.
:param compute_alignment:
If True, the alignment is returned by the decoder.
"""
self.state = state
self.rng = rng
self.skip_init = skip_init
self.compute_alignment = compute_alignment
def build(self):
logger.debug("Create input variables")
self.x = TT.lmatrix('x')
self.x_mask = TT.matrix('x_mask')
self.y = TT.lmatrix('y')
self.y_mask = TT.matrix('y_mask')
self.inputs = [self.x, self.y, self.x_mask, self.y_mask]
# Annotation for the log-likelihood computation
training_c_components = []
logger.debug("Create forward encoder")
self.encoder = Encoder(self.state, self.rng,
prefix="enc",
skip_init=self.skip_init)
self.encoder.create_layers()
logger.debug("Build forward encoding computation graph")
forward_training_c = self.encoder.build_encoder(
self.x, self.x_mask,
use_noise=True,
return_hidden_layers=True)
logger.debug("Create backward encoder")
self.backward_encoder = Encoder(self.state, self.rng,
prefix="back_enc",
skip_init=self.skip_init)
self.backward_encoder.create_layers()
logger.debug("Build backward encoding computation graph")
backward_training_c = self.backward_encoder.build_encoder(
self.x[::-1],
self.x_mask[::-1],
use_noise=True,
approx_embeddings=self.encoder.approx_embedder(self.x[::-1]),
return_hidden_layers=True)
# Reverse time for backward representations.
backward_training_c.out = backward_training_c.out[::-1]
if self.state['forward']:
training_c_components.append(forward_training_c)
if self.state['last_forward']:
training_c_components.append(
ReplicateLayer(self.x.shape[0])(forward_training_c[-1]))
if self.state['backward']:
training_c_components.append(backward_training_c)
if self.state['last_backward']:
training_c_components.append(ReplicateLayer(self.x.shape[0])
(backward_training_c[0]))
self.state['c_dim'] = len(training_c_components) * self.state['dim']
logger.debug("Create decoder")
self.decoder = Decoder(self.state, self.rng,
skip_init=self.skip_init, compute_alignment=self.compute_alignment)
self.decoder.create_layers()
logger.debug("Build log-likelihood computation graph")
self.predictions, self.alignment = self.decoder.build_decoder(
c=Concatenate(axis=2)(*training_c_components), c_mask=self.x_mask,
y=self.y, y_mask=self.y_mask)
# Annotation for sampling
sampling_c_components = []
logger.debug("Build sampling computation graph")
self.sampling_x = TT.lvector("sampling_x")
self.n_samples = TT.lscalar("n_samples")
self.n_steps = TT.lscalar("n_steps")
self.T = TT.scalar("T")
self.forward_sampling_c = self.encoder.build_encoder(
self.sampling_x,
return_hidden_layers=True).out
self.backward_sampling_c = self.backward_encoder.build_encoder(
self.sampling_x[::-1],
approx_embeddings=self.encoder.approx_embedder(self.sampling_x[::-1]),
return_hidden_layers=True).out[::-1]
if self.state['forward']:
sampling_c_components.append(self.forward_sampling_c)
if self.state['last_forward']:
sampling_c_components.append(ReplicateLayer(self.sampling_x.shape[0])
(self.forward_sampling_c[-1]))
if self.state['backward']:
sampling_c_components.append(self.backward_sampling_c)
if self.state['last_backward']:
sampling_c_components.append(ReplicateLayer(self.sampling_x.shape[0])
(self.backward_sampling_c[0]))
self.sampling_c = Concatenate(axis=1)(*sampling_c_components).out
(self.sample, self.sample_log_prob), self.sampling_updates =\
self.decoder.build_sampler(self.n_samples, self.n_steps, self.T,
c=self.sampling_c)
logger.debug("Create auxiliary variables")
self.c = TT.matrix("c")
self.step_num = TT.lscalar("step_num")
self.current_states = [TT.matrix("cur_{}".format(i))
for i in range(self.decoder.num_levels)]
self.gen_y = TT.lvector("gen_y")
def create_lm_model(self):
if hasattr(self, 'lm_model'):
return self.lm_model
self.lm_model = LM_Model(
cost_layer=self.predictions,
sample_fn=self.create_sampler(),
weight_noise_amount=self.state['weight_noise_amount'],
indx_word=self.state['indx_word_target'],
indx_word_src=self.state['indx_word'],
rng=self.rng)
self.lm_model.load_dict(self.state)
logger.debug("Model params:\n{}".format(
pprint.pformat(sorted([p.name for p in self.lm_model.params]))))
return self.lm_model
def create_representation_computer(self):
if not hasattr(self, "repr_fn"):
self.repr_fn = theano.function(
inputs=[self.sampling_x],
outputs=[self.sampling_c],
name="repr_fn")
return self.repr_fn
def create_initializers(self):
if not hasattr(self, "init_fn"):
init_c = self.sampling_c[0, -self.state['dim']:]
self.init_fn = theano.function(
inputs=[self.sampling_c],
outputs=self.decoder.build_initializers(init_c),
name="init_fn")
return self.init_fn
def create_sampler(self, many_samples=False):
if hasattr(self, 'sample_fn'):
return self.sample_fn
logger.debug("Compile sampler")
self.sample_fn = theano.function(
inputs=[self.n_samples, self.n_steps, self.T, self.sampling_x],
outputs=[self.sample, self.sample_log_prob],
updates=self.sampling_updates,
name="sample_fn")
if not many_samples:
def sampler(*args):
return map(lambda x : x.squeeze(), self.sample_fn(1, *args))
return sampler
return self.sample_fn
def create_scorer(self, batch=False):
if not hasattr(self, 'score_fn'):
logger.debug("Compile scorer")
self.score_fn = theano.function(
inputs=self.inputs,
outputs=[-self.predictions.cost_per_sample],
name="score_fn")
if batch:
return self.score_fn
def scorer(x, y):
x_mask = numpy.ones(x.shape[0], dtype="float32")
y_mask = numpy.ones(y.shape[0], dtype="float32")
return self.score_fn(x[:, None], y[:, None],
x_mask[:, None], y_mask[:, None])
return scorer
def create_next_probs_computer(self):
if not hasattr(self, 'next_probs_fn'):
self.next_probs_fn = theano.function(
inputs=[self.c, self.step_num, self.gen_y] + self.current_states,
outputs=[self.decoder.build_next_probs_predictor(
self.c, self.step_num, self.gen_y, self.current_states)],
name="next_probs_fn")
return self.next_probs_fn
def create_next_states_computer(self):
if not hasattr(self, 'next_states_fn'):
self.next_states_fn = theano.function(
inputs=[self.c, self.step_num, self.gen_y] + self.current_states,
outputs=self.decoder.build_next_states_computer(
self.c, self.step_num, self.gen_y, self.current_states),
name="next_states_fn")
return self.next_states_fn
def create_probs_computer(self, return_alignment=False):
if not hasattr(self, 'probs_fn'):
logger.debug("Compile probs computer")
self.probs_fn = theano.function(
inputs=self.inputs,
outputs=[self.predictions.word_probs, self.alignment],
name="probs_fn")
def probs_computer(x, y):
x_mask = numpy.ones(x.shape[0], dtype="float32")
y_mask = numpy.ones(y.shape[0], dtype="float32")
probs, alignment = self.probs_fn(x[:, None], y[:, None],
x_mask[:, None], y_mask[:, None])
if return_alignment:
return probs, alignment
else:
return probs
return probs_computer
def parse_input(state, word2idx, line, raise_unk=False, idx2word=None, unk_sym=-1, null_sym=-1):
if unk_sym < 0:
unk_sym = state['unk_sym_source']
if null_sym < 0:
null_sym = state['null_sym_source']
seqin = line.split()
seqlen = len(seqin)
seq = numpy.zeros(seqlen+1, dtype='int64')
for idx,sx in enumerate(seqin):
seq[idx] = word2idx.get(sx, unk_sym)
if seq[idx] >= state['n_sym_source']:
seq[idx] = unk_sym
if seq[idx] == unk_sym and raise_unk:
raise Exception("Unknown word {}".format(sx))
seq[-1] = null_sym
if idx2word:
idx2word[null_sym] = '<eos>'
idx2word[unk_sym] = state['oov']
parsed_in = [idx2word[sx] for sx in seq]
return seq, " ".join(parsed_in)
return seq, seqin
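# Illustrative sketch (not part of the original file), assuming
# state['unk_sym_source'] = 1, state['null_sym_source'] = 0, and a toy
# vocabulary word2idx = {'the': 4, 'cat': 7}:
#   parse_input(state, word2idx, "the cat sat")
#   -> (array([4, 7, 1, 0]), ['the', 'cat', 'sat'])
# i.e. known words map to their indices, 'sat' falls back to the unknown
# symbol, and the null symbol is appended at the end.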
| {
"content_hash": "b84b6bd187ccf951a8642c48d8d607f7",
"timestamp": "",
"source": "github",
"line_count": 1544,
"max_line_length": 120,
"avg_line_length": 39.88665803108808,
"alnum_prop": 0.5460745311358285,
"repo_name": "sjtufs/GroundHog",
"id": "076386baa4d6bb036f4a86745bf7a6aca03525d5",
"size": "61585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/nmt/encdec.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8023"
},
{
"name": "PHP",
"bytes": "372"
},
{
"name": "Perl",
"bytes": "32592"
},
{
"name": "Python",
"bytes": "489654"
},
{
"name": "Shell",
"bytes": "2189"
},
{
"name": "Smalltalk",
"bytes": "1892"
}
],
"symlink_target": ""
} |
'''
Select values from a dict of one-dimensional numpy arrays based on a range
(a Python tuple). The range is inclusive; with --exclude, values inside the
closed range (endpoints included) are dropped instead.
Usage:
dictrestrict.py [options] <input> <var> <range>
Options:
--output=OUTPUT -o OUTPUT Output to OUTPUT instead of stdout.
--exclude -x Exclude range instead of including it.
'''
import pickle as pickle;
import numpy as np;
from docopt import docopt;
opts = docopt(__doc__,help=True);
name = opts['<input>'];
var = opts['<var>'];
r = eval(opts['<range>']);
with open(name,'rb') as f:  # pickle files are binary
d=pickle.load(f);
good = (d[var] >= r[0]) & (d[var] <= r[1])
if opts['--exclude']:
good = np.logical_not(good);
#removing
for k in d.keys():
d[k] = d[k][good];
if opts['--output']:
with open(opts['--output'],"wb") as f:
pickle.dump(d,f,2);
pass;
else:
print(pickle.dumps(d,2));
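# Example invocation (hypothetical file and field names):
#   python dictrestrict.py shots.pkl energy "(0.5, 1.5)" -o shots_cut.pkl
# keeps only the entries whose d['energy'] lies in the closed range [0.5, 1.5].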
| {
"content_hash": "324998c985accb6bb45e268ba101e290",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 25.38235294117647,
"alnum_prop": 0.6060254924681344,
"repo_name": "noobermin/lspreader",
"id": "52cbbc7577b46c0ccf3ff36081f77c578649b3cd",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/dictrestrict.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62371"
},
{
"name": "Shell",
"bytes": "8780"
}
],
"symlink_target": ""
} |
import sys
import urllib
import urllib2
from lxml import html
import os
import glob
from pyquery import PyQuery
from hurry.filesize import size
reload(sys)
sys.setdefaultencoding("utf-8")
fileYml = '../_data/library.yml'
fileDir = '../assets/pdf/library/'
url = 'http://www.the-blockchain.com/docs/'
listUrl = url + 'index.php'
site = html.fromstring(urllib.urlopen(listUrl).read())
pq = PyQuery(site)
def download_file(url):
file_name = url.split('/')[-1]
file_name = file_name.replace(u'\u200b', '')
url = url.replace(u'\u200b', '%E2%80%8B')
url = url.replace(' ', '%20')
url = url.replace("'", '%E2%80%99')
u = urllib2.urlopen(url)
f = open(fileDir + file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
return file_size
if not os.path.exists(fileDir):
os.makedirs(fileDir)
else:
files = glob.glob(fileDir + "*")
for f in files:
os.remove(f)
rows = pq('td.thumbtext')
with open(fileYml, 'w') as f:
for row in rows:
filename = row.find('h2').find('a').attrib['href']
filesize = download_file(url + filename.replace(u'\u2019', "'"))
filedate = row.find('p').text.strip()
print "\nFilesize: " + size(filesize)
print "Filedate: " + filedate.encode('utf-8')
print "\n"
f.write('- title: ' + filename.replace('.pdf', '').replace(u'\u200b', '') + '\n')
f.write(' date: ' + filedate + '\n')
f.write(' size: ' + size(filesize) + '\n')
f.write(' file: ' + filename.replace(u'\u200b', '') + '\n')
f.write(' desc:\n')
f.write(' authors:\n')
f.write(' organization:\n')
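# Each downloaded document becomes one YAML entry in library.yml of the form
# (illustrative values only):
# - title: Example Paper
#   date: 2015-01-01
#   size: 1M
#   file: Example Paper.pdf
#   desc:
#   authors:
#   organization: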
| {
"content_hash": "bf62ca2dd9d2ac50f665c694f0a58b61",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 89,
"avg_line_length": 29.055555555555557,
"alnum_prop": 0.5769598470363289,
"repo_name": "sunnankar/wucorg",
"id": "55920d2d63b11c24206e833880ad6a982f6a0c19",
"size": "2572",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "scripts/extract_papers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "191423"
},
{
"name": "HTML",
"bytes": "2070119"
},
{
"name": "JavaScript",
"bytes": "126181"
},
{
"name": "PHP",
"bytes": "96"
},
{
"name": "Python",
"bytes": "2572"
}
],
"symlink_target": ""
} |
from functools import reduce
def factorial(x):
try:
# start the running product at 1 so that factorial(0) == 1
result = reduce(lambda i, j: i*j, range(1, x+1), 1)
return result
except (RuntimeError, TypeError, NameError):
return 0
def gamma(x):
return factorial(x-1)
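# Minimal usage sketch (added for illustration; the expected values follow from
# the definitions above, with the reduce initialised at 1 so factorial(0) == 1):
if __name__ == '__main__':
    print(factorial(0))  # 1
    print(factorial(5))  # 120
    print(gamma(5))      # gamma(5) = 4! = 24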
| {
"content_hash": "5fc2b6ad7d0c24a29691ebeb4f5c800e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 19.545454545454547,
"alnum_prop": 0.7069767441860465,
"repo_name": "peterhogan/python",
"id": "79ac4209b4e2904e1a6d729b44ab3f838a684ed7",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "probability/gamma_dist_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106668"
},
{
"name": "Shell",
"bytes": "438"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='FeedDownloaded',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('season', models.IntegerField()),
('episode', models.IntegerField()),
('re_filter', models.CharField(max_length=100)),
],
),
migrations.AlterUniqueTogether(
name='feeddownloaded',
unique_together=set([('season', 'episode', 're_filter')]),
),
]
| {
"content_hash": "465044d18998de3d7fdb44c2524fc9dc",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 29.24,
"alnum_prop": 0.5567715458276333,
"repo_name": "MiiRaGe/miilibrary",
"id": "861a45ec65c37c8c7c8e95882bd19cf91c169ea4",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mii_rss/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "222"
},
{
"name": "HTML",
"bytes": "14946"
},
{
"name": "Python",
"bytes": "226463"
},
{
"name": "Shell",
"bytes": "1581"
}
],
"symlink_target": ""
} |
"""
APIs for extending the python social auth pipeline
"""
import logging
from django.shortcuts import redirect
from django_redis import get_redis_connection
from rolepermissions.checkers import has_role
from social_core.exceptions import AuthException, AuthFailed
from backends.base import BaseEdxOAuth2
from backends.utils import update_email
from dashboard.api import CACHE_KEY_FAILURE_NUMS_BY_USER, FIELD_USER_ID_BASE_STR, CACHE_KEY_FAILED_USERS_NOT_TO_UPDATE
from micromasters.utils import now_in_utc
from profiles.models import Profile
from profiles.util import split_name
from roles.models import (
Instructor,
Staff,
)
log = logging.getLogger(__name__)
def update_profile_from_edx(backend, user, response, is_new, *args, **kwargs):
# pylint: disable=unused-argument
"""
Gets profile information from EDX and saves them in the user profile
Args:
backend (social.backends.oauth.BaseOAuth2): the python social auth backend
user (User): user object
response (dict): dictionary of the user information coming
from previous functions in the pipeline
is_new (bool): whether the authenticated user created a new local instance
Returns:
None
"""
# this function is completely skipped if the backend is not edX;
# the profile update below runs only for users that were just created
if not isinstance(backend, BaseEdxOAuth2):
return
if has_role(user, [Staff.ROLE_ID, Instructor.ROLE_ID]):
next_relative_url = "/learners"
else:
next_relative_url = "/dashboard"
next_url = backend.strategy.session.load().get('next') or backend.strategy.session.get('next')
if not next_url:
next_url = next_relative_url
backend.strategy.session_set('next', next_url)
user_profile_edx = kwargs.get('edx_profile')
update_email(user_profile_edx, user)
if not is_new:
return
try:
user_profile = Profile.objects.get(user=user)
except Profile.DoesNotExist:
# this should never happen, since the profile is created with a signal
# right after the user is created
log.error('No profile found for the user %s', user.username)
return
name = user_profile_edx.get('name', "")
user_profile.edx_name = name
user_profile.first_name, user_profile.last_name = split_name(name)
user_profile.preferred_name = name
user_profile.edx_bio = user_profile_edx.get('bio')
user_profile.country = user_profile_edx.get('country')
user_profile.edx_requires_parental_consent = user_profile_edx.get('requires_parental_consent')
user_profile.edx_level_of_education = user_profile_edx.get('level_of_education')
user_profile.edx_goals = user_profile_edx.get('goals')
user_profile.edx_language_proficiencies = user_profile_edx.get('language_proficiencies')
try:
user_profile.preferred_language = user_profile.edx_language_proficiencies[0]['code']
except (IndexError, ValueError, KeyError, TypeError):
pass
user_profile.gender = user_profile_edx.get('gender')
user_profile.edx_mailing_address = user_profile_edx.get('mailing_address')
user_profile.agreed_to_terms_of_service = True
user_profile.save()
log.debug(
'Profile for user "%s" updated with values from EDX %s',
user.username,
user_profile_edx
)
def check_edx_verified_email(backend, response, details, *args, **kwargs): # pylint: disable=unused-argument
"""Get account information to check if email was verified for account on edX"""
if not isinstance(backend, BaseEdxOAuth2):
return {}
username = details.get('username')
access_token = response.get('access_token')
if not access_token:
# this should never happen for the edx oauth provider, but just in case...
raise AuthException('Missing access token for the edX user {0}'.format(username))
user_profile_edx = backend.get_json(
backend.get_url('/api/user/v1/accounts/{0}'.format(username)),
headers={
"Authorization": "Bearer {}".format(access_token),
}
)
if not user_profile_edx.get('is_active'):
return redirect('verify-email')
return {'edx_profile': user_profile_edx}
def set_last_update(details, *args, **kwargs): # pylint: disable=unused-argument
"""
Pipeline function to add extra information about when the social auth
profile has been updated.
Args:
details (dict): dictionary of information about the user
Returns:
dict: updated details dictionary
"""
details['updated_at'] = now_in_utc().timestamp()
return details
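# For example (illustrative only): set_last_update({'username': 'jo'}) returns
# {'username': 'jo', 'updated_at': <current UTC time as a POSIX timestamp>}.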
def flush_redis_cache(*, user, **kwargs): # pylint: disable=unused-argument
"""
flush the redis cache on a new login
Args:
user (User): user object
"""
if not user:
return
# Update redis cache if user had invalid credentials
con = get_redis_connection("redis")
user_key = FIELD_USER_ID_BASE_STR.format(user.id)
con.hdel(CACHE_KEY_FAILURE_NUMS_BY_USER, user_key)
con.srem(CACHE_KEY_FAILED_USERS_NOT_TO_UPDATE, user.id)
def limit_one_auth_per_backend(
*, backend, user, strategy, uid, **kwargs # pylint: disable=unused-argument
):
"""Limit the user to one social auth account per backend"""
if not user:
return {}
user_storage = strategy.storage.user
social_auths = user_storage.get_social_auth_for_user(user, backend.name)
# if there's at least one social auth and any of them don't match the incoming
# uid, we have or are trying to add multiple accounts
if social_auths and any(auth.uid != uid for auth in social_auths):
raise AuthFailed(backend.name, "Another edX account is already linked to your MicroMasters account.")
return {}
| {
"content_hash": "5cb6bac61be32c9fcf6f3e289eef53b1",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 118,
"avg_line_length": 34.74251497005988,
"alnum_prop": 0.6849362288865908,
"repo_name": "mitodl/micromasters",
"id": "601f1b2ff7f34da767460c7b10d049f92ba42e89",
"size": "5802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backends/pipeline_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
} |
from multiprocessing import Pool
import numpy as np
import pandas as pd
import os
# Set defaults
dataDir = '/afs/cbs.mpg.de/projects/mar005_lsd-lemon-surf/probands'
fsDir = '/afs/cbs.mpg.de/projects/mar004_lsd-lemon-preproc/freesurfer'
subjects_file = '/scr/liberia1/data/individual_dist_label/subjects_rest_surf_1a1b2a2b_dist_incl_150714.txt'
out_file = '/scr/liberia1/data/individual_dist_label/res_individual_dist_label_nnb%s_%s_20150722.txt'
hemis = ['lh', 'rh']
n_neighbors_list = [100, 150, 50]
# ----------------------------------- functions -----------------------------------
def run_individual_dist_label(subject, hemi, n_neighbors):
import os, glob, subprocess, h5py
import numpy as np, pandas as pd, nibabel as nib
import nipype.interfaces.freesurfer as fs
from surfer import Brain
from sklearn.utils.arpack import eigsh
dataDir = '/afs/cbs.mpg.de/projects/mar005_lsd-lemon-surf/probands'
fsDir = '/afs/cbs.mpg.de/projects/mar004_lsd-lemon-preproc/freesurfer'
outDir = '/scr/liberia1/data/individual_dist_label'
def img2disc(data, foci_all=False, foci_dmn=False, labelfile=False, hemi='lh', filename='temp.png'):
brain = Brain('fsaverage5', hemi, 'inflated', curv=False)
brain.add_data(data, data.min(), data.max(), colormap="spectral", alpha=0.6)
if labelfile:
brain.add_label(labelfile, borders=True, color='grey')
if foci_all:
brain.add_foci(foci_all, coords_as_verts=True, scale_factor=.5, color='black')
if foci_dmn:
brain.add_foci(foci_dmn, coords_as_verts=True, scale_factor=.7, color='blue')
brain.save_montage(filename, order=['lat', 'med'], orientation='h', border_size=10)
# old
def runFiedler(conn):
# https://github.com/margulies/topography
# prep for embedding
K = (conn + 1) / 2.
v = np.sqrt(np.sum(K, axis=1))
A = K/(v[:, None] * v[None, :])
del K
A = np.squeeze(A * [A > 0])
# diffusion embedding
n_components_embedding = 2
lambdas, vectors = eigsh(A, k=n_components_embedding+1)
del A
lambdas = lambdas[::-1]
vectors = vectors[:, ::-1]
psi = vectors/vectors[:, 0][:, None]
lambdas = lambdas[1:] / (1 - lambdas[1:])
embedding = psi[:, 1:(n_components_embedding + 1 + 1)] * lambdas[:n_components_embedding+1][None, :]
return embedding
# new
def runAllFiedler(corr, cort, hemi='lh', n_neighbors=50):
def compute_diffusion_map(L, alpha=0.5, n_components=None, diffusion_time=0, verbose=False):
# from https://github.com/satra/mapalign/blob/master/mapalign/embed.py
import numpy as np
import scipy.sparse as sps
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
use_sparse = False
if sps.issparse(L):
use_sparse = True
if not _graph_is_connected(L):
raise ValueError('Graph is disconnected')
if verbose:
print 'checked conditions'
ndim = L.shape[0]
L_alpha = L.copy()
if alpha > 0:
if verbose:
print 'step2'
# Step 2
d = np.array(L_alpha.sum(axis=1)).flatten()
d_alpha = np.power(d, -alpha)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
else:
L_alpha = d_alpha[:, None] * L_alpha * d_alpha[None, :]
# Step 3
if verbose:
print 'step 3'
d_alpha = np.power(np.array(L_alpha.sum(axis=1)).flatten(), -1)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
else:
L_alpha = d_alpha[:, None] * L_alpha
M = L_alpha
from sklearn.utils.arpack import eigsh, eigs
# Step 4
if verbose:
print 'step 4'
func = eigs
if n_components is not None:
lambdas, vectors = func(M, k=n_components + 1)
else:
lambdas, vectors = func(M, k=max(2, int(np.sqrt(ndim))))
del M
if func == eigsh:
lambdas = lambdas[::-1]
vectors = vectors[:, ::-1]
else:
lambdas = np.real(lambdas)
vectors = np.real(vectors)
lambda_idx = np.argsort(lambdas)[::-1]
lambdas = lambdas[lambda_idx]
vectors = vectors[:, lambda_idx]
# Step 5
if verbose:
print 'step 5'
psi = vectors/vectors[:, [0]]
if diffusion_time == 0:
lambdas = lambdas[1:] / (1 - lambdas[1:])
else:
lambdas = lambdas[1:] ** float(diffusion_time)
lambda_ratio = lambdas/lambdas[0]
threshold = max(0.05, lambda_ratio[-1])
n_components_auto = np.amax(np.nonzero(lambda_ratio > threshold)[0])
n_components_auto = min(n_components_auto, ndim)
if n_components is None:
n_components = n_components_auto
embedding = psi[:, 1:(n_components + 1)] * lambdas[:n_components][None, :]
result = dict(lambdas=lambdas, vectors=vectors,
n_components=n_components, diffusion_time=diffusion_time,
n_components_auto=n_components_auto)
return embedding, result
def thresh_neighbors(C, n_neighbors=50):
import scipy.sparse as sps
# K = exp(C)
K = (C + 1) / 2.
idx = np.argsort(K, axis=1)
col = idx[:, -n_neighbors:].flatten()
row = (np.array(range(K.shape[0]))[:, None] * np.ones((1, n_neighbors))).flatten().astype(int)
A1 = sps.csr_matrix((np.ones((len(row))), (row, col)), shape=K.shape)
A1 = (A1 + A1.transpose())# > 0
idx1 = A1.nonzero()
K = sps.csr_matrix((K.flat[idx1[0]*A1.shape[1] + idx1[1]], A1.indices, A1.indptr))
#K = sps.csr_matrix((K.flat[row*K.shape[0] + col], (row, col)), shape=K.shape)
del A1
return K
A = corr[cort, :][:, cort]
L = thresh_neighbors(A, n_neighbors=n_neighbors)
embedding, result = compute_diffusion_map(L,
alpha=0.5,
n_components=2,
diffusion_time=0)
comp1 = np.zeros(len(corr))
comp1[cort] = embedding.T[0]
comp2 = np.zeros(len(corr))
comp2[cort] = embedding.T[1]
return comp1, comp2
def runMasking(data, hemi):
mask = np.zeros((10242))
for label in [39, 40, 46, 47, 49, 50, 51, 68, 85, 86]:
label = np.sort(nib.freesurfer.io.read_label(glob.glob('%s/fsaverage5/label/*%s*label*' % (fsDir, hemi))[label]))
mask[label] = 1
masked = data * mask
return masked
def runSmoothing(data, hemi, subject):
temp1 = './temp1_%s.mgz' % subject
temp2 = './temp2_%s.mgz' % subject
img = np.expand_dims(data, axis=0)
img = np.expand_dims(img, axis=0)
img = nib.freesurfer.mghformat.MGHImage(img.astype(np.float32), affine=None)
img.to_filename(temp1)
smoothing = fs.SurfaceSmooth(subjects_dir=fsDir,
subject_id='fsaverage5',
in_file=temp1,
out_file=temp2,
hemi=hemi,
fwhm=20,
cortex=True,
terminal_output='none')
smoothing.run()
out = nib.load(temp2).get_data().squeeze()
os.remove(temp1)
os.remove(temp2)
return out
def runExtrema(data, hemi, subject):
temp1 = './temp_%s.mgz' % subject
temp2 = './temp_%s.log' % subject
thmin = (abs(data).max() - 1.3*abs(data).std())
cluster = np.array([x if x > thmin else 0 for x in abs(data)])
cluster_img = np.expand_dims(cluster, axis=0)
cluster_img = np.expand_dims(cluster_img, axis=0)
cluster_img = nib.freesurfer.mghformat.MGHImage(cluster_img.astype(np.float32), affine=None)
cluster_img.to_filename(temp1)
cml = 'mri_surfcluster --in %s --subject fsaverage5 --hemi %s --thmin %s --annot aparc.a2009s --sum %s' % (temp1, hemi, thmin, temp2)
subprocess.call(cml, shell=True)
extrema_log = pd.read_csv(temp2, skiprows=34, skipinitialspace=21, header=None, dtype={0:np.str})
extrema_vertices = [int(extrema_log[0].iloc[i][15:25]) for i in range(len(extrema_log))]
os.remove(temp1)
os.remove(temp2)
return extrema_vertices
# file names and location
corr_file1 = '%s/%s/correlation_maps/%s_lsd_corr_1ab_fsa5_%s.npy' % (dataDir, subject, subject, hemi)
corr_file2 = '%s/%s/correlation_maps/%s_lsd_corr_2ab_fsa5_%s.npy' % (dataDir, subject, subject, hemi)
dist_file = '%s/%s/distance_maps/%s_%s_geoDist_fsa5.mat' % (dataDir, subject, subject, hemi)
parietal_label_file = '%s/%s/labels/fsa5/%s.G_pariet_inf-Angular_fsa5.label' % (dataDir, subject, hemi)
temporal_label_file = '%s/%s/labels/fsa5/%s.Pole_temporal_fsa5.label' % (dataDir, subject, hemi)
V1_label_file = '%s/%s/labels/fsa5/%s.S_calcarine_fsa5.label' % (dataDir, subject, hemi)
A1_label_file = '%s/%s/labels/fsa5/%s.G_temp_sup-G_T_transv_fsa5.label' % (dataDir, subject, hemi)
fiedler_file = '%s/fiedler/%s_fiedler_nnb%s_%s' % (outDir, subject, n_neighbors, hemi)
comp2_file = '%s/fiedler/%s_comp2_nnb%s_%s' % (outDir, subject, n_neighbors, hemi)
peak_img_file = '%s/qc/%s_fiedler_dmnExtrema_nnb%s_%s.png' % (outDir, subject, n_neighbors, hemi)
try:
#if not False in [os.path.isfile(i) for i in [corr_file, dist_file, parietal_label_file, temporal_label_file, V1_label_file, A1_label_file]]:
# read in data
cort = np.sort(nib.freesurfer.io.read_label('%s/fsaverage5/label/%s.cortex.label' % (fsDir, hemi)))
corr1 = np.load(corr_file1)
corr2 = np.load(corr_file2)
corr = (corr1+corr2) /2
with h5py.File(dist_file, 'r') as f:
dist = f['dataAll'][()]
parietal_vertices = np.sort(nib.freesurfer.io.read_label(parietal_label_file))
temppole_vertices = np.sort(nib.freesurfer.io.read_label(temporal_label_file))
V1_vertices = np.sort(nib.freesurfer.io.read_label(V1_label_file))
A1_vertices = np.sort(nib.freesurfer.io.read_label(A1_label_file))
# local extrema in fiedler vector
#fiedler = np.zeros(len(corr))
#fiedler[cort] = runFiedler(corr[cort, :][:, cort])[:,0]
print 'before embedding'
fiedler, comp2 = runAllFiedler(corr, cort, hemi, n_neighbors=n_neighbors)
print 'after embedding'
del corr
f_smoothed = runSmoothing(fiedler, hemi, subject)
f_masked = runMasking(f_smoothed, hemi)
f_extrema_vertices = runExtrema(f_masked, hemi, subject)
# distances
dist_extrema_2_parietal = [np.mean(dist[parietal_vertices, i]) for i in f_extrema_vertices]
parietal_peak_vertex = f_extrema_vertices[dist_extrema_2_parietal.index(min(dist_extrema_2_parietal))]
dist_extrema_2_temporal = [np.mean(dist[temppole_vertices, i]) for i in f_extrema_vertices]
temporal_peak_vertex = f_extrema_vertices[dist_extrema_2_temporal.index(min(dist_extrema_2_temporal))]
# save standardized fiedler
if fiedler[parietal_peak_vertex] < 0:
f_stand = -fiedler
else:
f_stand = fiedler
np.save(fiedler_file, f_stand)
np.save(comp2_file, comp2)
img2disc(f_stand, foci_all=f_extrema_vertices, foci_dmn=[parietal_peak_vertex, temporal_peak_vertex], hemi=hemi, filename=peak_img_file)
# return results
V1_vertices = nib.freesurfer.io.read_label('%s/%s/labels/fsa5/%s.S_calcarine_fsa5.label' % (dataDir, subject, hemi))
A1_vertices = nib.freesurfer.io.read_label('%s/%s/labels/fsa5/%s.G_temp_sup-G_T_transv_fsa5.label' % (dataDir, subject, hemi))
V1_parietal = dist[V1_vertices, parietal_peak_vertex].mean()
A1_parietal = dist[A1_vertices, parietal_peak_vertex].mean()
V1_temporal = dist[V1_vertices, temporal_peak_vertex].mean()
A1_temporal = dist[A1_vertices, temporal_peak_vertex].mean()
return subject, hemi, V1_parietal, A1_parietal, V1_temporal, A1_temporal
except:
return subject, hemi, None, None, None, None
# --------------------------------------------------------------------------------------------------------------------------------------------------
### run serially ###
subjects = pd.read_csv(subjects_file, header=None, converters={0:str})[0].tolist()
for hemi in hemis:
for n_neighbors in n_neighbors_list:
output_dict = {}
res = []
for subject in subjects:
res.append(run_individual_dist_label(subject, hemi, n_neighbors))
output_dict['subject'], output_dict['hemi'], output_dict['V1_parietal'], output_dict['A1_parietal'], output_dict['V1_temporal'], output_dict['A1_temporal'] = np.array(res).T
pd.DataFrame(output_dict).to_csv(out_file % (n_neighbors, hemi), sep='\t', index=False, columns=['subject', 'hemi', 'V1_parietal', 'A1_parietal', 'V1_temporal', 'A1_temporal'])
### run in parallel ### (not good for qc screenshots though)
#p = Pool(20)
#res = p.map(run_individual_dist_label, subjects)
#output_dict = {}
#output_dict['subject'], output_dict['hemi'], output_dict['V1_parietal'], output_dict['A1_parietal'], output_dict['V1_temporal'], output_dict['A1_temporal'] = np.array(res).T
#pd.DataFrame(output_dict).to_csv(out_file, sep='\t', index=False, columns=['subject', 'hemi', 'V1_parietal', 'A1_parietal', 'V1_temporal', 'A1_temporal']) | {
"content_hash": "c27f5cf4c45818684e19cbca91c860df",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 185,
"avg_line_length": 44.95744680851064,
"alnum_prop": 0.5512811845040904,
"repo_name": "soligschlager/topography",
"id": "5831299dfbd14c90d0fc81e6a561bd407c95acda",
"size": "14810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/individual_distance/individual_dist_label.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "2401"
},
{
"name": "C++",
"bytes": "258024"
},
{
"name": "M",
"bytes": "581"
},
{
"name": "Makefile",
"bytes": "2080"
},
{
"name": "Mathematica",
"bytes": "684"
},
{
"name": "Matlab",
"bytes": "823238"
},
{
"name": "Objective-C",
"bytes": "647"
},
{
"name": "Python",
"bytes": "124537"
},
{
"name": "Shell",
"bytes": "9238"
}
],
"symlink_target": ""
} |
"""Generate test data
Generate data needed for unit tests, i.e. certificates, keys, and CRLSet.
"""
import argparse
import subprocess
import sys
from datetime import datetime, timedelta
from typing import List, Tuple
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
CERTS_AND_KEYS_HEADER = """// Copyright 2021 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#pragma once
#include "pw_bytes/span.h"
"""
class Subject:
"""A subject wraps a name, private key and extensions for issuers
to issue its certificate"""
def __init__(self, name: str, extensions: List[Tuple[x509.ExtensionType,
bool]]):
self._subject_name = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"California"),
x509.NameAttribute(NameOID.LOCALITY_NAME, u"Mountain View"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, name),
x509.NameAttribute(NameOID.COMMON_NAME, u"Google-Pigweed"),
])
self._private_key = rsa.generate_private_key(public_exponent=65537,
key_size=2048)
self._extensions = extensions
def subject_name(self) -> x509.Name:
"""Returns the subject name"""
return self._subject_name
def public_key(self) -> rsa.RSAPublicKey:
"""Returns the public key of this subject"""
return self._private_key.public_key()
def private_key(self) -> rsa.RSAPrivateKey:
"""Returns the private key of this subject"""
return self._private_key
def extensions(self) -> List[Tuple[x509.ExtensionType, bool]]:
"""Returns the requested extensions for issuer"""
return self._extensions
class CA(Subject):
"""A CA/Sub-ca that issues certificates"""
def __init__(self, *args, **kwargs):
ext = [(x509.BasicConstraints(True, None), True),
(x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
key_cert_sign=True,
), True)]
super().__init__(*args, extensions=ext, **kwargs)
def sign(self, subject: Subject, not_before: datetime,
not_after: datetime) -> x509.Certificate:
"""Issues a certificate for another CA/Sub-ca/Server"""
builder = x509.CertificateBuilder()
# Subject name is the target's subject name
builder = builder.subject_name(subject.subject_name())
# Issuer name is this CA/sub-ca's subject name
builder = builder.issuer_name(self._subject_name)
# Public key is the target's public key.
builder = builder.public_key(subject.public_key())
# Validity period.
builder = builder.not_valid_before(not_before).not_valid_after(
not_after)
# Uses a random serial number
builder = builder.serial_number(x509.random_serial_number())
# Add extensions
for extension, critical in subject.extensions():
builder = builder.add_extension(extension, critical)
# Sign and returns the certificate.
return builder.sign(self._private_key, hashes.SHA256())
def self_sign(self, not_before: datetime,
not_after: datetime) -> x509.Certificate:
"""Issues a self sign certificate"""
return self.sign(self, not_before, not_after)
class Server(Subject):
"""The end-entity server"""
def __init__(self, *args, **kwargs):
ext = [
(x509.BasicConstraints(False, None), True),
(x509.KeyUsage(
digital_signature=True,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
key_cert_sign=False,
), True),
(x509.ExtendedKeyUsage([x509.ExtendedKeyUsageOID.SERVER_AUTH]),
True),
]
super().__init__(*args, extensions=ext, **kwargs)
def c_escaped_string(data: bytes):
"""Generates a C byte string representation for a byte array
For example, given a byte sequence of [0x12, 0x34, 0x56]. The function
generates the following byte string code:
{"\x12\x34\x56", 3}
"""
body = ''.join([f'\\x{b:02x}' for b in data])
return f'{{\"{body}\", {len(data)}}}'
def byte_array_declaration(data: bytes, name: str) -> str:
"""Generates a ConstByteSpan declaration for a byte array"""
type_name = '[[maybe_unused]] const pw::ConstByteSpan'
array_body = f'pw::as_bytes(pw::span{c_escaped_string(data)})'
return f'{type_name} {name} = {array_body};'
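# Illustrative example (hypothetical variable name), derived from the two
# helpers above:
#   byte_array_declaration(b'\x01\x02', 'kFoo') produces
#   '[[maybe_unused]] const pw::ConstByteSpan kFoo = pw::as_bytes(pw::span{"\x01\x02", 2});'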
class Codegen:
"""Base helper class for code generation"""
def generate_code(self) -> str:
"""Generates C++ code for this object"""
class PrivateKeyGen(Codegen):
"""Codegen class for a private key"""
def __init__(self, key: rsa.RSAPrivateKey, name: str):
self._key = key
self._name = name
def generate_code(self) -> str:
"""Code generation"""
return byte_array_declaration(
self._key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption()), self._name)
class CertificateGen(Codegen):
"""Codegen class for a single certificate"""
def __init__(self, cert: x509.Certificate, name: str):
self._cert = cert
self._name = name
def generate_code(self) -> str:
"""Code generation"""
return byte_array_declaration(
self._cert.public_bytes(serialization.Encoding.DER), self._name)
def generate_test_data() -> str:
"""Generates test data"""
subjects: List[Codegen] = []
# Working valid period.
# Start from yesterday, to make sure we are in the valid period.
not_before = datetime.utcnow() - timedelta(days=1)
# Valid for 1 year.
not_after = not_before + timedelta(days=365)
# Generate a root-A CA certificates
root_a = CA("root-A")
subjects.append(
CertificateGen(root_a.self_sign(not_before, not_after), "kRootACert"))
# Generate a sub CA certificate signed by root-A.
sub = CA("sub")
subjects.append(
CertificateGen(root_a.sign(sub, not_before, not_after), "kSubCACert"))
# Generate a valid server certificate signed by sub
server = Server("server")
subjects.append(
CertificateGen(sub.sign(server, not_before, not_after), "kServerCert"))
subjects.append(PrivateKeyGen(server.private_key(), "kServerKey"))
root_b = CA("root-B")
subjects.append(
CertificateGen(root_b.self_sign(not_before, not_after), "kRootBCert"))
code = 'namespace {\n\n'
for subject in subjects:
code += subject.generate_code() + '\n\n'
code += '}\n'
return code
def clang_format(file):
subprocess.run([
"clang-format",
"-i",
file,
], check=True)
def parse_args():
"""Setup argparse."""
parser = argparse.ArgumentParser()
parser.add_argument(
"certs_and_keys_header",
help="output header file for test certificates and keys")
return parser.parse_args()
def main() -> int:
"""Main"""
args = parse_args()
certs_and_keys = generate_test_data()
with open(args.certs_and_keys_header, 'w') as header:
header.write(CERTS_AND_KEYS_HEADER)
header.write(certs_and_keys)
clang_format(args.certs_and_keys_header)
return 0
if __name__ == "__main__":
sys.exit(main())
| {
"content_hash": "1511b44a7a7e47d18d74ede44a405c32",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 80,
"avg_line_length": 32.756554307116104,
"alnum_prop": 0.616624742739538,
"repo_name": "google/pigweed",
"id": "ba609096f72c1e86a6c26103be4cd391f4d33820",
"size": "9330",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pw_tls_client/py/pw_tls_client/generate_test_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8654"
},
{
"name": "C",
"bytes": "487991"
},
{
"name": "C++",
"bytes": "6119052"
},
{
"name": "CMake",
"bytes": "288698"
},
{
"name": "CSS",
"bytes": "4820"
},
{
"name": "Go",
"bytes": "18932"
},
{
"name": "HTML",
"bytes": "1194"
},
{
"name": "Java",
"bytes": "327548"
},
{
"name": "JavaScript",
"bytes": "12482"
},
{
"name": "Jinja",
"bytes": "2467"
},
{
"name": "Python",
"bytes": "3578966"
},
{
"name": "Rust",
"bytes": "645"
},
{
"name": "SCSS",
"bytes": "1382"
},
{
"name": "Shell",
"bytes": "22974"
},
{
"name": "Smarty",
"bytes": "692"
},
{
"name": "Starlark",
"bytes": "489444"
},
{
"name": "TypeScript",
"bytes": "235169"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
# setdefault keeps an existing DJANGO_SETTINGS_MODULE and falls back to the project default
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'imgret.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "ed3395c0cde3a54e7edb9bcb99a83511",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 97,
"avg_line_length": 38.6,
"alnum_prop": 0.6994818652849741,
"repo_name": "internetmosquito/django-celery-task-scheduler",
"id": "ee54d1b981ed832a72db51ee30f2a6261c34d131",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3814"
},
{
"name": "Python",
"bytes": "12884"
},
{
"name": "Shell",
"bytes": "455"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
import setup as _setup
import datetime as dt
# Mock out certain modules while building documentation
class Mock(object):
__all__ = []
def __init__(self, *args, **kw):
pass
def __call__(self, *args, **kw):
return Mock()
def __mul__(self, other):
return Mock()
def __and__(self, other):
return Mock()
def __bool__(self):
return False
def __nonzero__(self):
return False
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
else:
return Mock()
sys.modules['ctypes'] = Mock()
sys.modules['numpy'] = Mock()
sys.modules['numpy.lib'] = sys.modules['numpy'].lib
sys.modules['numpy.lib.stride_tricks'] = sys.modules['numpy'].lib.stride_tricks
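# Substituting Mock() for these modules lets Sphinx import the package on the
# docs builder (e.g. Read the Docs) without ctypes/numpy being installed.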
# -- General configuration ------------------------------------------------
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
if on_rtd:
needs_sphinx = '1.4.0'
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
tags.add('rtd')
else:
extensions.append('sphinx.ext.mathjax')
mathjax_path = '/usr/share/javascript/mathjax/MathJax.js?config=TeX-AMS_HTML'
templates_path = ['_templates']
source_suffix = '.rst'
#source_encoding = 'utf-8-sig'
master_doc = 'index'
project = _setup.__project__.title()
copyright = '2013-%d %s' % (dt.datetime.now().year, _setup.__author__)
version = _setup.__version__
release = _setup.__version__
#language = None
#today_fmt = '%B %d, %Y'
exclude_patterns = ['_build']
#default_role = None
#add_function_parentheses = True
#add_module_names = True
#show_authors = False
pygments_style = 'sphinx'
#modindex_common_prefix = []
#keep_warnings = False
# -- Autodoc configuration ------------------------------------------------
autodoc_member_order = 'groupwise'
autodoc_default_flags = ['members']
# -- Intersphinx configuration --------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
# -- Options for HTML output ----------------------------------------------
if on_rtd:
html_theme = 'sphinx_rtd_theme'
#html_theme_options = {}
#html_theme_path = []
#html_sidebars = {}
else:
html_theme = 'default'
#html_theme_options = {}
#html_theme_path = []
#html_sidebars = {}
html_title = '%s %s Documentation' % (project, version)
#html_short_title = None
#html_logo = None
#html_favicon = None
html_static_path = ['_static']
#html_extra_path = []
#html_last_updated_fmt = '%b %d, %Y'
#html_use_smartypants = True
#html_additional_pages = {}
#html_domain_indices = True
#html_use_index = True
#html_split_index = False
#html_show_sourcelink = True
#html_show_sphinx = True
#html_show_copyright = True
#html_use_opensearch = ''
#html_file_suffix = None
htmlhelp_basename = '%sdoc' % _setup.__project__
# Hack to make wide tables work properly in RTD
# See https://github.com/snide/sphinx_rtd_theme/issues/117 for details
def setup(app):
app.add_stylesheet('style_override.css')
# -- Options for LaTeX output ---------------------------------------------
#latex_engine = 'pdflatex'
latex_elements = {
'papersize': 'a4paper',
'pointsize': '10pt',
'preamble': r'\def\thempfootnote{\arabic{mpfootnote}}', # workaround sphinx issue #2530
}
latex_documents = [
(
'index', # source start file
'%s.tex' % _setup.__project__, # target filename
'%s %s Documentation' % (project, version), # title
_setup.__author__, # author
'manual', # documentclass
True, # documents ref'd from toctree only
),
]
#latex_logo = None
#latex_use_parts = False
latex_show_pagerefs = True
latex_show_urls = 'footnote'
#latex_appendices = []
#latex_domain_indices = True
# -- Options for epub output ----------------------------------------------
epub_basename = _setup.__project__
#epub_theme = 'epub'
#epub_title = html_title
epub_author = _setup.__author__
epub_identifier = 'https://picamera.readthedocs.io/'
#epub_tocdepth = 3
epub_show_urls = 'no'
#epub_use_index = True
# -- Options for manual page output ---------------------------------------
man_pages = []
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = []
#texinfo_appendices = []
#texinfo_domain_indices = True
#texinfo_show_urls = 'footnote'
#texinfo_no_detailmenu = False
# -- Options for linkcheck builder ----------------------------------------
linkcheck_retries = 3
linkcheck_workers = 20
linkcheck_anchors = True
| {
"content_hash": "c8d252e3138369773c0538ddb54983f2",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 91,
"avg_line_length": 27.446927374301676,
"alnum_prop": 0.5800936291471606,
"repo_name": "tfroehlich82/picamera",
"id": "10599e1223709381215d54ce78466e6ab0b4fb52",
"size": "6613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6457"
},
{
"name": "Python",
"bytes": "760315"
}
],
"symlink_target": ""
} |
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm as cm
from matplotlib import dates
import calendar
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
save_file = False
start_date = datetime(2013,10,1)
end_date = datetime(2013,11,1)
hour_step = 168#336#168
min_BC_VED = 80
max_BC_VED = 220
interval = 5
min_rBC_mass = ((min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
max_rBC_mass = ((max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
##############initialize binning variables
def createBinDict(min_VED,max_VED,interval_length):
bins = []
start_size = min_VED #VED in nm
end_size = max_VED #VED in nm
#in nm
#create list of size bins
while start_size < end_size:
bins.append(start_size)
start_size += interval_length
#create dictionary with size bins as keys
bin_data = {}
for bin in bins:
bin_data[bin] = [0,0]
return bin_data
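# e.g. createBinDict(80, 100, 5) -> {80: [0, 0], 85: [0, 0], 90: [0, 0], 95: [0, 0]}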
binned_data_min = createBinDict(min_BC_VED,max_BC_VED,interval)
binned_data_max = createBinDict(min_BC_VED,max_BC_VED,interval)
fraction_successful = createBinDict(min_BC_VED,max_BC_VED,interval)
for key in binned_data_min:
binned_data_min[key] = []
binned_data_max[key] = []
os.chdir('C:/Users/Sarah Hanna/Documents/Data/Alert Data/coating data/')
while start_date < end_date:
print start_date
period_end = start_date + timedelta(hours = hour_step)
UNIX_start_time = calendar.timegm(start_date.utctimetuple())
UNIX_end_time = calendar.timegm(period_end.utctimetuple())
cursor.execute(('''SELECT rBC_mass_fg,coat_thickness_nm_min,coat_thickness_nm_max,LF_scat_amp,UNIX_UTC_ts
FROM alert_leo_coating_data
WHERE UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and HK_flag = 0 and coat_thickness_nm_min >= %s and rBC_mass_fg IS NOT NULL'''),
(UNIX_start_time,UNIX_end_time,0))
coat_data = cursor.fetchall()
#hexbin plot
new_data = []
file_data = []
for row in coat_data:
mass = row[0]
min_coat = row[1]
max_coat = row[2]
LEO_amp = row[3]
UNIX_UTC_ts = row[4]
date_time = datetime.utcfromtimestamp(UNIX_UTC_ts)
VED = (((mass/(10**15*1.8))*6/math.pi)**(1/3.0))*10**7
for key in fraction_successful:
key_value = float(key)
interval_end = key_value + interval
if VED >= key_value and VED < interval_end:
fraction_successful[key][0] = fraction_successful[key][0] + 1
if LEO_amp >= 0:
fraction_successful[key][1] = fraction_successful[key][1] + 1
if min_coat != None:
binned_data_min[key].append(min_coat)
if max_coat != None:
binned_data_max[key].append(max_coat)
#fraction detectable
fractions_detectable = []
for bin, counts in fraction_successful.iteritems():
bin_midpoint = bin + interval/2.0
total_particles = counts[0]
detectable_notches = counts[1]
try:
fraction_detectable = detectable_notches*1.0/total_particles
except ZeroDivisionError:
fraction_detectable = np.nan
fractions_detectable.append([bin_midpoint,fraction_detectable])
fractions_detectable.sort()
#coats for cores
min_coats = []
max_coats = []
for bin, counts in binned_data_min.iteritems():
bin_midpoint = bin + interval/2.0
min_avg_coat = np.mean(binned_data_min[bin])
min_coats.append([bin_midpoint,min_avg_coat])
min_coats.sort()
for bin, counts in binned_data_max.iteritems():
bin_midpoint = bin + interval/2.0
max_avg_coat = np.mean(binned_data_max[bin])
max_coats.append([bin_midpoint,max_avg_coat])
max_coats.sort()
#make lists
bins = [row[0] for row in fractions_detectable]
fractions = [row[1] for row in fractions_detectable]
core_size_min = [row[0] for row in min_coats]
coat_min_size = [row[1] for row in min_coats]
core_size_max = [row[0] for row in max_coats]
coat_max_size = [row[1] for row in max_coats]
#plotting
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
fill = ax1.fill_between(core_size_min, coat_min_size, coat_max_size, label='coating range', alpha=0.5)
ax1.scatter(core_size_min, coat_min_size, label='coating min', color='k')
ax1.scatter(core_size_max, coat_max_size, label='coating max', color='r')
ax1.set_xlabel('rBC core diameter')
ax1.set_ylabel('range of coating thickness')
ax1.set_ylim(0,220)
ax1.set_xlim(min_BC_VED,max_BC_VED)
fig.subplots_adjust(right=0.8)
ax2 = ax1.twinx()
ax2.scatter(bins, fractions, color = 'g', marker = 's')
ax2.set_ylabel('fraction of detectable notch positions',color='g')
ax2.set_ylim(0,1)
plt.xlim(min_BC_VED,max_BC_VED)
#ax3 = fig.add_subplot(212)
#ax3.scatter(core_size_max, coat_max_size)#, norm= norm) #bins='log', norm=norm
#ax3.set_xlabel('rBC core diameter')
#ax3.set_ylabel('Maximum coating thickness')
#ax3.set_ylim(-30,220)
#ax3.set_xlim(min_BC_VED,max_BC_VED)
#
#ax4 = ax3.twinx()
#ax4.scatter(bins, fractions, color = 'r')
#ax4.set_ylabel('fraction of detectable notch positions',color='r')
#ax4.set_ylim(0,1)
#plt.xlim(min_BC_VED,max_BC_VED)
#plt.savefig('C:/Users/Sarah Hanna/Documents/Data/Alert Data/coating data/' + file_name + '.png', bbox_inches='tight')
plt.legend()
plt.show()
#start_date = start_date + timedelta(hours = hour_step)
#continue
#
start_date = start_date + timedelta(hours = hour_step)
cnx.close() | {
"content_hash": "45d47f139a8c970653bc1c60a0b72698",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 140,
"avg_line_length": 29.71657754010695,
"alnum_prop": 0.6841821126507108,
"repo_name": "annahs/atmos_research",
"id": "2b099c611f475dd44ad65fd44e579f78f4bd9fff",
"size": "5557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AL_coat_v_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1677056"
}
],
"symlink_target": ""
} |
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from suas import session, auth_handlers
HOME_VIEW = template.Template("""
<head><title>Home</title></head>
<body>
<h3>Demo app for <a href="http://github.com/aht/suas/">suas</a></h3>
{% if session.flash_msg %}
<p>{{ session.flash_msg }}</p>
{% endif %}
{% if current_user %}
<p>Logged in as {{ session.user.nick_name }}.</p>
<p><a href="/logout">Log out</a></p>
{% else %}
<p><a href="/login">Log in</a></p>
<p><a href="/signup">Sign up</a></p>
{% endif %}
</body>
""")
class HomeHandler(session.RequestHandler):
def get(self):
ctx = template.Context({"session": self.session})
self.response.out.write(HOME_VIEW.render(ctx))
ROUTES = [('/', HomeHandler)] + auth_handlers.ROUTES
APP = webapp.WSGIApplication(ROUTES, debug=True)
def main():
util.run_wsgi_app(APP)
if __name__ == "__main__":
main()
| {
"content_hash": "a8dc5a62ee36f5e84334c007a2ae5df8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.6572327044025157,
"repo_name": "aht/suas",
"id": "73c7f98b1b04c6df28b3d98c0e2dc3b8ce819de5",
"size": "1004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20695"
}
],
"symlink_target": ""
} |
"""Base classes for our unit tests.
Allows overriding of CONF for use of fakes, and some black magic for
inline callbacks.
"""
import copy
import logging
import os
import uuid
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log.fixture import logging_error as log_fixture
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
from oslo_utils import timeutils
from oslotest import moxstubout
import six
import testtools
from cinder.common import config # noqa Need to register global_opts
from cinder import coordination
from cinder.db import migration
from cinder.db.sqlalchemy import api as sqla_api
from cinder import i18n
from cinder.objects import base as objects_base
from cinder import rpc
from cinder import service
from cinder.tests import fixtures as cinder_fixtures
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_notifier
CONF = cfg.CONF
_DB_CACHE = None
class TestingException(Exception):
pass
class Database(fixtures.Fixture):
def __init__(self, db_api, db_migrate, sql_connection):
self.sql_connection = sql_connection
# Suppress logging for test runs
migrate_logger = logging.getLogger('migrate')
migrate_logger.setLevel(logging.WARNING)
self.engine = db_api.get_engine()
self.engine.dispose()
conn = self.engine.connect()
db_migrate.db_sync()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
def setUp(self):
super(Database, self).setUp()
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
def _get_joined_notifier(self, *args, **kwargs):
# We create a new fake notifier but we join the notifications with
# the default notifier
notifier = fake_notifier.get_fake_notifier(*args, **kwargs)
notifier.notifications = self.notifier.notifications
return notifier
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
# Create default notifier
self.notifier = fake_notifier.get_fake_notifier()
# Mock rpc get notifier with fake notifier method that joins all
# notifications with the default notifier
p = mock.patch('cinder.rpc.get_notifier',
side_effect=self._get_joined_notifier)
p.start()
# Unit tests do not need to use lazy gettext
i18n.enable_lazy(False)
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
environ_enabled = (lambda var_name:
strutils.bool_from_string(os.environ.get(var_name)))
if environ_enabled('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if environ_enabled('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
self.useFixture(cinder_fixtures.StandardLogging())
rpc.add_extra_exmods("cinder.tests.unit")
self.addCleanup(rpc.clear_extra_exmods)
self.addCleanup(rpc.cleanup)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.messaging_conf.response_timeout = 15
self.useFixture(self.messaging_conf)
rpc.init(CONF)
# NOTE(geguileo): This is required because _determine_obj_version_cap
# and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache
# versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have
# weird interactions between tests if we don't clear them before each
# test.
rpc.LAST_OBJ_VERSIONS = {}
rpc.LAST_RPC_VERSIONS = {}
conf_fixture.set_defaults(CONF)
CONF([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = timeutils.utcnow()
CONF.set_default('connection', 'sqlite://', 'database')
CONF.set_default('sqlite_synchronous', False, 'database')
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(sqla_api, migration,
sql_connection=CONF.database.connection)
self.useFixture(_DB_CACHE)
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
# registry.
objects_base.CinderObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.CinderObjectRegistry._registry._obj_classes)
self.addCleanup(self._restore_obj_registry)
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
self.addCleanup(CONF.reset)
self.addCleanup(self._common_cleanup)
self.injected = []
self._services = []
fake_notifier.mock_notifier(self)
self.override_config('fatal_exception_format_errors', True)
# This will be cleaned up by the NestedTempfile fixture
lock_path = self.useFixture(fixtures.TempDir()).path
self.fixture = self.useFixture(
config_fixture.Config(lockutils.CONF))
self.fixture.config(lock_path=lock_path,
group='oslo_concurrency')
lockutils.set_defaults(lock_path)
self.override_config('policy_file',
os.path.join(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
)
),
'cinder/tests/unit/policy.json'),
group='oslo_policy')
self._disable_osprofiler()
self._disallow_invalid_uuids()
# NOTE(geguileo): This is required because common get_by_id method in
# cinder.db.sqlalchemy.api caches get methods and if we use a mocked
# get method in one test it would carry on to the next test. So we
# clear out the cache.
sqla_api._GET_METHODS = {}
self.override_config('backend_url', 'file://' + lock_path,
group='coordination')
coordination.COORDINATOR.start()
self.addCleanup(coordination.COORDINATOR.stop)
def _restore_obj_registry(self):
objects_base.CinderObjectRegistry._registry._obj_classes = \
self._base_test_obj_backup
def _disable_osprofiler(self):
"""Disable osprofiler.
osprofiler should not run for unit tests.
"""
side_effect = lambda value: value
mock_decorator = mock.MagicMock(side_effect=side_effect)
p = mock.patch("osprofiler.profiler.trace_cls",
return_value=mock_decorator)
p.start()
def _disallow_invalid_uuids(self):
def catch_uuid_warning(message, *args, **kwargs):
ovo_message = "invalid UUID. Using UUIDFields with invalid UUIDs " \
"is no longer supported"
if ovo_message in message:
raise AssertionError(message)
p = mock.patch("warnings.warn",
side_effect=catch_uuid_warning)
p.start()
def _common_cleanup(self):
"""Runs after each test method to tear down test environment."""
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def override_config(self, name, override, group=None):
"""Cleanly override CONF variables."""
CONF.set_override(name, override, group)
self.addCleanup(CONF.clear_override, name, group)
def flags(self, **kw):
"""Override CONF variables for a test."""
for k, v in kw.items():
self.override_config(k, v)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
def mock_object(self, obj, attr_name, *args, **kwargs):
"""Use python mock to mock an object attribute
        Mocks the specified object's attribute with the given value.
Automatically performs 'addCleanup' for the mock.
"""
patcher = mock.patch.object(obj, attr_name, *args, **kwargs)
result = patcher.start()
self.addCleanup(patcher.stop)
return result
def patch(self, path, *args, **kwargs):
"""Use python mock to mock a path with automatic cleanup."""
patcher = mock.patch(path, *args, **kwargs)
result = patcher.start()
self.addCleanup(patcher.stop)
return result
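    # Illustrative usage sketch (not part of the original file): both helpers
    # wrap mock.patch / mock.patch.object and register addCleanup(stop), so a
    # test never has to stop its own patches. The patched targets below are
    # hypothetical examples, not real cinder attributes.
    #
    #     def test_something(self):
    #         fake_create = self.mock_object(service.Service, 'create',
    #                                        return_value=mock.sentinel.svc)
    #         fake_now = self.patch('cinder.some.module.utcnow')  # hypothetical path
    #         ...
    #         fake_create.assert_called_once_with()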
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
        If you don't care about (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = d1
d2str = d2
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' %
{'msg': msg, 'd1str': d1str, 'd2str': d2str})
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' %
{'d1only': d1only, 'd2only': d2only})
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" %
{
'key': key,
'd1value': d1value,
'd2value': d2value,
})
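    # Illustrative sketch (not part of the original file) of the two special
    # behaviours documented above:
    #
    #     # 'DONTCARE' skips a key whose value is irrelevant to the test
    #     self.assertDictMatch({'id': 1, 'updated_at': 'DONTCARE'},
    #                          {'id': 1, 'updated_at': '2017-01-01T00:00:00'})
    #     # approx_equal compares numeric values within the given tolerance
    #     self.assertDictMatch({'size': 1.0}, {'size': 1.0005},
    #                          approx_equal=True, tolerance=0.001)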
def assert_notify_called(self, mock_notify, calls):
for i in range(0, len(calls)):
mock_call = mock_notify.call_args_list[i]
call = calls[i]
posargs = mock_call[0]
self.assertEqual(call[0], posargs[0])
self.assertEqual(call[1], posargs[2])
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
if isinstance(obj, dict):
items = obj.items()
else:
items = obj.iteritems()
return {k: v for k, v in items
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(
len(obj1), len(obj2),
"Keys mismatch: %s" % six.text_type(
set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.items():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None,
msg=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertListEqual(conv_and_sort(objs1), conv_and_sort(objs2),
msg=msg)
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
| {
"content_hash": "c5b96772070d5d1ca682bed44f1450b0",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 80,
"avg_line_length": 36.712158808933005,
"alnum_prop": 0.5877661372085163,
"repo_name": "Nexenta/cinder",
"id": "a9faef66faac6e2665a13f7bb6782d2fb9cd012f",
"size": "15527",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18007018"
},
{
"name": "Shell",
"bytes": "13543"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
from threading import Thread
from buzzerJukeBox import BuzzerJukeBox
_ledPins = [18, 23, 24]
_buttonPin = 25
_buzzerPin = 22
_cleaned = False
_jukeBox = BuzzerJukeBox(_buzzerPin)
_holdOffThread = None
_holdOffPeriodSecs = 15
def holdOffTimer():
print("Hold off begun")
time.sleep(_holdOffPeriodSecs)
print("Hold off complete")
def cleanup():
    # Record that cleanup has run so repeated calls are no-ops.
    global _cleaned
    if _cleaned:
        return
    GPIO.setwarnings(True)
    GPIO.cleanup()
    _cleaned = True
def configureGPIO():
"""
Set the default configuration of the GPIO pins of the RaspberryPi.
    Using pins defined in _ledPins as OUT pins and _buttonPin as IN.
"""
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.cleanup()
for pin in _ledPins:
GPIO.setup(pin, GPIO.OUT)
GPIO.setup(_buttonPin, GPIO.IN)
GPIO.setup(_buzzerPin, GPIO.OUT)
GPIO.output(_buzzerPin, GPIO.LOW)
def changeLEDState(state):
"""
Set the state of the output GPIO pins being used by the LEDs
to the state provided.
"""
for pin in _ledPins:
GPIO.output(pin, state)
def lightLED():
"""
Light all connected LEDs.
"""
changeLEDState(GPIO.HIGH)
def unlightLED():
"""
Turn off all connected LEDs.
"""
changeLEDState(GPIO.LOW)
def performAction():
unlightLED()
    # global required so a local variable isn't created but the global variable within this module
    # is used instead. Only required for assignment operations.
global _holdOffThread
_holdOffThread = Thread(target=holdOffTimer)
_holdOffThread.start()
_jukeBox.playRandom()
    # Running from 10 down to 1, light and then unlight the LEDs in an accelerated
# fashion to provide a simple ten step sequence.
for i in range(10, 0, -1):
lightLED()
        time.sleep(i / 10.0)  # float division so the delay shrinks each pass, even on Python 2
unlightLED()
        time.sleep(i / 10.0)
#########
# start #
#########
try:
configureGPIO()
lightLED()
while True:
if _holdOffThread is None or _holdOffThread.isAlive() is False:
lightLED()
# Only perform check inside once we know the hold off thread is not running
# as this acts as a debounce (stops multiple button presses).
# If the action is longer than the hold off the debounce will still work
# as control will not have been returned to this function.
if GPIO.input(_buttonPin) == False:
performAction()
else:
unlightLED()
finally:
cleanup()
| {
"content_hash": "823b2236d785796b893b8aa063b90ea1",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 98,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.6355799373040752,
"repo_name": "tomntech/BigRedButton",
"id": "e3425da5ec7a8e7bdcb8e18dc9c7d10c0b22a437",
"size": "2552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/bigRedButton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19067"
},
{
"name": "Shell",
"bytes": "137"
}
],
"symlink_target": ""
} |
import pytest
from pgcli.packages.parseutils import (
is_destructive,
parse_destructive_warning,
BASE_KEYWORDS,
ALL_KEYWORDS,
)
from pgcli.packages.parseutils.tables import extract_tables
from pgcli.packages.parseutils.utils import find_prev_keyword, is_open_quote
def test_empty_string():
tables = extract_tables("")
assert tables == ()
def test_simple_select_single_table():
tables = extract_tables("select * from abc")
assert tables == ((None, "abc", None, False),)
@pytest.mark.parametrize(
"sql", ['select * from "abc"."def"', 'select * from abc."def"']
)
def test_simple_select_single_table_schema_qualified_quoted_table(sql):
tables = extract_tables(sql)
assert tables == (("abc", "def", '"def"', False),)
@pytest.mark.parametrize("sql", ["select * from abc.def", 'select * from "abc".def'])
def test_simple_select_single_table_schema_qualified(sql):
tables = extract_tables(sql)
assert tables == (("abc", "def", None, False),)
def test_simple_select_single_table_double_quoted():
tables = extract_tables('select * from "Abc"')
assert tables == ((None, "Abc", None, False),)
def test_simple_select_multiple_tables():
tables = extract_tables("select * from abc, def")
assert set(tables) == {(None, "abc", None, False), (None, "def", None, False)}
def test_simple_select_multiple_tables_double_quoted():
tables = extract_tables('select * from "Abc", "Def"')
assert set(tables) == {(None, "Abc", None, False), (None, "Def", None, False)}
def test_simple_select_single_table_double_quoted_aliased():
tables = extract_tables('select * from "Abc" a')
assert tables == ((None, "Abc", "a", False),)
def test_simple_select_multiple_tables_double_quoted_aliased():
tables = extract_tables('select * from "Abc" a, "Def" d')
assert set(tables) == {(None, "Abc", "a", False), (None, "Def", "d", False)}
def test_simple_select_multiple_tables_schema_qualified():
tables = extract_tables("select * from abc.def, ghi.jkl")
assert set(tables) == {("abc", "def", None, False), ("ghi", "jkl", None, False)}
def test_simple_select_with_cols_single_table():
tables = extract_tables("select a,b from abc")
assert tables == ((None, "abc", None, False),)
def test_simple_select_with_cols_single_table_schema_qualified():
tables = extract_tables("select a,b from abc.def")
assert tables == (("abc", "def", None, False),)
def test_simple_select_with_cols_multiple_tables():
tables = extract_tables("select a,b from abc, def")
assert set(tables) == {(None, "abc", None, False), (None, "def", None, False)}
def test_simple_select_with_cols_multiple_qualified_tables():
tables = extract_tables("select a,b from abc.def, def.ghi")
assert set(tables) == {("abc", "def", None, False), ("def", "ghi", None, False)}
def test_select_with_hanging_comma_single_table():
tables = extract_tables("select a, from abc")
assert tables == ((None, "abc", None, False),)
def test_select_with_hanging_comma_multiple_tables():
tables = extract_tables("select a, from abc, def")
assert set(tables) == {(None, "abc", None, False), (None, "def", None, False)}
def test_select_with_hanging_period_multiple_tables():
tables = extract_tables("SELECT t1. FROM tabl1 t1, tabl2 t2")
assert set(tables) == {(None, "tabl1", "t1", False), (None, "tabl2", "t2", False)}
def test_simple_insert_single_table():
tables = extract_tables('insert into abc (id, name) values (1, "def")')
# sqlparse mistakenly assigns an alias to the table
# AND mistakenly identifies the field list as
# assert tables == ((None, 'abc', 'abc', False),)
assert tables == ((None, "abc", "abc", False),)
@pytest.mark.xfail
def test_simple_insert_single_table_schema_qualified():
tables = extract_tables('insert into abc.def (id, name) values (1, "def")')
assert tables == (("abc", "def", None, False),)
def test_simple_update_table_no_schema():
tables = extract_tables("update abc set id = 1")
assert tables == ((None, "abc", None, False),)
def test_simple_update_table_with_schema():
tables = extract_tables("update abc.def set id = 1")
assert tables == (("abc", "def", None, False),)
@pytest.mark.parametrize("join_type", ["", "INNER", "LEFT", "RIGHT OUTER"])
def test_join_table(join_type):
sql = f"SELECT * FROM abc a {join_type} JOIN def d ON a.id = d.num"
tables = extract_tables(sql)
assert set(tables) == {(None, "abc", "a", False), (None, "def", "d", False)}
def test_join_table_schema_qualified():
tables = extract_tables("SELECT * FROM abc.def x JOIN ghi.jkl y ON x.id = y.num")
assert set(tables) == {("abc", "def", "x", False), ("ghi", "jkl", "y", False)}
def test_incomplete_join_clause():
sql = """select a.x, b.y
from abc a join bcd b
on a.id = """
tables = extract_tables(sql)
assert tables == ((None, "abc", "a", False), (None, "bcd", "b", False))
def test_join_as_table():
tables = extract_tables("SELECT * FROM my_table AS m WHERE m.a > 5")
assert tables == ((None, "my_table", "m", False),)
def test_multiple_joins():
sql = """select * from t1
inner join t2 ON
t1.id = t2.t1_id
inner join t3 ON
t2.id = t3."""
tables = extract_tables(sql)
assert tables == (
(None, "t1", None, False),
(None, "t2", None, False),
(None, "t3", None, False),
)
def test_subselect_tables():
sql = "SELECT * FROM (SELECT FROM abc"
tables = extract_tables(sql)
assert tables == ((None, "abc", None, False),)
@pytest.mark.parametrize("text", ["SELECT * FROM foo.", "SELECT 123 AS foo"])
def test_extract_no_tables(text):
tables = extract_tables(text)
assert tables == tuple()
@pytest.mark.parametrize("arg_list", ["", "arg1", "arg1, arg2, arg3"])
def test_simple_function_as_table(arg_list):
tables = extract_tables(f"SELECT * FROM foo({arg_list})")
assert tables == ((None, "foo", None, True),)
@pytest.mark.parametrize("arg_list", ["", "arg1", "arg1, arg2, arg3"])
def test_simple_schema_qualified_function_as_table(arg_list):
tables = extract_tables(f"SELECT * FROM foo.bar({arg_list})")
assert tables == (("foo", "bar", None, True),)
@pytest.mark.parametrize("arg_list", ["", "arg1", "arg1, arg2, arg3"])
def test_simple_aliased_function_as_table(arg_list):
tables = extract_tables(f"SELECT * FROM foo({arg_list}) bar")
assert tables == ((None, "foo", "bar", True),)
def test_simple_table_and_function():
tables = extract_tables("SELECT * FROM foo JOIN bar()")
assert set(tables) == {(None, "foo", None, False), (None, "bar", None, True)}
def test_complex_table_and_function():
tables = extract_tables(
"""SELECT * FROM foo.bar baz
JOIN bar.qux(x, y, z) quux"""
)
assert set(tables) == {("foo", "bar", "baz", False), ("bar", "qux", "quux", True)}
def test_find_prev_keyword_using():
q = "select * from tbl1 inner join tbl2 using (col1, "
kw, q2 = find_prev_keyword(q)
assert kw.value == "(" and q2 == "select * from tbl1 inner join tbl2 using ("
@pytest.mark.parametrize(
"sql",
[
"select * from foo where bar",
"select * from foo where bar = 1 and baz or ",
"select * from foo where bar = 1 and baz between qux and ",
],
)
def test_find_prev_keyword_where(sql):
kw, stripped = find_prev_keyword(sql)
assert kw.value == "where" and stripped == "select * from foo where"
@pytest.mark.parametrize(
"sql", ["create table foo (bar int, baz ", "select * from foo() as bar (baz "]
)
def test_find_prev_keyword_open_parens(sql):
kw, _ = find_prev_keyword(sql)
assert kw.value == "("
@pytest.mark.parametrize(
"sql",
[
"",
"$$ foo $$",
"$$ 'foo' $$",
'$$ "foo" $$',
"$$ $a$ $$",
"$a$ $$ $a$",
"foo bar $$ baz $$",
],
)
def test_is_open_quote__closed(sql):
assert not is_open_quote(sql)
@pytest.mark.parametrize(
"sql",
[
"$$",
";;;$$",
"foo $$ bar $$; foo $$",
"$$ foo $a$",
"foo 'bar baz",
"$a$ foo ",
'$$ "foo" ',
"$$ $a$ ",
"foo bar $$ baz",
],
)
def test_is_open_quote__open(sql):
assert is_open_quote(sql)
@pytest.mark.parametrize(
("sql", "keywords", "expected"),
[
("update abc set x = 1", ALL_KEYWORDS, True),
("update abc set x = 1 where y = 2", ALL_KEYWORDS, True),
("update abc set x = 1", BASE_KEYWORDS, True),
("update abc set x = 1 where y = 2", BASE_KEYWORDS, False),
("select x, y, z from abc", ALL_KEYWORDS, False),
("drop abc", ALL_KEYWORDS, True),
("alter abc", ALL_KEYWORDS, True),
("delete abc", ALL_KEYWORDS, True),
("truncate abc", ALL_KEYWORDS, True),
("insert into abc values (1, 2, 3)", ALL_KEYWORDS, False),
("insert into abc values (1, 2, 3)", BASE_KEYWORDS, False),
("insert into abc values (1, 2, 3)", ["insert"], True),
("insert into abc values (1, 2, 3)", ["insert"], True),
],
)
def test_is_destructive(sql, keywords, expected):
assert is_destructive(sql, keywords) == expected
@pytest.mark.parametrize(
("warning_level", "expected"),
[
("true", ALL_KEYWORDS),
("false", []),
("all", ALL_KEYWORDS),
("moderate", BASE_KEYWORDS),
("off", []),
("", []),
(None, []),
(ALL_KEYWORDS, ALL_KEYWORDS),
(BASE_KEYWORDS, BASE_KEYWORDS),
("insert", ["insert"]),
("drop,alter,delete", ["drop", "alter", "delete"]),
(["drop", "alter", "delete"], ["drop", "alter", "delete"]),
],
)
def test_parse_destructive_warning(warning_level, expected):
assert parse_destructive_warning(warning_level) == expected
| {
"content_hash": "9b0b0710a1b436b7c446f91b0866b043",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 86,
"avg_line_length": 32.061290322580646,
"alnum_prop": 0.5940235436160579,
"repo_name": "dbcli/pgcli",
"id": "349cbd021af4a25e7ef26d35f23c34587cae3005",
"size": "9939",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/parseutils/test_parseutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "72"
},
{
"name": "Gherkin",
"bytes": "6498"
},
{
"name": "Python",
"bytes": "443552"
},
{
"name": "Shell",
"bytes": "1944"
}
],
"symlink_target": ""
} |
import mock
import netaddr
from oslo_config import cfg
import webob
from jacket.api.compute.openstack.compute import floating_ips_bulk \
as fipbulk_v21
from jacket.api.compute.openstack.compute.legacy_v2.contrib import floating_ips_bulk \
as fipbulk_v2
from jacket import context
from jacket.compute import exception
from jacket.objects import compute
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
CONF = cfg.CONF
class FloatingIPBulkV21(test.TestCase):
floating_ips_bulk = fipbulk_v21
bad_request = exception.ValidationError
def setUp(self):
super(FloatingIPBulkV21, self).setUp()
self.context = context.get_admin_context()
self.controller = self.floating_ips_bulk.FloatingIPBulkController()
self.req = fakes.HTTPRequest.blank('')
def _setup_floating_ips(self, ip_range):
body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
res_dict = self.controller.create(self.req, body=body)
response = {"floating_ips_bulk_create": {
'ip_range': ip_range,
'pool': CONF.default_floating_pool,
'interface': CONF.public_interface}}
self.assertEqual(res_dict, response)
def test_create_ips(self):
ip_range = '192.168.1.0/28'
self._setup_floating_ips(ip_range)
def test_create_ips_pool(self):
ip_range = '10.0.1.0/29'
pool = 'a new pool'
body = {'floating_ips_bulk_create':
{'ip_range': ip_range,
'pool': pool}}
res_dict = self.controller.create(self.req, body=body)
response = {"floating_ips_bulk_create": {
'ip_range': ip_range,
'pool': pool,
'interface': CONF.public_interface}}
self.assertEqual(res_dict, response)
def test_list_ips(self):
self._test_list_ips(self.req)
def _test_list_ips(self, req):
ip_range = '192.168.1.1/28'
self._setup_floating_ips(ip_range)
res_dict = self.controller.index(req)
ip_info = [{'address': str(ip_addr),
'pool': CONF.default_floating_pool,
'interface': CONF.public_interface,
'project_id': None,
'instance_uuid': None,
'fixed_ip': None}
for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()]
response = {'floating_ip_info': ip_info}
self.assertEqual(res_dict, response)
def test_list_ips_associated(self):
self._test_list_ips_associated(self.req)
@mock.patch('compute.compute.FloatingIPList.get_all')
def _test_list_ips_associated(self, req, mock_get):
instance_uuid = "fake-uuid"
fixed_address = "10.0.0.1"
floating_address = "192.168.0.1"
fixed_ip = compute.FixedIP(instance_uuid=instance_uuid,
address=fixed_address)
floating_ip = compute.FloatingIP(address=floating_address,
fixed_ip=fixed_ip,
pool=CONF.default_floating_pool,
interface=CONF.public_interface,
project_id=None)
floating_list = compute.FloatingIPList(compute=[floating_ip])
mock_get.return_value = floating_list
res_dict = self.controller.index(req)
ip_info = [{'address': floating_address,
'pool': CONF.default_floating_pool,
'interface': CONF.public_interface,
'project_id': None,
'instance_uuid': instance_uuid,
'fixed_ip': fixed_address}]
response = {'floating_ip_info': ip_info}
self.assertEqual(res_dict, response)
def test_list_ip_by_host(self):
self._test_list_ip_by_host(self.req)
def _test_list_ip_by_host(self, req):
ip_range = '192.168.1.1/28'
self._setup_floating_ips(ip_range)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, 'host')
def test_delete_ips(self):
self._test_delete_ips(self.req)
def _test_delete_ips(self, req):
ip_range = '192.168.1.0/29'
self._setup_floating_ips(ip_range)
body = {'ip_range': ip_range}
res_dict = self.controller.update(req, "delete", body=body)
response = {"floating_ips_bulk_delete": ip_range}
self.assertEqual(res_dict, response)
# Check that the IPs are actually deleted
res_dict = self.controller.index(req)
response = {'floating_ip_info': []}
self.assertEqual(res_dict, response)
def test_create_duplicate_fail(self):
ip_range = '192.168.1.0/30'
self._setup_floating_ips(ip_range)
ip_range = '192.168.1.0/29'
body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
self.req, body=body)
def test_create_bad_cidr_fail(self):
        # netaddr can't handle /32 or /31 cidrs
ip_range = '192.168.1.1/32'
body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, body=body)
def test_create_invalid_cidr_fail(self):
ip_range = 'not a cidr'
body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
self.assertRaises(self.bad_request, self.controller.create,
self.req, body=body)
class FloatingIPBulkV2(FloatingIPBulkV21):
floating_ips_bulk = fipbulk_v2
bad_request = webob.exc.HTTPBadRequest
def setUp(self):
super(FloatingIPBulkV2, self).setUp()
self.non_admin_req = fakes.HTTPRequest.blank('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
def test_list_ips_with_non_admin(self):
ip_range = '192.168.1.1/28'
self._setup_floating_ips(ip_range)
self.assertRaises(exception.AdminRequired,
self.controller.index, self.non_admin_req)
def test_list_ip_with_non_admin(self):
ip_range = '192.168.1.1/28'
self._setup_floating_ips(ip_range)
self.assertRaises(exception.AdminRequired, self.controller.show,
self.non_admin_req, "host")
def test_delete_ips(self):
self._test_delete_ips(self.admin_req)
def test_list_ip_by_host(self):
self._test_list_ip_by_host(self.admin_req)
def test_list_ips_associated(self):
self._test_list_ips_associated(self.admin_req)
def test_list_ips(self):
self._test_list_ips(self.admin_req)
class FloatingIPBulkPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPBulkPolicyEnforcementV21, self).setUp()
self.controller = fipbulk_v21.FloatingIPBulkController()
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, func, *arg, **kwarg):
rule_name = "os_compute_api:os-floating-ips-bulk"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req)
def test_show_ip_policy_failed(self):
self._common_policy_check(self.controller.show, self.req, "host")
def test_create_policy_failed(self):
ip_range = '192.168.1.0/28'
body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
self._common_policy_check(self.controller.create, self.req, body=body)
def test_update_policy_failed(self):
ip_range = '192.168.1.0/29'
body = {'ip_range': ip_range}
self._common_policy_check(self.controller.update, self.req,
"delete", body=body)
| {
"content_hash": "c8e94cb094c4c810b671bb149d896068",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 86,
"avg_line_length": 37.22171945701358,
"alnum_prop": 0.5962800875273523,
"repo_name": "HybridF5/jacket",
"id": "50d1a366724bb8397facb237e85722e22be45d7c",
"size": "8828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/unit/api/openstack/compute/test_floating_ips_bulk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from typing import Callable, Optional, Sequence, Tuple, Union
import equinox.internal as eqxi
import jax
import jax.numpy as jnp
from ..custom_types import Array, Int, PyTree, Scalar
from ..solution import RESULTS
from ..term import AbstractTerm
from .base import AbstractStepSizeController
class ConstantStepSize(AbstractStepSizeController):
"""Use a constant step size, equal to the `dt0` argument of
[`diffrax.diffeqsolve`][].
"""
compile_steps: Optional[bool] = False
def wrap(self, direction: Scalar):
return self
def init(
self,
terms: PyTree[AbstractTerm],
t0: Scalar,
t1: Scalar,
y0: PyTree,
dt0: Optional[Scalar],
args: PyTree,
func: Callable[[Scalar, PyTree, PyTree], PyTree],
error_order: Optional[Scalar],
) -> Tuple[Scalar, Scalar]:
del terms, t1, y0, args, func, error_order
if dt0 is None:
raise ValueError(
"Constant step size solvers cannot select step size automatically; "
"please pass a value for `dt0`."
)
return t0 + dt0, dt0
def adapt_step_size(
self,
t0: Scalar,
t1: Scalar,
y0: PyTree,
y1_candidate: PyTree,
args: PyTree,
y_error: PyTree,
error_order: Scalar,
controller_state: Scalar,
) -> Tuple[bool, Scalar, Scalar, bool, Scalar, RESULTS]:
del t0, y0, y1_candidate, args, y_error, error_order
return (
True,
t1,
t1 + controller_state,
False,
controller_state,
RESULTS.successful,
)
ConstantStepSize.__init__.__doc__ = """**Arguments:**
- `compile_steps`: If `True` then the number of steps taken in the differential
equation solve will be baked into the compilation. When this is possible then
this can improve compile times and run times slightly. The downside is that this
implies re-compiling if this changes, and that this is only possible if the exact
number of steps to be taken is known in advance (i.e. `t0`, `t1`, `dt0` cannot be
traced values) -- and an error will be thrown if the exact number of steps could
not be determined. Set to `False` (the default) to not bake in the number of steps.
Set to `None` to attempt to bake in the number of steps, but to fall back to
`False`-behaviour if the number of steps could not be determined (rather than
throwing an error).
"""
class StepTo(AbstractStepSizeController):
"""Make steps to just prespecified times."""
ts: Union[Sequence[Scalar], Array["times"]] # noqa: F821
compile_steps: Optional[bool] = False
def __post_init__(self):
with jax.ensure_compile_time_eval():
object.__setattr__(self, "ts", jnp.asarray(self.ts))
if self.ts.ndim != 1:
raise ValueError("`ts` must be one-dimensional.")
if len(self.ts) < 2:
raise ValueError("`ts` must have length at least 2.")
def wrap(self, direction: Scalar):
ts = self.ts * direction
# Only tested after we've set the direction.
ts = eqxi.error_if(
ts,
ts[1:] <= ts[:-1],
"`StepTo(ts=...)` must be strictly increasing (or strictly decreasing if "
"t0 > t1).",
)
return type(self)(ts=ts, compile_steps=self.compile_steps)
def init(
self,
terms: PyTree[AbstractTerm],
t0: Scalar,
t1: Scalar,
y0: PyTree,
dt0: None,
args: PyTree,
func: Callable[[Scalar, PyTree, PyTree], PyTree],
error_order: Optional[Scalar],
) -> Tuple[Scalar, int]:
del y0, args, func, error_order
if dt0 is not None:
raise ValueError(
"`dt0` should be `None`. Step location is already determined "
f"by {type(self).__name__}(ts=...).",
)
ts = eqxi.error_if(
self.ts,
(t0 != self.ts[0]) | (t1 != self.ts[-1]),
"Must have `t0==ts[0]` and `t1==ts[-1]`.",
)
return ts[1], 2
def adapt_step_size(
self,
t0: Scalar,
t1: Scalar,
y0: Array["state"], # noqa: F821
y1_candidate: Array["state"], # noqa: F821
args: PyTree,
y_error: Array["state"], # noqa: F821
error_order: Scalar,
controller_state: int,
) -> Tuple[bool, Scalar, Scalar, bool, Int, RESULTS]:
del t0, y0, y1_candidate, args, y_error, error_order
return (
True,
t1,
self.ts[controller_state],
False,
controller_state + 1,
RESULTS.successful,
)
StepTo.__init__.__doc__ = """**Arguments:**
- `ts`: The times to step to. Must be an increasing/decreasing sequence of times
between the `t0` and `t1` (inclusive) passed to [`diffrax.diffeqsolve`][].
Correctness of `ts` with respect to `t0` and `t1` as well as its
monotonicity is checked by the implementation.
- `compile_steps`: As [`diffrax.ConstantStepSize.__init__`][].
"""
| {
"content_hash": "220458854ee6575b4644593d63d4e74e",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 87,
"avg_line_length": 32.9171974522293,
"alnum_prop": 0.5789473684210527,
"repo_name": "patrick-kidger/diffrax",
"id": "7eaf5605c593cef35f65bcd4a911e573e51aa415",
"size": "5168",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "diffrax/step_size_controller/constant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "416334"
}
],
"symlink_target": ""
} |
from flask_wtf import Form
from wtforms import TextAreaField, IntegerField
from wtforms.validators import InputRequired, NumberRange
class WorkReviewForm(Form):
    # InputRequired rather than DataRequired so a score of 0 is not rejected
    # as "missing" (DataRequired treats falsy values as absent).
    score = IntegerField('Score', validators=[InputRequired(), NumberRange(min=0, max=100)])
comment = TextAreaField('Comment')
def __init__(self, *args, **kwargs):
"""Create instance."""
super(WorkReviewForm, self).__init__(*args, **kwargs)
def validate(self):
initial_validation = super(WorkReviewForm, self).validate()
if not initial_validation:
return False
return True
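# Illustrative usage sketch (not part of the original module; assumes a Flask
# request context and a hypothetical save_review helper):
#
#     form = WorkReviewForm()
#     if form.validate_on_submit():
#         save_review(score=form.score.data, comment=form.comment.data)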
| {
"content_hash": "ee743bf9495a9b627f00da9f530c1225",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 90,
"avg_line_length": 35.72222222222222,
"alnum_prop": 0.6936236391912908,
"repo_name": "B-O-P/innovator",
"id": "5c338bf91ac8e5161b299910dde89cca99d43c69",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "innovator/reviewer/form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "43496"
},
{
"name": "Python",
"bytes": "116721"
}
],
"symlink_target": ""
} |
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
import struct
__author__ = 'Ka-Ping Yee <[email protected]>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if hex is not None:
if (bytes is not None or bytes_le is not None or
fields is not None or int is not None):
raise TypeError('if the hex argument is given, bytes,'
' bytes_le, fields, and int need to be None')
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
elif bytes_le is not None:
if bytes is not None or fields is not None or int is not None:
raise TypeError('if the bytes_le argument is given, bytes,'
' fields, and int need to be None')
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
bytes_le[8:])
int = (struct.unpack('>Q', bytes[:8])[0] << 64 |
struct.unpack('>Q', bytes[8:])[0])
elif bytes is not None:
if fields is not None or int is not None:
raise TypeError('if the bytes argument is given, fields '
'and int need to be None')
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = (struct.unpack('>Q', bytes[:8])[0] << 64 |
struct.unpack('>Q', bytes[8:])[0])
elif fields is not None:
if int is not None:
raise TypeError('if the fields argument is given, int needs'
' to be None')
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
elif int is not None:
if not 0 <= int < 1<<128L:
raise ValueError('int is out of range (need a 128-bit value)')
else:
raise TypeError('one of hex, bytes, bytes_le, fields,'
' or int need to be not None')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L
# Set the version number.
int &= ~(0xf000 << 64L)
int |= version << 76L
object.__setattr__(self, 'int', int)
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr((self.int >> shift) & 0xff) + bytes
return bytes
bytes = property(get_bytes)
def get_bytes_le(self):
bytes = self.bytes
return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
bytes_le = property(get_bytes_le)
def get_fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96L
time_low = property(get_time_low)
def get_time_mid(self):
return (self.int >> 80L) & 0xffff
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return (self.int >> 64L) & 0xffff
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return (self.int >> 56L) & 0xff
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return (self.int >> 48L) & 0xff
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (((self.time_hi_version & 0x0fffL) << 48L) |
(self.time_mid << 32L) | self.time_low)
time = property(get_time)
def get_clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
self.clock_seq_low)
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 0xffffffffffff
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
if not self.int & (0x8000 << 48L):
return RESERVED_NCS
elif not self.int & (0x4000 << 48L):
return RFC_4122
elif not self.int & (0x2000 << 48L):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76L) & 0xf)
version = property(get_version)
def _popen(command, args):
import os
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
path.extend(('/sbin', '/usr/sbin'))
for dir in path:
executable = os.path.join(dir, command)
if (os.path.exists(executable) and
os.access(executable, os.F_OK | os.X_OK) and
not os.path.isdir(executable)):
break
else:
return None
# LC_ALL to ensure English output, 2>/dev/null to prevent output on
# stderr (Note: we don't have an example where the words we search for
# are actually localized, but in theory some system could do so.)
cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
return os.popen(cmd)
def _find_mac(command, args, hw_identifiers, get_index):
try:
pipe = _popen(command, args)
if not pipe:
return
with pipe:
for line in pipe:
words = line.lower().rstrip().split()
for i in range(len(words)):
if words[i] in hw_identifiers:
try:
word = words[get_index(i)]
mac = int(word.replace(':', ''), 16)
if mac:
return mac
except (ValueError, IndexError):
# Virtual interfaces, such as those provided by
# VPNs, do not have a colon-delimited MAC address
# as expected, but a 16-byte HWAddr separated by
# dashes. These should be ignored in favor of a
# real MAC address
pass
except IOError:
pass
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
if mac:
return mac
def _arp_getnode():
"""Get the hardware address on Unix by running arp."""
import os, socket
try:
ip_addr = socket.gethostbyname(socket.gethostname())
except EnvironmentError:
return None
# Try getting the MAC addr from arp based on our IP address (Solaris).
return _find_mac('arp', '-an', [ip_addr], lambda i: -1)
def _lanscan_getnode():
"""Get the hardware address on Unix by running lanscan."""
# This might work on HP-UX.
return _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
def _netstat_getnode():
"""Get the hardware address on Unix by running netstat."""
# This might work on AIX, Tru64 UNIX and presumably on IRIX.
try:
pipe = _popen('netstat', '-ia')
if not pipe:
return
with pipe:
words = pipe.readline().rstrip().split()
try:
i = words.index('Address')
except ValueError:
return
for line in pipe:
try:
words = line.rstrip().split()
word = words[i]
if len(word) == 17 and word.count(':') == 5:
mac = int(word.replace(':', ''), 16)
if mac:
return mac
except (ValueError, IndexError):
pass
except OSError:
pass
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os, re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
with pipe:
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
(bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes, ctypes.util
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
_uuid_generate_random.argtypes = [ctypes.c_char * 16]
_uuid_generate_random.restype = None
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
_uuid_generate_time.argtypes = [ctypes.c_char * 16]
_uuid_generate_time.restype = None
if _uuid_generate_random is not None:
break # found everything we were looking for
# The uuid_generate_* functions are broken on MacOS X 10.5, as noted
# in issue #8621 the function generates the same sequence of values
# in the parent process and all children created using fork (unless
# those children use exec as well).
#
# Assume that the uuid_generate functions are broken from 10.5 onward,
# the test can be adjusted when a later version is fixed.
import sys
if sys.platform == 'darwin':
import os
if int(os.uname()[2].split('.')[0]) >= 9:
_uuid_generate_random = _uuid_generate_time = None
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
# NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
# 6 bytes returned by UuidCreateSequential are fixed, they don't appear
# to bear any relationship to the MAC address of any network device
# on the box.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
if _UuidCreate is not None:
_UuidCreate.argtypes = [ctypes.c_char * 16]
_UuidCreate.restype = ctypes.c_int
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
_buffer = ctypes.create_string_buffer(16)
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode, _arp_getnode,
_lanscan_getnode, _netstat_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds//100) + 0x01b21dd213814000L
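    # Worked check of that constant (illustrative, not part of the original
    # module): 141,427 days between the two epochs * 86,400 s/day
    #   = 12,219,292,800 s; * 10**7 (100-ns intervals per second)
    #   = 122,192,928,000,000,000 = 0x01b21dd213814000.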
if _last_timestamp is not None and timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.randrange(1<<14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
hash = md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
from hashlib import sha1
hash = sha1(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| {
"content_hash": "f6f8defbd531c7e437d90358ca9f23ba",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 78,
"avg_line_length": 38.782334384858046,
"alnum_prop": 0.587156336424272,
"repo_name": "andela-earinde/bellatrix-py",
"id": "4a64801d0f7893cbcbac8cbc3086c7b4c68b3b47",
"size": "24588",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/js/lib/lib/modules/uuid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8833"
},
{
"name": "HTML",
"bytes": "2381"
},
{
"name": "JavaScript",
"bytes": "12775582"
},
{
"name": "Python",
"bytes": "15057969"
}
],
"symlink_target": ""
} |
import sys, os, re, platform
from os.path import exists, abspath, dirname, join, isdir
try:
# Allow use of setuptools so eggs can be built.
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
from distutils.extension import Extension
from distutils.errors import *
if sys.hexversion >= 0x03000000:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
OFFICIAL_BUILD = 9999
def _print(s):
# Python 2/3 compatibility
sys.stdout.write(s + '\n')
class VersionCommand(Command):
description = "prints the pyodbc version, determined from git"
user_options = []
def initialize_options(self):
self.verbose = 0
def finalize_options(self):
pass
def run(self):
version_str, version = get_version()
sys.stdout.write(version_str + '\n')
class TagsCommand(Command):
description = 'runs etags'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Windows versions of etag do not seem to expand wildcards (which Unix shells normally do for Unix utilities),
# so find all of the files ourselves.
files = [ join('src', f) for f in os.listdir('src') if f.endswith(('.h', '.cpp')) ]
cmd = 'etags %s' % ' '.join(files)
return os.system(cmd)
def main():
version_str, version = get_version()
settings = get_compiler_settings(version_str)
files = [ abspath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ]
if exists('MANIFEST'):
os.remove('MANIFEST')
kwargs = {
'name': "pyodbc",
'version': version_str,
'description': "DB API Module for ODBC",
'long_description': ('A Python DB API 2 module for ODBC. This project provides an up-to-date, '
'convenient interface to ODBC using native data types like datetime and decimal.'),
'maintainer': "Michael Kleehammer",
'maintainer_email': "[email protected]",
'ext_modules': [Extension('pyodbc', files, **settings)],
'license': 'MIT',
'classifiers': ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Database',
],
'url': 'http://code.google.com/p/pyodbc',
'download_url': 'http://code.google.com/p/pyodbc/downloads/list',
'cmdclass': { 'version' : VersionCommand,
'tags' : TagsCommand }
}
if sys.hexversion >= 0x02060000:
kwargs['options'] = {
'bdist_wininst': {'user_access_control' : 'auto'}
}
setup(**kwargs)
def get_compiler_settings(version_str):
settings = {
'extra_compile_args' : [],
'libraries': [],
'include_dirs': [],
'define_macros' : [ ('PYODBC_VERSION', version_str) ]
}
# This isn't the best or right way to do this, but I don't see how someone is supposed to sanely subclass the build
# command.
for option in ['assert', 'trace', 'leak-check']:
try:
sys.argv.remove('--%s' % option)
settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1))
except ValueError:
pass
from array import array
UNICODE_WIDTH = array('u').itemsize
settings['define_macros'].append(('PYODBC_UNICODE_WIDTH', str(UNICODE_WIDTH)))
if os.name == 'nt':
settings['extra_compile_args'].extend([
'/Wall',
'/wd4668',
'/wd4820',
'/wd4711', # function selected for automatic inline expansion
'/wd4100', # unreferenced formal parameter
'/wd4127', # "conditional expression is constant" testing compilation constants
'/wd4191', # casts to PYCFunction which doesn't have the keywords parameter
])
if '--debug' in sys.argv:
sys.argv.remove('--debug')
settings['extra_compile_args'].extend('/Od /Ge /GS /GZ /RTC1 /Wp64 /Yd'.split())
settings['libraries'].append('odbc32')
settings['libraries'].append('advapi32')
elif os.environ.get("OS", '').lower().startswith('windows'):
# Windows Cygwin (posix on windows)
# OS name not windows, but still on Windows
settings['libraries'].append('odbc32')
elif sys.platform == 'darwin':
# OS/X now ships with iODBC.
settings['libraries'].append('iodbc')
# Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot*
settings['extra_compile_args'].extend([
'-Wno-write-strings',
'-Wno-deprecated-declarations'
])
# Apple has decided they won't maintain the iODBC system in OS/X and has added deprecation warnings in 10.8.
# For now target 10.7 to eliminate the warnings.
settings['define_macros'].append( ('MAC_OS_X_VERSION_10_7',) )
else:
# Other posix-like: Linux, Solaris, etc.
# Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot*
settings['extra_compile_args'].append('-Wno-write-strings')
if UNICODE_WIDTH == 4:
# This makes UnixODBC use UCS-4 instead of UCS-2, which works better with sizeof(wchar_t)==4.
# Thanks to Marc-Antoine Parent
settings['define_macros'].append(('SQL_WCHART_CONVERT', '1'))
# What is the proper way to detect iODBC, MyODBC, unixODBC, etc.?
settings['libraries'].append('odbc')
return settings
def add_to_path():
"""
Prepends the build directory to the path so pyodbcconf can be imported without installing it.
"""
# Now run the utility
import imp
library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ]
library_names = [ 'pyodbcconf%s' % ext for ext in library_exts ]
# Only go into directories that match our version number.
dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1])
build = join(dirname(abspath(__file__)), 'build')
for top, dirs, files in os.walk(build):
dirs = [ d for d in dirs if d.endswith(dir_suffix) ]
for name in library_names:
if name in files:
sys.path.insert(0, top)
return
raise SystemExit('Did not find pyodbcconf')
def get_version():
"""
Returns the version of the product as (description, [major,minor,micro,beta]).
If the release is official, `beta` will be 9999 (OFFICIAL_BUILD).
1. If in a git repository, use the latest tag (git describe).
2. If in an unzipped source directory (from setup.py sdist),
read the version from the PKG-INFO file.
3. Use 3.0.0.0 and complain a lot.
"""
# My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test
# release.
#
# Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce
# the version using just these pieces, such as 2.1.4.
#
# Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a
# beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use
# this count as the beta id (beta1, beta2, etc.)
#
# Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *before* the
# official, so we set the official build number to 9999, but we don't show it.
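    # Worked example of the scheme above (illustrative, not from the original
    # file):
    #   'git describe' -> '3.0.5'            => version 3.0.5        (numbers [3, 0, 5, 9999])
    #   'git describe' -> '3.0.5-3-g1a2b3c'  => version 3.0.6-beta03 (numbers [3, 0, 6, 3])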
name = None # branch/feature name. Should be None for official builds.
numbers = None # The 4 integers that make up the version.
# If this is a source release the version will have already been assigned and be in the PKG-INFO file.
name, numbers = _get_version_pkginfo()
# If not a source release, we should be in a git repository. Look for the latest tag.
if not numbers:
name, numbers = _get_version_git()
if not numbers:
_print('WARNING: Unable to determine version. Using 3.0.0.0')
name, numbers = '3.0.0-unsupported', [3,0,0,0]
return name, numbers
def _get_version_pkginfo():
filename = join(dirname(abspath(__file__)), 'PKG-INFO')
if exists(filename):
re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: -beta(\d+))?', re.VERBOSE)
for line in open(filename):
match = re_ver.search(line)
if match:
name = line.split(':', 1)[1].strip()
numbers = [int(n or 0) for n in match.groups()[:3]]
numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build
return name, numbers
return None, None
def _get_version_git():
n, result = getoutput('git describe --tags --match 3.*')
if n:
_print('WARNING: git describe failed with: %s %s' % (n, result))
return None, None
    match = re.match(r'(\d+)\.(\d+)\.(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE)
if not match:
return None, None
numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()]
if numbers[-1] == OFFICIAL_BUILD:
name = '%s.%s.%s' % tuple(numbers[:3])
if numbers[-1] != OFFICIAL_BUILD:
# This is a beta of the next micro release, so increment the micro number to reflect this.
numbers[-2] += 1
name = '%s.%s.%s-beta%02d' % tuple(numbers)
n, result = getoutput('git branch')
branch = re.search(r'\* (\w+)', result).group(1)
if branch != 'master' and not re.match('^v\d+$', branch):
name = branch + '-' + name
return name, numbers
def getoutput(cmd):
pipe = os.popen(cmd, 'r')
text = pipe.read().rstrip('\n')
status = pipe.close() or 0
return status, text
if __name__ == '__main__':
main()
| {
"content_hash": "8e8e0defcf4f13bcc14c831c4c7060ab",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 119,
"avg_line_length": 34.496753246753244,
"alnum_prop": 0.5899294117647059,
"repo_name": "Bjwebb/pyodbc",
"id": "e5dd1643861454f214a05571f7ddba4def41e32d",
"size": "10644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19466"
},
{
"name": "C++",
"bytes": "251124"
},
{
"name": "CSS",
"bytes": "1379"
},
{
"name": "HTML",
"bytes": "59264"
},
{
"name": "Python",
"bytes": "516065"
}
],
"symlink_target": ""
} |
__author__ = 'Eric'
#SEE LICENSE.txt for this program's licensing
import subprocess
import os
import main_html
#main method of class
#PARAM -- path, the current directory that will be passed into the html injector
# --- and that will also be searched for additional subdirectories to inject into
#RETURN -- returns 0 if it works, something else if not
def driver(path):
main_html.driver(path)
sub = get_subs(path)
for i in range(len(sub)):
new_path = path + '/' + sub[i]
driver(new_path)
return 0
#method to get the subdirectories of the current folder (path) that is passed in as a parameter
#PARAM -- path in which to look for subdirectories from
#RETURN -- returns a list containing the subdirectories of the current path
def get_subs(path):
subs = []
for x, y, z in os.walk(path):
subs = y
break
return subs
#method to return the path which the current directory is located from the subprocess module
#PARAM -- NONE
#RETURN -- returns a string containing the path of the current directory where this file is located
def get_dir_path():
loc = subprocess.Popen('pwd', stdout=subprocess.PIPE).communicate()
loc = loc[0]
loc = str(loc)
loc = (loc[2:len(loc)-3])
return loc
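#NOTE -- the slicing above strips the b'...' wrapper and the trailing newline from the subprocess output;
# --- on most systems os.getcwd() (os is already imported) returns the same path directly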
driver(get_dir_path())
| {
"content_hash": "492fe4442b9cf0811aa2d3eff9268503",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 99,
"avg_line_length": 28.955555555555556,
"alnum_prop": 0.6884113584036838,
"repo_name": "albmin/html_injector",
"id": "732c96e9465e639bd93f8104e859178398b3ff84",
"size": "1303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "folder_hunter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8969"
}
],
"symlink_target": ""
} |
from django import template
from django.urls import reverse
from oauth.oauthmanager import get_oauth_apps
register = template.Library()
@register.inclusion_tag('oauth/oauth_applications.html')
def load_oauth_applications(request):
applications = get_oauth_apps()
if applications:
baseurl = reverse('oauth:oauthlogin')
path = request.get_full_path()
apps = list(map(lambda x: (x.ICON_NAME, '{baseurl}?type={type}&next_url={next}'.format(
baseurl=baseurl, type=x.ICON_NAME, next=path)), applications))
else:
apps = []
return {
'apps': apps
}
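# Illustrative template usage (assumes the request object is exposed to templates, e.g. via the
# django.template.context_processors.request context processor):
#
#   {% load oauth_tags %}
#   {% load_oauth_applications request %}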
| {
"content_hash": "b1bce27fd008bb066148cf11f8ea2d03",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 95,
"avg_line_length": 28.045454545454547,
"alnum_prop": 0.6596434359805511,
"repo_name": "liangliangyy/DjangoBlog",
"id": "7b687d5892fa97c8f0467a8ad68342b60a9d8063",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oauth/templatetags/oauth_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "487"
},
{
"name": "HTML",
"bytes": "54461"
},
{
"name": "Python",
"bytes": "224212"
},
{
"name": "Shell",
"bytes": "1247"
}
],
"symlink_target": ""
} |
"""
uploadComp.py uploads the results of cross_graphs.py
Usage:
uploadComp.py url filename
where url is the instance of inspectra to upload to and filename is the path to the comp to upload.
"""
import sys
import os
import httplib
import urllib
import urlparse
import exceptions
import socket
import json
supporttls = True
try:
import ssl
except ImportError:
supporttls = False
print "Error importing ssl module. Https will not be supported."
class HTTPSTLSv1Connection(httplib.HTTPConnection):
"""This class allows communication via TLS, it is version of httplib.HTTPSConnection that specifies TLSv1."""
default_port = httplib.HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
httplib.HTTPConnection.__init__(self, host, port, strict, timeout)
self.key_file = key_file
self.cert_file = cert_file
def connect(self):
"""Connect to a host on a given (TLS) port."""
sock = socket.create_connection((self.host, self.port),
self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, False, ssl.CERT_NONE, ssl.PROTOCOL_TLSv1)
def encode_multipart_formdata(data, filebody, filename):
   """multipart encodes a form. data should be a dictionary of the form fields and filebody
should be a string of the body of the file"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for key, value in data.iteritems():
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
if filebody != "":
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="graph"; filename="%s"'%(filename))
L.append('Content-Type: text/plain')
L.append('')
L.append(filebody)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
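# For reference, the CRLF-joined body built above has roughly this shape (placeholders in <>):
#
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="<key>"
#
#   <value>
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="graph"; filename="<filename>"
#   Content-Type: text/plain
#
#   <filebody>
#   --<BOUNDARY>--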
def doPost(url, paramMap, jsondata, filename,loud=True):
"""
   posts a multipart form to url. paramMap should be a dictionary of the form fields, jsondata
   should be a string of the body of the file (json in our case) and filename is the file name
   reported in the multipart body
"""
u = urlparse.urlparse(url)
content_type, body = encode_multipart_formdata(paramMap, jsondata, filename)
headers = {"Content-type": content_type,
'content-length': str(len(body)),
"Accept": "text/plain"
}
if loud:
print "scheme: %s host: %s port: %s" % (u.scheme, u.hostname, u.port)
if u.scheme == "http":
conn = httplib.HTTPConnection(u.hostname, u.port)
else:
conn = HTTPSTLSv1Connection(u.hostname, u.port) #,privateKey=key,certChain=X509CertChain([cert]))
try:
conn.request("POST", u.path, body, headers)
except ssl.SSLError:
print "Ssl error. Did you mean to specify 'http://'?"
output = None
resp = conn.getresponse()
if resp.status == 200:
print "got 200"
output = resp.read()
if loud:
try:
print json.dumps(json.JSONDecoder().decode(output), sort_keys=True, indent=4)
except:
print output
elif loud:
print resp.status, resp.reason
return resp, output
def uploadGraph(url, filename):
"""
   Upload a graph comp specified by filename to the instance of inspectra specified by url
"""
fo = open(filename)
return doPost(os.path.join(url,"upload"), {},fo.read(),os.path.basename(filename))
def main():
print uploadGraph(sys.argv[1],sys.argv[2])
if __name__ == '__main__':
main() | {
"content_hash": "3706551611c377861a0f21a0bc2a8801",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 118,
"avg_line_length": 32.82786885245902,
"alnum_prop": 0.6184769038701623,
"repo_name": "cancerregulome/inspectra",
"id": "17f2c8aeeaf1acfeef8cc50d2513947c59b7ac17",
"size": "4005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/uploadComp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10027"
},
{
"name": "Go",
"bytes": "2725"
},
{
"name": "JavaScript",
"bytes": "272735"
},
{
"name": "Python",
"bytes": "36631"
}
],
"symlink_target": ""
} |
import os
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'intertrace', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'intertrace',
'PASSWORD': 'intertrace',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [ "localhost" ]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "static"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'intertrace.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'intertrace.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'recordtrace',
)
LOGIN_REDIRECT_URL = "/recordtrace/"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "dbe383cfb1a9968884ceae2ed0c7eb33",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 127,
"avg_line_length": 33.11464968152866,
"alnum_prop": 0.6868628582419696,
"repo_name": "INRIA/intertrace",
"id": "0c422c23797f5349d5776eac60cffa3c6603279c",
"size": "5242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intertrace/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "56171"
},
{
"name": "JavaScript",
"bytes": "58302"
},
{
"name": "Python",
"bytes": "27884"
},
{
"name": "Shell",
"bytes": "332"
}
],
"symlink_target": ""
} |
raise NotImplementedError("tty is not yet implemented in Skulpt")
| {
"content_hash": "d32a9f056f72465061cf4778d3aa2153",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 65,
"avg_line_length": 66,
"alnum_prop": 0.8181818181818182,
"repo_name": "ArcherSys/ArcherSys",
"id": "86b30a795fc232874f1027986324aacc17ee0e0c",
"size": "66",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skulpt/src/lib/tty.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
__all__ = ['startClient', 'startServer']
from twisted.internet import reactor
import autobahn
from autobahn.websocket import connectWS, listenWS
from autobahn.websocket import WebSocketProtocol
from autobahn.websocket import WebSocketClientFactory, \
WebSocketClientProtocol
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.compress import *
class TesteeServerProtocol(WebSocketServerProtocol):
def onMessage(self, msg, binary):
self.sendMessage(msg, binary)
class StreamingTesteeServerProtocol(WebSocketServerProtocol):
def onMessageBegin(self, opcode):
#print "onMessageBegin"
WebSocketServerProtocol.onMessageBegin(self, opcode)
self.beginMessage(binary = opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)
def onMessageFrameBegin(self, length, reserved):
#print "onMessageFrameBegin", length
WebSocketServerProtocol.onMessageFrameBegin(self, length, reserved)
self.beginMessageFrame(length)
def onMessageFrameData(self, data):
#print "onMessageFrameData", len(data)
self.sendMessageFrameData(data)
def onMessageFrameEnd(self):
#print "onMessageFrameEnd"
pass
def onMessageEnd(self):
#print "onMessageEnd"
self.endMessage()
class TesteeServerFactory(WebSocketServerFactory):
protocol = TesteeServerProtocol
#protocol = StreamingTesteeServerProtocol
def __init__(self, url, debug = False, ident = None):
if ident is not None:
server = ident
else:
server = "AutobahnPython/%s" % autobahn.version
WebSocketServerFactory.__init__(self, url, debug = debug, debugCodePaths = debug, server = server)
self.setProtocolOptions(failByDrop = False) # spec conformance
#self.setProtocolOptions(failByDrop = True) # needed for streaming mode
#self.setProtocolOptions(utf8validateIncoming = False)
## enable permessage-XXX compression extensions
##
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
elif isinstance(offer, PerMessageBzip2Offer):
return PerMessageBzip2OfferAccept(offer)
elif isinstance(offer, PerMessageSnappyOffer):
return PerMessageSnappyOfferAccept(offer)
self.setProtocolOptions(perMessageCompressionAccept = accept)
class TesteeClientProtocol(WebSocketClientProtocol):
def onOpen(self):
if self.factory.endCaseId is None:
print "Getting case count .."
elif self.factory.currentCaseId <= self.factory.endCaseId:
print "Running test case %d/%d as user agent %s on peer %s" % (self.factory.currentCaseId, self.factory.endCaseId, self.factory.agent, self.peerstr)
def onMessage(self, msg, binary):
if self.factory.endCaseId is None:
self.factory.endCaseId = int(msg)
print "Ok, will run %d cases" % self.factory.endCaseId
else:
self.sendMessage(msg, binary)
class TesteeClientFactory(WebSocketClientFactory):
protocol = TesteeClientProtocol
def __init__(self, url, debug = False, ident = None):
WebSocketClientFactory.__init__(self, url, useragent = ident, debug = debug, debugCodePaths = debug)
self.setProtocolOptions(failByDrop = False) # spec conformance
## enable permessage-XXX compression extensions
##
offers = [PerMessageDeflateOffer()]
#offers = [PerMessageSnappyOffer(), PerMessageBzip2Offer(), PerMessageDeflateOffer()]
self.setProtocolOptions(perMessageCompressionOffers = offers)
def accept(response):
if isinstance(response, PerMessageDeflateResponse):
return PerMessageDeflateResponseAccept(response)
elif isinstance(response, PerMessageBzip2Response):
return PerMessageBzip2ResponseAccept(response)
elif isinstance(response, PerMessageSnappyResponse):
return PerMessageSnappyResponseAccept(response)
self.setProtocolOptions(perMessageCompressionAccept = accept)
self.endCaseId = None
self.currentCaseId = 0
self.updateReports = True
if ident is not None:
self.agent = ident
else:
self.agent = "AutobahnPython/%s" % autobahn.version
self.resource = "/getCaseCount"
def clientConnectionLost(self, connector, reason):
self.currentCaseId += 1
if self.currentCaseId <= self.endCaseId:
self.resource = "/runCase?case=%d&agent=%s" % (self.currentCaseId, self.agent)
connector.connect()
elif self.updateReports:
self.resource = "/updateReports?agent=%s" % self.agent
self.updateReports = False
connector.connect()
else:
reactor.stop()
def clientConnectionFailed(self, connector, reason):
print "Connection to %s failed (%s)" % (self.url, reason.getErrorMessage())
reactor.stop()
def startClient(wsuri, ident = None, debug = False):
factory = TesteeClientFactory(wsuri, ident = ident, debug = debug)
connectWS(factory)
return True
def startServer(wsuri, sslKey = None, sslCert = None, debug = False):
factory = TesteeServerFactory(wsuri, debug)
   if sslKey and sslCert:
      # Twisted's TLS support; imported locally so plain ws:// setups don't require pyOpenSSL.
      from twisted.internet import ssl
      sslContext = ssl.DefaultOpenSSLContextFactory(sslKey, sslCert)
else:
sslContext = None
listenWS(factory, sslContext)
return True
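# Illustrative standalone usage (an assumption -- this module is normally driven by the surrounding
# test-suite tooling, which is also responsible for starting the reactor):
#
#   startServer("ws://localhost:9001", debug = False)
#   reactor.run()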
| {
"content_hash": "095da86c1cc91b0022a4c1b08f7cb541",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 157,
"avg_line_length": 33.18604651162791,
"alnum_prop": 0.6743167484232656,
"repo_name": "normanmaurer/autobahntestsuite-maven-plugin",
"id": "12534115e59acd2c3f232641a161f7ff9cebf5b2",
"size": "6495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/resources/autobahntestsuite/testee.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "70690"
},
{
"name": "C++",
"bytes": "1291"
},
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Gherkin",
"bytes": "2218"
},
{
"name": "HTML",
"bytes": "56655"
},
{
"name": "Java",
"bytes": "24931"
},
{
"name": "JavaScript",
"bytes": "9151"
},
{
"name": "Python",
"bytes": "13888733"
},
{
"name": "Shell",
"bytes": "1406"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
from twilio import IpMessagingGrant
endpoint = 'https://some.endpoint'
service_sid = 'ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
ipm_grant = IpMessagingGrant(
endpoint_id=endpoint,
service_sid=service_sid,
push_credential_sid='CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
)
| {
"content_hash": "249fc8bbfa9e98edfc290d934243f037",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 27.2,
"alnum_prop": 0.7867647058823529,
"repo_name": "teoreteetik/api-snippets",
"id": "fbd37cb7f0655c8ef83f4937af216744af7f363d",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ip-messaging/users/token-gen-server-push/token-gen-server-push.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
import urwid
class FancyListBox(urwid.LineBox):
def get_listbox(self, items):
class _FancyListBox(urwid.ListBox):
def keypress(_self, size, key):
key = super(_FancyListBox, _self).keypress(size, key)
self.update_corners(_self.ends_visible(size))
return key
def render(_self, size, focus=False):
self.update_corners(_self.ends_visible(size))
return super(_FancyListBox, _self).render(size, focus)
return _FancyListBox(urwid.SimpleListWalker(items))
def __init__(self, items, title="",
tlcorner=u'┌', tline=u' ', lline=u' ',
trcorner=u'┐', blcorner=u'└', rline=u' ',
bline=u' ', brcorner=u'┘'):
#self.length = len(items[2].contents) + 5
try:
x = items[2].contents
        except AttributeError:
x = items[2].get_text()[0]
#self.length = len(items[2].get_text()[0])
self.length = len(x)
self.listbox = self.get_listbox(items)
tline, bline = urwid.Divider(tline), urwid.Divider(bline)
lline, rline = urwid.SolidFill(lline), urwid.SolidFill(rline)
self.tlcorner, self.trcorner = urwid.Text(tlcorner), urwid.Text(trcorner)
self.blcorner, self.brcorner = urwid.Text(blcorner), urwid.Text(brcorner)
title_widget = urwid.Text(self.format_title(title))
tline_widget = urwid.Columns([
tline,
('flow', title_widget),
tline,
])
top = urwid.Columns([
('fixed', 1, self.tlcorner),
tline_widget,
('fixed', 1, self.trcorner),
])
middle = urwid.Columns([
('fixed', 1, lline),
self.listbox,
('fixed', 1, rline),
], box_columns=[0, 2], focus_column=1)
bottom = urwid.Columns([
('fixed', 1, self.blcorner), bline, ('fixed', 1, self.brcorner),
])
pile = urwid.Pile([('flow', top), middle, ('flow', bottom)], focus_item=1)
urwid.WidgetDecoration.__init__(self, self.listbox)
urwid.WidgetWrap.__init__(self, pile)
def top_scroll(self):
self.trcorner.set_text(u"⇧")
self.tlcorner.set_text(u"⇧")
def top_noscroll(self):
self.trcorner.set_text(u"┐")
self.tlcorner.set_text(u"┌")
def bottom_scroll(self):
self.brcorner.set_text(u"⇩")
self.blcorner.set_text(u"⇩")
def bottom_noscroll(self):
self.brcorner.set_text(u"┘")
self.blcorner.set_text(u"└")
def update_corners(self, ends):
if 'top' in ends:
self.top_noscroll()
else:
self.top_scroll()
if 'bottom' in ends:
self.bottom_noscroll()
else:
self.bottom_scroll()
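# Minimal usage sketch (illustrative only; the widgets below are placeholders):
#
#   items = [urwid.Text(u"header"), urwid.Divider(), urwid.Text(u"first row"), urwid.Text(u"second row")]
#   box = FancyListBox(items, title=u"Demo")
#   urwid.MainLoop(box).run()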
| {
"content_hash": "1187b2cea9f4a881e202c69f71925d51",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 82,
"avg_line_length": 30.934782608695652,
"alnum_prop": 0.5351370344342937,
"repo_name": "dustinlacewell/console",
"id": "6d74f3e3de16e80cc4bc07bdd9ae33192142f619",
"size": "2895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "console/widgets/listbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "71762"
}
],
"symlink_target": ""
} |
from collections import Counter
__author__ = "LaughDonor"
#Get the Counter of all letters, then get the counter of how many times they appear based on odd/even.
#If any odd values appear more than once, then it can't be a palindrome
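#Example: input "aaabbbb" -> letter counts {a: 3, b: 4} -> parity counts {1: 1, 0: 1} -> only one odd count -> "YES"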
print ["YES", "NO"][Counter(i % 2 for i in Counter(raw_input()).values())[1] > 1] | {
"content_hash": "fa56d8316187042dc63c9f2a2fec3c40",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 102,
"avg_line_length": 62.8,
"alnum_prop": 0.7229299363057324,
"repo_name": "LaughDonor/hackerrank",
"id": "aa3cfb4d756fcfe4dc654d0a919f14e849e2b3f8",
"size": "314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python2/game-of-thrones.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14379"
}
],
"symlink_target": ""
} |
"""
Example experiment. Train HMMs with PC 1 to 8, k=3..20.
"""
from judgmentHMM import Experiment
arg = {"ratedTurns":'../data/complete_april_2014.csv',
"validationMethod":"lolo",
"quantizationLevel":"parameters"}
models = {"hmm_multi":{}}
args = list()
for k in xrange(3,21):
for total in xrange(2,9):
newarg = arg.copy()
params = list()
for cur in xrange(1,total+1):
params.append("pc-"+str(total)+"-"+str(cur))
newarg["modelParams"] = params
newarg["k"] = k
args.append(newarg)
exp = Experiment()
exp.run(args,models,permute=False)
| {
"content_hash": "fc692aaff4a6e8bb7689b8bb8dc0005d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 56,
"avg_line_length": 24.96,
"alnum_prop": 0.592948717948718,
"repo_name": "phihes/judgmentHMM",
"id": "b89b00807a430b36f7ae7b55f5a7e14f8c23042c",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/example_pca_clustering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42034"
}
],
"symlink_target": ""
} |
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
import numpy
CVectorizer = CountVectorizer(min_df=1)
TfIdfVectorizer = TfidfTransformer()
T, E = map(int, input().split(' '))
RawData = []
Labels = []
for i in range(T) :
labels = map(int, input().split(' '))
RawData.append(input())
Labels.append(labels)
Queries = []
for i in range(E) :
Queries.append(input())
RawData.extend(Queries)
X = CVectorizer.fit_transform(RawData)
Xtf = TfIdfVectorizer.fit_transform(X)
del X
MLB = MultiLabelBinarizer()
Yt = MLB.fit_transform(Labels)
XtfTrain = Xtf[0:T]
XtfTest = Xtf[T:]
Clf = OneVsRestClassifier(LinearSVC(loss='l1', class_weight={1:100,0:1})).fit(XtfTrain, Yt)
Classes = list(MLB.classes_)
for xTest in XtfTest:
y = Clf.decision_function(xTest)
y1 = list(y[0])
c1 = Classes
lbls = [x for (y,x) in sorted(zip(y1,c1))][-10:]
list.reverse(lbls)
print (' '.join([str(i) for i in lbls])) | {
"content_hash": "6124b2ef2455fd3a8ed9ed4159586eb3",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 91,
"avg_line_length": 28.365853658536587,
"alnum_prop": 0.7162510748065348,
"repo_name": "rahulsrma26/code-gems",
"id": "8c27655a263e3b2870a6daea31077abfd28b26cb",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ML/labeler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "225"
},
{
"name": "C++",
"bytes": "505736"
},
{
"name": "Jupyter Notebook",
"bytes": "37358"
},
{
"name": "Python",
"bytes": "84391"
}
],
"symlink_target": ""
} |
import dateutil.relativedelta
from django.utils import timezone
import mock
from nose.tools import * # noqa:
import pytest
from urlparse import urlparse
from api.base.settings.defaults import API_BASE
from api_tests.nodes.views.test_node_draft_registration_list import DraftRegistrationTestCase
from api_tests.subjects.mixins import SubjectsFilterMixin
from api_tests.registrations.filters.test_filters import RegistrationListFilteringMixin
from framework.auth.core import Auth
from osf.models import RegistrationSchema, DraftRegistration
from osf_tests.factories import (
EmbargoFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
DraftRegistrationFactory,
OSFGroupFactory,
)
from rest_framework import exceptions
from tests.base import ApiTestCase
from website.views import find_bookmark_collection
from osf.utils import permissions
SCHEMA_VERSION = 2
@pytest.mark.enable_quickfiles_creation
class TestRegistrationList(ApiTestCase):
def setUp(self):
super(TestRegistrationList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(is_public=False, creator=self.user)
self.registration_project = RegistrationFactory(
creator=self.user, project=self.project)
self.url = '/{}registrations/'.format(API_BASE)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_registration_project = RegistrationFactory(
creator=self.user, project=self.public_project, is_public=True)
self.user_two = AuthUserFactory()
def test_return_public_registrations_logged_out(self):
res = self.app.get(self.url)
assert_equal(len(res.json['data']), 1)
assert_equal(res.status_code, 200)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
url = res.json['data'][0]['relationships']['registered_from']['links']['related']['href']
assert_equal(
urlparse(url).path,
'/{}nodes/{}/'.format(API_BASE, self.public_project._id)
)
def test_return_registrations_logged_in_contributor(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(len(res.json['data']), 2)
assert_equal(res.status_code, 200)
registered_from_one = urlparse(
res.json['data'][0]['relationships']['registered_from']['links']['related']['href']).path
registered_from_two = urlparse(
res.json['data'][1]['relationships']['registered_from']['links']['related']['href']).path
assert_equal(res.content_type, 'application/vnd.api+json')
assert_items_equal(
[registered_from_one, registered_from_two],
['/{}nodes/{}/'.format(API_BASE, self.public_project._id),
'/{}nodes/{}/'.format(API_BASE, self.project._id)]
)
def test_return_registrations_logged_in_non_contributor(self):
res = self.app.get(self.url, auth=self.user_two.auth)
assert_equal(len(res.json['data']), 1)
assert_equal(res.status_code, 200)
registered_from = urlparse(
res.json['data'][0]['relationships']['registered_from']['links']['related']['href']).path
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(
registered_from,
'/{}nodes/{}/'.format(API_BASE, self.public_project._id))
def test_total_biographic_contributor_in_registration(self):
user3 = AuthUserFactory()
registration = RegistrationFactory(is_public=True, creator=self.user)
registration.add_contributor(self.user_two, auth=Auth(self.user))
registration.add_contributor(
user3, auth=Auth(self.user), visible=False)
registration.save()
registration_url = '/{0}registrations/{1}/?embed=contributors'.format(
API_BASE, registration._id)
res = self.app.get(registration_url)
assert_true(
res.json['data']['embeds']['contributors']['links']['meta']['total_bibliographic']
)
assert_equal(
res.json['data']['embeds']['contributors']['links']['meta']['total_bibliographic'], 2
)
def test_exclude_nodes_from_registrations_endpoint(self):
res = self.app.get(self.url, auth=self.user.auth)
ids = [each['id'] for each in res.json['data']]
assert_in(self.registration_project._id, ids)
assert_in(self.public_registration_project._id, ids)
assert_not_in(self.public_project._id, ids)
assert_not_in(self.project._id, ids)
@pytest.mark.enable_bookmark_creation
class TestRegistrationFiltering(ApiTestCase):
def setUp(self):
super(TestRegistrationFiltering, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project_one = ProjectFactory(
title='Project One',
description='Two',
is_public=True,
creator=self.user_one,
category='hypothesis')
self.project_two = ProjectFactory(
title='Project Two',
description='One Three',
is_public=True,
creator=self.user_one)
self.project_three = ProjectFactory(
title='Three',
description='',
is_public=True,
creator=self.user_two
)
self.private_project_user_one = ProjectFactory(
title='Private Project User One', is_public=False, creator=self.user_one)
self.private_project_user_two = ProjectFactory(
title='Private Project User Two', is_public=False, creator=self.user_two)
self.project_one.add_tag('tag1', Auth(
self.project_one.creator), save=False)
self.project_one.add_tag('tag2', Auth(
self.project_one.creator), save=False)
self.project_one.save()
self.project_two.add_tag('tag1', Auth(
self.project_two.creator), save=True)
self.project_two.save()
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True)
self.project_two_reg = RegistrationFactory(
creator=self.user_one, project=self.project_two, is_public=True)
self.project_three_reg = RegistrationFactory(
creator=self.user_two, project=self.project_three, is_public=True, title='No search terms!')
self.private_project_user_one_reg = RegistrationFactory(
creator=self.user_one, project=self.private_project_user_one, is_public=False, title='No search terms!')
self.private_project_user_two_reg = RegistrationFactory(
creator=self.user_two, project=self.private_project_user_two, is_public=False, title='No search terms!')
self.folder = CollectionFactory()
self.bookmark_collection = find_bookmark_collection(self.user_one)
self.url = '/{}registrations/'.format(API_BASE)
def test_filtering_by_category(self):
url = '/{}registrations/?filter[category]=hypothesis'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
registration_json = res.json['data']
ids = [each['id'] for each in registration_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
def test_filtering_by_public(self):
url = '/{}registrations/?filter[public]=false'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
# No public projects returned
assert_false(
any([each['attributes']['public'] for each in reg_json])
)
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
url = '/{}registrations/?filter[public]=true'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
# No private projects returned
assert_true(
all([each['attributes']['public'] for each in reg_json])
)
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
def test_filtering_tags(self):
# both project_one and project_two have tag1
url = '/{}registrations/?filter[tags]={}'.format(API_BASE, 'tag1')
res = self.app.get(url, auth=self.project_one.creator.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
# filtering two tags
# project_one has both tags; project_two only has one
url = '/{}registrations/?filter[tags]={}&filter[tags]={}'.format(
API_BASE, 'tag1', 'tag2')
res = self.app.get(url, auth=self.project_one.creator.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
def test_filtering_tags_exact(self):
self.project_one.add_tag('cats', Auth(self.user_one))
self.project_two.add_tag('cats', Auth(self.user_one))
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True)
self.project_two_reg = RegistrationFactory(
creator=self.user_one, project=self.project_two, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_tags_capitalized_query(self):
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=CAT'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_tags_capitalized_tag(self):
self.project_one.add_tag('CAT', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_on_multiple_tags(self):
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one.add_tag('sand', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat&filter[tags]=sand'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_on_multiple_tags_must_match_both(self):
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True)
res = self.app.get(
'/{}registrations/?filter[tags]=cat&filter[tags]=sand'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 0)
def test_filtering_tags_returns_distinct(self):
# regression test for returning multiple of the same file
self.project_one.add_tag('cat', Auth(self.user_one))
self.project_one.add_tag('cAt', Auth(self.user_one))
self.project_one.add_tag('caT', Auth(self.user_one))
self.project_one.add_tag('CAT', Auth(self.user_one))
self.project_one_reg = RegistrationFactory(
creator=self.user_one, project=self.project_one, is_public=True, title='No search terms!')
res = self.app.get(
'/{}registrations/?filter[tags]=cat'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 1)
def test_filtering_contributors(self):
res = self.app.get(
'/{}registrations/?filter[contributors]={}'.format(
API_BASE, self.user_one._id
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 3)
def test_filtering_contributors_bad_id(self):
res = self.app.get(
'/{}registrations/?filter[contributors]=acatdresseduplikeahuman'.format(
API_BASE
),
auth=self.user_one.auth
)
assert_equal(len(res.json.get('data')), 0)
def test_get_all_registrations_with_no_filter_logged_in(self):
res = self.app.get(self.url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_in(self.project_three_reg._id, ids)
assert_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.project_one._id, ids)
assert_not_in(self.project_two._id, ids)
assert_not_in(self.project_three._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_all_registrations_with_no_filter_not_logged_in(self):
res = self.app.get(self.url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.project_one._id, ids)
assert_not_in(self.project_two._id, ids)
assert_not_in(self.project_three._id, ids)
assert_not_in(self.private_project_user_one._id, ids)
assert_not_in(self.private_project_user_two._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_one_registration_with_exact_filter_logged_in(self):
url = '/{}registrations/?filter[title]=Project%20One'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_one_registration_with_exact_filter_not_logged_in(self):
url = '/{}registrations/?filter[title]=Private%20Project%20User%20One'.format(
API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_some_registrations_with_substring_logged_in(self):
url = '/{}registrations/?filter[title]=Two'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_some_registrations_with_substring_not_logged_in(self):
url = '/{}registrations/?filter[title]=One'.format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_only_public_or_my_registrations_with_filter_logged_in(self):
url = '/{}registrations/?filter[title]=Project'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_get_only_public_registrations_with_filter_not_logged_in(self):
url = '/{}registrations/?filter[title]=Project'.format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_alternate_filtering_field_logged_in(self):
url = '/{}registrations/?filter[description]=Three'.format(API_BASE)
res = self.app.get(url, auth=self.user_one.auth)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_not_in(self.project_one_reg._id, ids)
assert_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_alternate_filtering_field_not_logged_in(self):
url = '/{}registrations/?filter[description]=Two'.format(API_BASE)
res = self.app.get(url)
reg_json = res.json['data']
ids = [each['id'] for each in reg_json]
assert_in(self.project_one_reg._id, ids)
assert_not_in(self.project_two_reg._id, ids)
assert_not_in(self.project_three_reg._id, ids)
assert_not_in(self.private_project_user_one_reg._id, ids)
assert_not_in(self.private_project_user_two_reg._id, ids)
assert_not_in(self.folder._id, ids)
assert_not_in(self.bookmark_collection._id, ids)
def test_incorrect_filtering_field_not_logged_in(self):
url = '/{}registrations/?filter[notafield]=bogus'.format(API_BASE)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(
errors[0]['detail'],
"'notafield' is not a valid field for this endpoint.")
class TestRegistrationSubjectFiltering(SubjectsFilterMixin):
@pytest.fixture()
def resource(self, user):
return RegistrationFactory(creator=user)
@pytest.fixture()
def resource_two(self, user):
return RegistrationFactory(creator=user)
@pytest.fixture()
def url(self):
return '/{}registrations/'.format(API_BASE)
class TestRegistrationCreate(DraftRegistrationTestCase):
@pytest.fixture()
def schema(self):
return RegistrationSchema.objects.get(
name='Replication Recipe (Brandt et al., 2013): Post-Completion',
schema_version=SCHEMA_VERSION)
@pytest.fixture()
def project_public_child(self, project_public):
return ProjectFactory(parent=project_public)
@pytest.fixture()
def project_public_grandchild(self, project_public_child):
return ProjectFactory(parent=project_public_child)
@pytest.fixture()
def project_public_excluded_sibling(self, project_public):
return ProjectFactory(parent=project_public)
@pytest.fixture()
def draft_registration(self, user, project_public, schema):
return DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public,
registration_metadata={
'item29': {'value': 'Yes'},
'item33': {'value': 'success'}
}
)
@pytest.fixture()
def url_registrations(self, project_public):
return '/{}nodes/{}/registrations/'.format(
API_BASE, project_public._id)
@pytest.fixture()
def payload(self, draft_registration):
return {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'immediate'
}
}
}
@pytest.fixture()
def payload_with_children(self, draft_registration, project_public_child, project_public_grandchild):
return {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'children': [project_public_child._id, project_public_grandchild._id],
'registration_choice': 'immediate'
}
}
}
@pytest.fixture()
def payload_with_grandchildren_but_no_children(self, draft_registration, project_public_child, project_public_grandchild):
return {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'children': [project_public_grandchild._id],
'registration_choice': 'immediate'
}
}
}
@pytest.fixture()
def payload_with_bad_child_node_guid(self, draft_registration):
return {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'children': ['fake0', 'fake3'],
'registration_choice': 'immediate'
}
}
}
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_admin_can_create_registration(
self, mock_enqueue, app, user, payload, url_registrations):
res = app.post_json_api(url_registrations, payload, auth=user.auth)
data = res.json['data']['attributes']
assert res.status_code == 201
assert data['registration'] is True
assert data['pending_registration_approval'] is True
assert data['public'] is False
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_admin_can_create_registration_with_specific_children(
self, mock_enqueue, app, user, payload_with_children, project_public, project_public_child, project_public_excluded_sibling, project_public_grandchild, url_registrations):
res = app.post_json_api(url_registrations, payload_with_children, auth=user.auth)
data = res.json['data']['attributes']
assert res.status_code == 201
assert data['registration'] is True
assert data['pending_registration_approval'] is True
assert data['public'] is False
assert project_public.registrations.all().count() == 1
assert project_public_child.registrations.all().count() == 1
assert project_public_grandchild.registrations.all().count() == 1
assert project_public_excluded_sibling.registrations.all().count() == 0
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_admin_400_with_bad_child_node_guid(
self, mock_enqueue, app, user, payload_with_bad_child_node_guid, url_registrations):
res = app.post_json_api(url_registrations, payload_with_bad_child_node_guid, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Some child nodes could not be found.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_admin_cant_register_grandchildren_without_children(
self, mock_enqueue, app, user, payload_with_grandchildren_but_no_children, url_registrations, project_public_grandchild):
res = app.post_json_api(url_registrations, payload_with_grandchildren_but_no_children, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'The parents of all child nodes being registered must be registered.'
def test_cannot_create_registration(
self, app, user_write_contrib, user_read_contrib,
payload, url_registrations, project_public):
# def test_write_only_contributor_cannot_create_registration(self):
res = app.post_json_api(
url_registrations,
payload,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# def test_read_only_contributor_cannot_create_registration(self):
res = app.post_json_api(
url_registrations,
payload,
auth=user_read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
# def test_non_authenticated_user_cannot_create_registration(self):
res = app.post_json_api(url_registrations, payload, expect_errors=True)
assert res.status_code == 401
# admin via a group cannot create registration
group_mem = AuthUserFactory()
group = OSFGroupFactory(creator=group_mem)
project_public.add_osf_group(group, permissions.ADMIN)
res = app.post_json_api(url_registrations, payload, auth=group_mem.auth, expect_errors=True)
assert res.status_code == 403
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_registration_draft_must_be_specified(
self, mock_enqueue, app, user, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate'
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/draft_registration'
assert res.json['errors'][0]['detail'] == 'This field is required.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_registration_draft_must_be_valid(
self, mock_enqueue, app, user, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': '12345'
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_registration_draft_must_be_draft_of_current_node(
self, mock_enqueue, app, user, schema, url_registrations):
project_new = ProjectFactory(creator=user)
draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_new,
registration_metadata={
'item29': {'value': 'Yes'},
'item33': {'value': 'success'}
}
)
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': draft_registration._id
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This draft registration is not created from the given node.'
@pytest.mark.skip('TEMPORARY: Unskip when JSON Schemas are updated')
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_required_top_level_questions_must_be_answered_on_draft(
self, mock_enqueue, app, user, project_public,
prereg_metadata, url_registrations):
prereg_schema = RegistrationSchema.objects.get(
name='Prereg Challenge',
schema_version=SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
registration_metadata = prereg_metadata(prereg_draft_registration)
del registration_metadata['q1']
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': prereg_draft_registration._id,
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'For your registration the \'Title\' field is required'
@pytest.mark.skip('TEMPORARY: Unskip when JSON Schemas are updated')
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_required_second_level_questions_must_be_answered_on_draft(
self, mock_enqueue, app, user, project_public, prereg_metadata, url_registrations):
prereg_schema = RegistrationSchema.objects.get(
name='Prereg Challenge',
schema_version=SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
registration_metadata = prereg_metadata(prereg_draft_registration)
registration_metadata['q11'] = {'value': {}}
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': prereg_draft_registration._id,
}
}
}
res = app.post_json_api(
url_registrations, payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'For your registration your response to the \'Manipulated variables\' field is invalid.'
@pytest.mark.skip('TEMPORARY: Unskip when JSON Schemas are updated')
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_required_third_level_questions_must_be_answered_on_draft(
self, mock_enqueue, app, user, project_public,
prereg_metadata, url_registrations):
prereg_schema = RegistrationSchema.objects.get(
name='Prereg Challenge',
schema_version=SCHEMA_VERSION)
prereg_draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=prereg_schema,
branched_from=project_public
)
registration_metadata = prereg_metadata(prereg_draft_registration)
registration_metadata['q11'] = {'value': {'question': {}}}
prereg_draft_registration.registration_metadata = registration_metadata
prereg_draft_registration.save()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'registration_choice': 'immediate',
'draft_registration': prereg_draft_registration._id,
}
}
}
res = app.post_json_api(
url_registrations, payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'For your registration your response to the \'Manipulated variables\' field is invalid.'
@pytest.mark.skip('TEMPORARY: Unskip when JSON Schemas are updated')
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_multiple_choice_in_registration_schema_must_match_one_of_choices(
self, mock_enqueue, app, user, project_public, schema, payload, url_registrations):
draft_registration = DraftRegistrationFactory(
initiator=user,
registration_schema=schema,
branched_from=project_public,
registration_metadata={
'item29': {'value': 'Yes'},
'item33': {'value': 'success!'}
}
)
payload['data']['attributes']['draft_registration'] = draft_registration._id
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert (
res.json['errors'][0]['detail'] == 'For your registration your response to the \'I judge the replication to be a(n)\''
' field is invalid, your response must be one of the provided options.')
def test_invalid_registration_choice(
self, app, user, draft_registration, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'tomorrow'
}
}
}
res = app.post_json_api(
url_registrations, payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/registration_choice'
assert res.json['errors'][0]['detail'] == '"tomorrow" is not a valid choice.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_embargo_end_date_provided_if_registration_choice_is_embargo(
self, mock_enqueue, app, user, draft_registration, url_registrations):
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo'
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'lift_embargo must be specified.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_embargo_must_be_less_than_four_years(
self, mock_enqueue, app, user, draft_registration,
url_registrations):
today = timezone.now()
five_years = (
today +
dateutil.relativedelta.relativedelta(
years=5)).strftime('%Y-%m-%dT%H:%M:%S')
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': five_years
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registrations can only be embargoed for up to four years.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_embargo_registration(
self, mock_enqueue, app, user,
draft_registration, url_registrations):
today = timezone.now()
next_week = (
today +
dateutil.relativedelta.relativedelta(
months=1)).strftime('%Y-%m-%dT%H:%M:%S')
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': next_week
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 201
data = res.json['data']['attributes']
assert data['registration'] is True
assert data['pending_embargo_approval'] is True
def test_embargo_end_date_must_be_in_the_future(
self, app, user, draft_registration, url_registrations):
today = timezone.now().strftime('%Y-%m-%dT%H:%M:%S')
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': today
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Embargo end date must be at least three days in the future.'
def test_invalid_embargo_end_date_format(
self, app, user, draft_registration, url_registrations):
today = timezone.now().isoformat()
payload = {
'data': {
'type': 'registrations',
'attributes': {
'draft_registration': draft_registration._id,
'registration_choice': 'embargo',
'lift_embargo': today
}
}
}
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm:ss.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_cannot_register_draft_that_has_already_been_registered(
self, mock_enqueue, app, user, payload, draft_registration, url_registrations):
draft_registration.register(auth=Auth(user), save=True)
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'This draft has already been registered and cannot be modified.'
@mock.patch('framework.celery_tasks.handlers.enqueue_task')
def test_cannot_register_draft_that_is_pending_review(
self, mock_enqueue, app, user, payload, url_registrations):
with mock.patch.object(DraftRegistration, 'is_pending_review', mock.PropertyMock(return_value=True)):
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'This draft is pending review and cannot be modified.'
def test_cannot_register_draft_that_has_already_been_approved(
self, app, user, payload, url_registrations):
with mock.patch.object(DraftRegistration, 'requires_approval', mock.PropertyMock(return_value=True)), mock.patch.object(DraftRegistration, 'is_approved', mock.PropertyMock(return_value=True)):
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'This draft has already been approved and cannot be modified.'
def test_cannot_register_draft_that_has_orphan_files(
self, app, user, payload, draft_registration, url_registrations):
schema = draft_registration.registration_schema
schema.schema['pages'][0]['questions'][0].update({
u'description': u'Upload files!',
u'format': u'osf-upload-open',
u'qid': u'qwhatever',
u'title': u'Upload an analysis script with clear comments',
u'type': u'osf-upload',
})
schema.save()
draft_registration.registration_metadata = {
'qwhatever': {
'value': 'file 1',
'extra': [{
'nodeId': 'badid',
'selectedFileName': 'file 1',
}]
}
}
draft_registration.save()
res = app.post_json_api(
url_registrations,
payload,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'All files attached to this form must be registered to complete the' \
' process. The following file(s) are attached, but are not part of' \
' a component being registered: file 1'
@pytest.mark.django_db
class TestRegistrationBulkUpdate:
@pytest.fixture()
def url(self):
return '/{}registrations/'.format(API_BASE)
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def registration_one(self, user):
return RegistrationFactory(
creator=user,
title='Birds',
embargo=EmbargoFactory(
user=user),
is_public=False)
@pytest.fixture()
def registration_two(self, user):
return RegistrationFactory(
creator=user,
title='Birds II',
embargo=EmbargoFactory(
user=user),
is_public=False)
@pytest.fixture()
def private_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {
'public': False
}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {
'public': False
}
}
]
}
@pytest.fixture()
def public_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {
'public': True
}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {
'public': True
}
}
]
}
@pytest.fixture()
def empty_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {}
}
]
}
@pytest.fixture()
def bad_payload(self, registration_one, registration_two):
return {
'data': [
{
'id': registration_one._id,
'type': 'registrations',
'attributes': {
'public': True,
}
},
{
'id': registration_two._id,
'type': 'registrations',
'attributes': {
'title': 'Nerds II: Attack of the Nerds',
}
}
]
}
def test_bulk_update_errors(
self, app, user, registration_one, registration_two,
public_payload, private_payload, url):
# test_bulk_update_registrations_blank_request
res = app.put_json_api(
url,
auth=user.auth,
expect_errors=True,
bulk=True)
assert res.status_code == 400
# test_bulk_update_registrations_one_not_found
payload = {'data': [
{
'id': '12345',
'type': 'registrations',
'attributes': {
'public': True,
}
}, public_payload['data'][0]
]}
res = app.put_json_api(
url, payload,
auth=user.auth,
expect_errors=True,
bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Could not find all objects to update.'
# test_bulk_update_registrations_logged_out
res = app.put_json_api(
url, public_payload,
expect_errors=True,
bulk=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_bulk_update_registrations_logged_in_non_contrib
non_contrib = AuthUserFactory()
res = app.put_json_api(
url, private_payload,
auth=non_contrib.auth,
expect_errors=True,
bulk=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_bulk_update_registrations_send_dictionary_not_list
res = app.put_json_api(
url, {
'data': {
'id': registration_one._id,
'type': 'nodes',
'attributes': {'public': True}
}
},
auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
# test_bulk_update_id_not_supplied
res = app.put_json_api(
url, {
'data': [
public_payload['data'][1], {
'type': 'registrations',
'attributes': {'public': True}}
]
},
auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert len(res.json['errors']) == 1
assert res.json['errors'][0]['source']['pointer'] == '/data/1/id'
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
# test_bulk_update_type_not_supplied
res = app.put_json_api(
url, {
'data': [
public_payload['data'][1], {
'id': registration_one._id,
'attributes': {'public': True}
}
]
}, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert len(res.json['errors']) == 1
assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
# test_bulk_update_incorrect_type
res = app.put_json_api(
url, {
'data': [
public_payload['data'][1], {
'id': registration_one._id,
'type': 'Incorrect',
'attributes': {'public': True}
}
]
}, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 409
# test_bulk_update_limits
registration_update_list = {'data': [public_payload['data'][0]] * 101}
res = app.put_json_api(
url,
registration_update_list,
auth=user.auth,
expect_errors=True,
bulk=True)
assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
# 400 from attempting to make a registration private
res = app.put_json_api(
url,
private_payload,
auth=user.auth,
bulk=True,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Registrations can only be turned from private to public.'
        # Confirm no changes have occurred
registration_one.refresh_from_db()
registration_two.refresh_from_db()
assert registration_one.embargo_termination_approval is None
assert registration_two.embargo_termination_approval is None
assert registration_one.is_public is False
assert registration_two.is_public is False
assert registration_one.title == 'Birds'
assert registration_two.title == 'Birds II'
def test_bulk_update_embargo_logged_in_read_only_contrib(
self, app, user, registration_one, registration_two, public_payload, url):
read_contrib = AuthUserFactory()
registration_one.add_contributor(
read_contrib, permissions=permissions.READ, save=True)
registration_two.add_contributor(
read_contrib, permissions=permissions.READ, save=True)
res = app.put_json_api(
url,
public_payload,
auth=read_contrib.auth,
expect_errors=True,
bulk=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
def test_bulk_update_embargo_logged_in_contrib(
self, app, user, registration_one, registration_two,
public_payload, url):
assert registration_one.is_pending_embargo_termination is False
assert registration_two.is_pending_embargo_termination is False
res = app.put_json_api(url, public_payload, auth=user.auth, bulk=True)
assert res.status_code == 200
assert ({registration_one._id, registration_two._id} == {
res.json['data'][0]['id'], res.json['data'][1]['id']})
# Needs confirmation before it will become public
assert res.json['data'][0]['attributes']['public'] is False
assert res.json['data'][1]['attributes']['public'] is False
assert res.json['data'][0]['attributes']['pending_embargo_termination_approval'] is True
assert res.json['data'][1]['attributes']['pending_embargo_termination_approval'] is True
registration_one.refresh_from_db()
registration_two.refresh_from_db()
# registrations should have pending terminations
assert registration_one.is_pending_embargo_termination is True
assert registration_two.is_pending_embargo_termination is True
class TestRegistrationListFiltering(
RegistrationListFilteringMixin,
ApiTestCase):
url = '/{}registrations/?'.format(API_BASE)
| {
"content_hash": "d8cb024904a34449e299733488cadafb",
"timestamp": "",
"source": "github",
"line_count": 1413,
"max_line_length": 200,
"avg_line_length": 39.174097664543524,
"alnum_prop": 0.5753437031416545,
"repo_name": "mattclark/osf.io",
"id": "05f7dc777c1a2d3a540987f58ae87ebc3ee8aac8",
"size": "55353",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api_tests/registrations/views/test_registration_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "317371"
},
{
"name": "JavaScript",
"bytes": "1792241"
},
{
"name": "Mako",
"bytes": "654772"
},
{
"name": "Python",
"bytes": "10166997"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
"""Package contenant les actions.
Du point de vue du codeur, une action est une classe héritée de Action.
Chaque action se trouve séparée dans un nom de fichier .py qui est le nom de
l'action. Par exemple, 'donner_xp.py' contiendra l'action donner_xp.
Toutes les classes héritées d'Action doivent avoir pour nom
ClasseAction.
"""
| {
"content_hash": "541fdc84cb6260e79503957a3590d6f7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 30.363636363636363,
"alnum_prop": 0.7664670658682635,
"repo_name": "stormi/tsunami",
"id": "0b525336d94bff85d91e57e94ec817e8ff20de39",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/scripting/actions/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import pymysql as sql
# NOTE: `user` and `password` are not defined in this snippet; they must be
# supplied (e.g. loaded from the environment) before this connect call will run.
con = sql.connect('mysqltest.cyzy5ereqqzd.us-east-1.rds.amazonaws.com', user, password, port=3306)
| {
"content_hash": "3569d6715c92d9c793899031201e7454",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 98,
"avg_line_length": 40.666666666666664,
"alnum_prop": 0.7704918032786885,
"repo_name": "marcoikeda/etl_ga",
"id": "69cf994d5fdb27623f3750e1868b851038de71a1",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etl/persist_sessions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1322"
}
],
"symlink_target": ""
} |
import os
import channels.asgi
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stormtrooper.settings")
channel_layer = channels.asgi.get_channel_layer()
| {
"content_hash": "1482605ba284645ba00172077b7e8ddb",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 72,
"avg_line_length": 26,
"alnum_prop": 0.7948717948717948,
"repo_name": "CompileInc/stormtrooper",
"id": "586e368c0db42ca5ec5a6c1885622763801630e6",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stormtrooper/stormtrooper/asgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "28720"
},
{
"name": "Python",
"bytes": "55662"
}
],
"symlink_target": ""
} |
from django import forms
from djblets.auth.forms import RegistrationForm as DjbletsRegistrationForm
from djblets.recaptcha.mixins import RecaptchaFormMixin
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.accounts.mixins import PolicyConsentFormMixin
class RegistrationForm(RecaptchaFormMixin, PolicyConsentFormMixin,
DjbletsRegistrationForm):
"""A registration form with reCAPTCHA support.
This is a version of the Djblets RegistrationForm which knows how to
validate a reCAPTCHA widget. Any error received is stored in the form
for use when generating the widget so that the widget can properly display
the error.
"""
first_name = forms.CharField(required=False)
last_name = forms.CharField(required=False)
def save(self):
"""Save the form."""
user = DjbletsRegistrationForm.save(self)
if user:
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
self.accept_policies(user)
return user
@property
def verify_recaptcha(self):
siteconfig = SiteConfiguration.objects.get_current()
return siteconfig.get('auth_registration_show_captcha')
| {
"content_hash": "913f093299db01e603aab510036700c2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 33.1025641025641,
"alnum_prop": 0.7095274980635167,
"repo_name": "reviewboard/reviewboard",
"id": "21d5004e3a9a6fc6728376ff9e2895cb2a996b80",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/accounts/forms/registration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
import os
from datetime import datetime
import flask
from flask import Flask,render_template,request, redirect, flash, session,url_for,send_from_directory, Response
from dbfunctions import AuthDatabase
app = Flask(__name__)
database = AuthDatabase('exel.db')
app.secret_key = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html")
@app.route('/learnmore')
def learnmore():
return render_template("learnmore.html")
@app.route('/contact')
def contact():
return render_template("contact.html")
@app.route('/mentorship')
def mentorship():
return render_template("mentorship.html")
@app.route('/collegevalue')
def collegevalue():
return render_template("collegevalue.html")
@app.route('/elgibility')
def elgibility():
return render_template("elgibility.html")
@app.route('/sunycuny')
def sunycuny():
return render_template("sunycuny.html")
@app.route('/forum', methods=['GET', 'POST'])
def forum():
if request.method == 'GET':
posts = database.getPosts()
replies = database.getComments()
        print(replies)
return render_template("forum.html",comments=posts,replies=replies)
else:
database.insertPost(request.form['addTitle'],request.form['addComment'])
return render_template("forum.html",comments=database.getPosts(),replies=database.getComments())
def main():
app.debug = True
app.run(host='0.0.0.0', port=8000)
if __name__ == '__main__':
main()
pass
| {
"content_hash": "27e09050cf15d80d87528177a2f0194e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 111,
"avg_line_length": 21.71641791044776,
"alnum_prop": 0.7243986254295532,
"repo_name": "anthonyc1/excelsior-hack",
"id": "fbc5740062814c33d97dea9232b678fe4361aa95",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8815"
},
{
"name": "HTML",
"bytes": "36230"
},
{
"name": "Python",
"bytes": "2545"
}
],
"symlink_target": ""
} |
import re
total = 0
reVowels = r'[aeiou]'
vowelCount = 3
reDoubleLetters = r'.*([a-z])\1.*'
reBadStrings = r'(ab|cd|pq|xy)'
doubleLetters = re.compile(reDoubleLetters)
with open('../inputs/05.txt') as f:
for line in f:
vowels = re.findall(reVowels, line)
if len(vowels) < vowelCount:
continue
if not doubleLetters.match(line):
continue
badStrings = re.findall(reBadStrings, line)
if len(badStrings) > 0:
continue
total += 1
print(total)
| {
"content_hash": "476edd748363ba45c3331ae350daea7b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 49,
"avg_line_length": 18.88888888888889,
"alnum_prop": 0.615686274509804,
"repo_name": "opello/adventofcode",
"id": "f103175e0ffaf64a7639577ef05194558ff38c4d",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2015/python/05-1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35422"
}
],
"symlink_target": ""
} |
"""
rna
Contains extensions to the blender data model
"""
import bpy
_edm_matTypes = (
("def_material", "Default", "Default"),
("glass_material","Glass", "Glass"),
("self_illum_material","Self-illuminated", "Self-illuminated"),
("transparent_self_illum_material","Transparent Self-illuminated", "Transparent Self-illuminated"),
("additive_self_illum_material", "additive_self_illum_material", "additive_self_illum_material"),
("bano_material", "bano_material", "bano_material"),
("building_material", "building_material", "building_material"),
("chrome_material", "chrome_material", "chrome_material"),
("color_material", "color_material", "color_material"),
("fake_omni_lights", "fake_omni_lights", "fake_omni_lights"),
("fake_spot_lights", "fake_spot_lights", "fake_spot_lights"),
("forest_material", "forest_material", "forest_material"),
("lines_material", "lines_material", "lines_material"),
("mirror_material", "mirror_material", "mirror_material"),
("fake_als_lights", "fake_als_lights", "fake_als_lights")
)
_edm_blendTypes = (
("0", "None", "None"),
("1", "Blend", "Blend"),
("2", "Alpha Test", "Alpha Test"),
("3", "Sum. Blending", "Sum. Blending"),
# (4. "Z Written Blending", "Z Written Blending"),
)
def _updateIsRenderable(self, context):
if self.is_renderable and self.is_collision_shell:
self.is_collision_shell = False
def _updateIsCollision(self, context):
if self.is_renderable and self.is_collision_shell:
self.is_renderable = False
def _updateIsConnector(self, context):
if self.is_connector and self.is_lod_root:
self.is_lod_root = False
def _updateIsLOD(self, context):
if self.is_connector and self.is_lod_root:
self.is_connector = False
class EDMObjectSettings(bpy.types.PropertyGroup):
# Only for empty objects: Is this a connector
is_connector = bpy.props.BoolProperty(
default=False,
name="Is Connector?",
description="Is this empty a connector object?",
update=_updateIsConnector)
is_lod_root = bpy.props.BoolProperty(
default=False,
name="Is LOD Root?",
description="Does this object control child LOD visibility?",
update=_updateIsLOD)
is_renderable = bpy.props.BoolProperty(
default=True,
name="Renderable",
description="Can this object's mesh be rendered",
update=_updateIsRenderable)
is_collision_shell = bpy.props.BoolProperty(
default=False,
name="Collision Shell",
description="Is this mesh used for collision calculations?",
update=_updateIsCollision)
damage_argument = bpy.props.IntProperty(
default=-1,
name="Damage Argument",
description="The damage argument affecting this object")
# LOD Control
lod_min_distance = bpy.props.FloatProperty(
default=0, min=0, soft_max=10000, step=10, unit="LENGTH",
name="LOD Min Distance",
description="The minimum distance this object should be visible")
lod_max_distance = bpy.props.FloatProperty(
default=10000, min=0, soft_max=10000, step=10, unit="LENGTH",
name="LOD Max Distance",
description="The maximum distance this object should be visible")
nouse_lod_distance = bpy.props.BoolProperty(
default=True,
name="No Max",
description="Should there be no maximum view distance?")
def updateSceneArgument(self, context):
print(self, context)
print("Updating scene argument")
def register():
bpy.utils.register_class(EDMObjectSettings)
bpy.types.Object.edm = bpy.props.PointerProperty(type=EDMObjectSettings)
bpy.types.Action.argument = bpy.props.IntProperty(name="Argument", default=-1, min=-1)
bpy.types.Material.edm_material = bpy.props.EnumProperty(
items=_edm_matTypes, default="def_material", name="Base Material",
description="The internal EDM material to use as a basis")
bpy.types.Material.edm_blending = bpy.props.EnumProperty(
items=_edm_blendTypes, default="0", name="Opacity mode",
description="The method to use for calculating material opacity/alpha blending")
bpy.types.Scene.active_edm_argument = bpy.props.IntProperty(name="Active Argument", default=-1, min=-1, update=updateSceneArgument)
def unregister():
del bpy.types.Scene.active_edm_argument
del bpy.types.Material.edm_blending
del bpy.types.Material.edm_material
del bpy.types.Action.argument
del bpy.types.Object.edm
bpy.utils.unregister_class(EDMObjectSettings)
| {
"content_hash": "45165549e2f64d6afac808fa98b16ac7",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 133,
"avg_line_length": 37.567796610169495,
"alnum_prop": 0.7029099932325739,
"repo_name": "ndevenish/Blender_ioEDM",
"id": "10894c75bf66ee47fe332abb89ece66e3e5a4d3e",
"size": "4434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "io_EDM/rna.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "73"
},
{
"name": "Python",
"bytes": "128957"
},
{
"name": "Shell",
"bytes": "632"
}
],
"symlink_target": ""
} |
"""
Unified object oriented interface for interacting with file system objects.
File system operations in Python are distributed across modules: os, os.path,
fnmatch, shutil and distutils. This module attempts to make the right choices
for common operations to provide a single interface.
"""
import codecs
from datetime import datetime
import mimetypes
import os
import shutil
from distutils import dir_util
import functools
import fnmatch
import logging
logger = logging.getLogger('fswrap')
# pylint: disable-msg=E0611
__all__ = ['File', 'Folder']
class FS(object):
"""
The base file system object
"""
def __init__(self, path):
super(FS, self).__init__()
if path == os.sep:
self.path = path
else:
self.path = os.path.expandvars(os.path.expanduser(
unicode(path).strip().rstrip(os.sep)))
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __eq__(self, other):
return unicode(self) == unicode(other)
def __ne__(self, other):
return unicode(self) != unicode(other)
@property
def fully_expanded_path(self):
"""
        Returns the fully expanded absolute path. Calls os.path.abspath,
        normpath, normcase, expandvars and expanduser.
"""
return os.path.abspath(
os.path.normpath(
os.path.normcase(
os.path.expandvars(
os.path.expanduser(self.path)))))
@property
def exists(self):
"""
Does the file system object exist?
"""
return os.path.exists(self.path)
@property
def name(self):
"""
Returns the name of the FS object with its extension
"""
return os.path.basename(self.path)
@property
def parent(self):
"""
The parent folder. Returns a `Folder` object.
"""
return Folder(os.path.dirname(self.path))
@property
def depth(self):
"""
        Returns the depth of this path: the number of components obtained by splitting it on os.sep.
"""
return len(self.path.rstrip(os.sep).split(os.sep))
def ancestors(self, stop=None):
"""
Generates the parents until stop or the absolute
root directory is reached.
"""
folder = self
while folder.parent != stop:
if folder.parent == folder:
return
yield folder.parent
folder = folder.parent
def is_descendant_of(self, ancestor):
"""
Checks if this folder is inside the given ancestor.
"""
stop = Folder(ancestor)
for folder in self.ancestors():
if folder == stop:
return True
if stop.depth > folder.depth:
return False
return False
def get_relative_path(self, root):
"""
Gets the fragment of the current path starting at root.
"""
if self.path == root:
return ''
ancestors = self.ancestors(stop=root)
return functools.reduce(lambda f, p: Folder(p.name).child(f),
ancestors,
self.name)
def get_mirror(self, target_root, source_root=None):
"""
        Returns a File or Folder object that represents what would result if the entire
fragment of this directory starting with `source_root` were copied
to `target_root`.
>>> Folder('/usr/local/hyde/stuff').get_mirror('/usr/tmp',
source_root='/usr/local/hyde')
Folder('/usr/tmp/stuff')
"""
fragment = self.get_relative_path(
source_root if source_root else self.parent)
return Folder(target_root).child(fragment)
@staticmethod
def file_or_folder(path):
"""
Returns a File or Folder object that would represent the given path.
"""
target = unicode(path)
return Folder(target) if os.path.isdir(target) else File(target)
def __get_destination__(self, destination):
"""
Returns a File or Folder object that would represent this entity
if it were copied or moved to `destination`.
"""
if isinstance(destination, File) or os.path.isfile(unicode(destination)):
return destination
else:
return FS.file_or_folder(Folder(destination).child(self.name))
class File(FS):
"""
The File object.
"""
def __init__(self, path):
super(File, self).__init__(path)
@property
def name_without_extension(self):
"""
Returns the name of the FS object without its extension
"""
return os.path.splitext(self.name)[0]
@property
def extension(self):
"""
File extension prefixed with a dot.
"""
return os.path.splitext(self.path)[1]
@property
def kind(self):
"""
File extension without dot prefix.
"""
return self.extension.lstrip(".")
@property
def size(self):
"""
Size of this file.
"""
if not self.exists:
return -1
return os.path.getsize(self.path)
@property
def mimetype(self):
"""
Gets the mimetype of this file.
"""
(mime, _) = mimetypes.guess_type(self.path)
return mime
@property
def is_binary(self):
"""Return true if this is a binary file."""
with open(self.path, 'rb') as fin:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if '\0' in chunk:
return True
if len(chunk) < CHUNKSIZE:
break
return False
@property
def is_text(self):
"""Return true if this is a text file."""
return (not self.is_binary)
@property
def is_image(self):
"""Return true if this is an image file."""
return self.mimetype.split("/")[0] == "image"
@property
def last_modified(self):
"""
Returns a datetime object representing the last modified time.
Calls os.path.getmtime.
"""
return datetime.fromtimestamp(os.path.getmtime(self.path))
def has_changed_since(self, basetime):
"""
Returns True if the file has been changed since the given time.
"""
return self.last_modified > basetime
def older_than(self, another_file):
"""
Checks if this file is older than the given file. Uses last_modified to
determine age.
"""
return self.last_modified < File(unicode(another_file)).last_modified
@staticmethod
def make_temp(text):
"""
        Creates a temporary file and writes the `text` into it
"""
import tempfile
(handle, path) = tempfile.mkstemp(text=True)
os.close(handle)
afile = File(path)
afile.write(text)
return afile
def read_all(self, encoding='utf-8'):
"""
Reads from the file and returns the content as a string.
"""
logger.info("Reading everything from %s" % self)
with codecs.open(self.path, 'r', encoding) as fin:
read_text = fin.read()
return read_text
def write(self, text, encoding="utf-8"):
"""
Writes the given text to the file using the given encoding.
"""
logger.info("Writing to %s" % self)
with codecs.open(self.path, 'w', encoding) as fout:
fout.write(text)
def copy_to(self, destination):
"""
Copies the file to the given destination. Returns a File
object that represents the target file. `destination` must
be a File or Folder object.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copy(self.path, unicode(destination))
return target
def delete(self):
"""
Delete the file if it exists.
"""
if self.exists:
os.remove(self.path)
@property
def etag(self):
"""
Generates etag from file contents.
"""
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
with open(self.path) as fin:
chunk = fin.read(CHUNKSIZE)
while chunk:
hash.update(chunk)
chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
class FSVisitor(object):
"""
Implements syntactic sugar for walking and listing folders
"""
def __init__(self, folder, pattern=None):
super(FSVisitor, self).__init__()
self.folder = folder
self.pattern = pattern
def folder_visitor(self, function):
"""
Decorator for `visit_folder` protocol
"""
self.visit_folder = function
return function
def file_visitor(self, function):
"""
Decorator for `visit_file` protocol
"""
self.visit_file = function
return function
def finalizer(self, function):
"""
Decorator for `visit_complete` protocol
"""
self.visit_complete = function
return function
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class FolderWalker(FSVisitor):
"""
    Walks the entire hierarchy of this directory starting with itself.
If a pattern is provided, only the files that match the pattern are
processed.
"""
def walk(self, walk_folders=False, walk_files=False):
"""
A simple generator that yields a File or Folder object based on
the arguments.
"""
if not walk_files and not walk_folders:
return
for root, _, a_files in os.walk(self.folder.path, followlinks=True):
folder = Folder(root)
if walk_folders:
yield folder
if walk_files:
for a_file in a_files:
if (not self.pattern or
fnmatch.fnmatch(a_file, self.pattern)):
yield File(folder.child(a_file))
def walk_all(self):
"""
Yield both Files and Folders as the tree is walked.
"""
return self.walk(walk_folders=True, walk_files=True)
def walk_files(self):
"""
Yield only Files.
"""
return self.walk(walk_folders=False, walk_files=True)
def walk_folders(self):
"""
Yield only Folders.
"""
return self.walk(walk_folders=True, walk_files=False)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Automatically walk the folder when the context manager is exited.
Calls self.visit_folder first and then calls self.visit_file for
any files found. After all files and folders have been exhausted
self.visit_complete is called.
If visitor.visit_folder returns False, the files in the folder are not
processed.
"""
def __visit_folder__(folder):
process_folder = True
if hasattr(self, 'visit_folder'):
process_folder = self.visit_folder(folder)
# If there is no return value assume true
#
if process_folder is None:
process_folder = True
return process_folder
def __visit_file__(a_file):
if hasattr(self, 'visit_file'):
self.visit_file(a_file)
def __visit_complete__():
if hasattr(self, 'visit_complete'):
self.visit_complete()
for root, dirs, a_files in os.walk(self.folder.path, followlinks=True):
folder = Folder(root)
if not __visit_folder__(folder):
dirs[:] = []
continue
for a_file in a_files:
if not self.pattern or fnmatch.fnmatch(a_file, self.pattern):
__visit_file__(File(folder.child(a_file)))
__visit_complete__()
class FolderLister(FSVisitor):
"""
Lists the contents of this directory.
If a pattern is provided, only the files that match the pattern are
processed.
"""
def list(self, list_folders=False, list_files=False):
"""
A simple generator that yields a File or Folder object based on
the arguments.
"""
a_files = os.listdir(self.folder.path)
for a_file in a_files:
path = self.folder.child(a_file)
if os.path.isdir(path):
if list_folders:
yield Folder(path)
elif list_files:
if not self.pattern or fnmatch.fnmatch(a_file, self.pattern):
yield File(path)
def list_all(self):
"""
Yield both Files and Folders as the folder is listed.
"""
return self.list(list_folders=True, list_files=True)
def list_files(self):
"""
Yield only Files.
"""
return self.list(list_folders=False, list_files=True)
def list_folders(self):
"""
Yield only Folders.
"""
return self.list(list_folders=True, list_files=False)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Automatically list the folder contents when the context manager
is exited.
Calls self.visit_folder first and then calls self.visit_file for
any files found. After all files and folders have been exhausted
self.visit_complete is called.
"""
a_files = os.listdir(self.folder.path)
for a_file in a_files:
path = self.folder.child(a_file)
if os.path.isdir(path) and hasattr(self, 'visit_folder'):
self.visit_folder(Folder(path))
elif hasattr(self, 'visit_file'):
if not self.pattern or fnmatch.fnmatch(a_file, self.pattern):
self.visit_file(File(path))
if hasattr(self, 'visit_complete'):
self.visit_complete()
class Folder(FS):
"""
Represents a directory.
"""
def __init__(self, path):
super(Folder, self).__init__(path)
def child_folder(self, fragment):
"""
        Returns a folder object by combining the fragment with this folder's path
"""
return Folder(os.path.join(self.path, Folder(fragment).path))
def child_file(self, fragment):
"""
Returns a `File` object representing the `fragment`.
"""
return File(self.child(fragment))
def child(self, fragment):
"""
Returns a path of a child item represented by `fragment`.
"""
return os.path.join(self.path, FS(fragment).path)
def make(self):
"""
Creates this directory and any of the missing directories in the path.
Any errors that may occur are eaten.
"""
try:
if not self.exists:
logger.info("Creating %s" % self.path)
os.makedirs(self.path)
except os.error:
pass
return self
def zip(self, target=None, basepath=None):
"""
Zips the contents of this folder. If `target` is not provided,
<name>.zip is used instead. `basepath` is used to specify the
base path for files in the archive. The path stored along with
the files in the archive will be relative to the `basepath`.
"""
target = self.parent.child(target or self.name + '.zip')
basepath = basepath or self.path
from zipfile import ZipFile
with ZipFile(target, 'w') as zip:
with self.walker as walker:
@walker.file_visitor
def add_file(f):
zip.write(f.path, f.get_relative_path(basepath))
def delete(self):
"""
Deletes the directory if it exists.
"""
if self.exists:
logger.info("Deleting %s" % self.path)
shutil.rmtree(self.path)
def copy_to(self, destination):
"""
Copies this directory to the given destination. Returns a Folder object
        that represents the copied directory.
"""
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
shutil.copytree(self.path, unicode(target))
return target
def move_to(self, destination):
"""
Moves this directory to the given destination. Returns a Folder object
that represents the moved directory.
"""
target = self.__get_destination__(destination)
logger.info("Move %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def rename_to(self, destination_name):
"""
        Renames this directory to the given name under the same parent. Returns
        a Folder object that represents the renamed directory.
"""
target = self.parent.child_folder(destination_name)
logger.info("Rename %s to %s" % (self, target))
shutil.move(self.path, unicode(target))
return target
def _create_target_tree(self, target):
"""
There is a bug in dir_util that makes `copy_tree` crash if a folder in
        the tree was deleted earlier and re-created since. To work around the
bug, we first walk the tree and create directories that are needed.
"""
source = self
with source.walker as walker:
@walker.folder_visitor
def visit_folder(folder):
"""
Create the mirror directory
"""
if folder != source:
Folder(folder.get_mirror(target, source)).make()
def copy_contents_to(self, destination):
"""
Copies the contents of this directory to the given destination.
        Returns a Folder object that represents the destination directory.
"""
logger.info("Copying contents of %s to %s" % (self, destination))
target = Folder(destination)
target.make()
self._create_target_tree(target)
dir_util.copy_tree(self.path, unicode(target))
return target
def get_walker(self, pattern=None):
"""
Return a `FolderWalker` object with a set pattern.
"""
return FolderWalker(self, pattern)
@property
def walker(self):
"""
Return a `FolderWalker` object
"""
return FolderWalker(self)
def get_lister(self, pattern=None):
"""
Return a `FolderLister` object with a set pattern.
"""
return FolderLister(self, pattern)
@property
def lister(self):
"""
Return a `FolderLister` object
"""
return FolderLister(self)
| {
"content_hash": "96203e71d637f456bec4ee760405370c",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 81,
"avg_line_length": 29.271472392638035,
"alnum_prop": 0.5611212994498297,
"repo_name": "laurentb/fswrap",
"id": "a1254f35ecabee23188a1610ec608295a6652b35",
"size": "19109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fswrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49148"
}
],
"symlink_target": ""
} |
from smart_server.smart.utils.startup import check_environment
import os
# check_environment()
| {
"content_hash": "b9a3ce9a5a5c6514d89658bb35cf1ac8",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 62,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.8210526315789474,
"repo_name": "smart-classic/smart_server",
"id": "6510416a175b928f2d0b37920ea60b0ca2b09b0a",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1903"
},
{
"name": "Python",
"bytes": "249743"
},
{
"name": "Shell",
"bytes": "462"
},
{
"name": "XSLT",
"bytes": "8343"
}
],
"symlink_target": ""
} |
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
visualization_graph = parameter.Parameter(default="svg", config_path=dict(section='scheduler', name='visualization-graph'))
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):
self.id = task_id
        self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.scheduler_disable_time = None
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
excessive_failures = False
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
excessive_failures = True
if self.failures.num_failures() >= self.disable_failures:
excessive_failures = True
return excessive_failures
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active # seconds since epoch
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
        Return True if this worker is not an assistant and all of its pending
        tasks have no resource requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, e.g. by using a database.
    These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def dump(self):
state = (self._tasks, self._active_workers)
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(state, fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self._tasks, self._active_workers = state
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker class, this code needs to be updated
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
# If you load from an old format where Workers don't contain tasks.
for k, worker in six.iteritems(self._active_workers):
worker.tasks = set()
for task in six.itervalues(self._tasks):
for worker_id in task.workers:
self._active_workers[worker_id].tasks.add(task)
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
# not sure why we have SUSPENDED, as it can never be set
if new_status == SUSPENDED:
new_status = PENDING
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None:
return
if new_status == FAILED and task.can_disable():
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
def prune(self, task, config, assistants):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
# remove workers from tasks
for task in self.get_active_tasks():
task.stakeholders.difference_update(delete_workers)
task.workers.difference_update(delete_workers)
def get_necessary_tasks(self):
necessary_tasks = set()
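        # Keep every task that is not DONE/DISABLED, or that was disabled by the scheduler
        # itself, plus its direct dependencies; prune() uses this set so that tasks which
        # assistant workers may still need are not removed.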
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
        :param config: an object of class "scheduler" or None (in which case the global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
if task.id not in necessary_tasks and self._state.prune(task, self._config, assistant_ids):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
self.update(worker_id)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if not (task.status == RUNNING and status == PENDING):
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task_id, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = time.time() + self._config.retry_delay
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
if expl is not None:
task.expl = expl
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, among_tasks):
"""
Return worker's rank function for task scheduling.
:return:
"""
dependents = collections.defaultdict(int)
def not_done(t):
task = self._state.get_task(t, default=None)
return task is None or task.status != DONE
for task in among_tasks:
if task.status != DONE:
deps = list(filter(not_done, task.deps))
inverse_num_deps = 1.0 / max(len(deps), 1)
for dep in deps:
dependents[dep] += inverse_num_deps
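        # Rank key: higher priority first, then tasks that more not-done tasks depend on
        # (each dependent contributes 1/num_remaining_deps), then older tasks (via -task.time).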
return lambda task: (task.priority, dependents[task.id], -task.time)
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def get_work(self, host=None, assistant=False, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node with no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
worker_id = kwargs['worker']
        # Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host})
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
            greedy_workers = dict()  # If there are no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in self._state.get_active_workers())
tasks = list(relevant_tasks)
tasks.sort(key=self._rank(among_tasks=tasks), reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and task.workers) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
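            # Account for currently running tasks in the greedy bookkeeping: their worker slot
            # and declared resources are treated as taken when deciding what else could run.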
if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task.id, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
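            # Iterative DFS: a PENDING task with deps is re-pushed onto the stack before its
            # deps, so it is popped again only after its children have been resolved (post-order).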
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
for task in self._state.get_active_tasks():
serialized[task.id] = self._serialize_task(task.id)
return serialized
def _recurse_deps(self, task_id, serialized):
if task_id not in serialized:
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# try to infer family and params from task_id
try:
family, _, param_str = task_id.rstrip(')').partition('(')
params = dict(param.split('=') for param in param_str.split(', '))
except BaseException:
family, params = '', {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
serialized[task_id] = self._serialize_task(task_id)
for dep in task.deps:
self._recurse_deps(dep, serialized)
def dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._recurse_deps(task_id, serialized)
return serialized
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def inverse_dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._traverse_inverse_deps(task_id, serialized)
return serialized
def _traverse_inverse_deps(self, task_id, serialized):
stack = [task_id]
serialized[task_id] = self._serialize_task(task_id)
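        # Walk the dependency graph in reverse: repeatedly scan the active tasks for any task
        # that depends on the current id, recording it as a downstream "dep" of that node.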
while len(stack) > 0:
curr_id = stack.pop()
for task in self._state.get_active_tasks():
if curr_id in task.deps:
serialized[curr_id]["deps"].append(task.id)
if task.id not in serialized:
serialized[task.id] = self._serialize_task(task.id)
serialized[task.id]["deps"] = []
stack.append(task.id)
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task_id, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task_id, successful)
elif status == PENDING:
self._task_history.task_scheduled(task_id)
elif status == RUNNING:
self._task_history.task_started(task_id, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
| {
"content_hash": "86871365cda43aaa763c46a6e3358d5f",
"timestamp": "",
"source": "github",
"line_count": 983,
"max_line_length": 149,
"avg_line_length": 40.28891149542218,
"alnum_prop": 0.5906726593273407,
"repo_name": "laserson/luigi",
"id": "bbd00efa6a01e9b66c8d1c500a11d99f948c447c",
"size": "40207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi/scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "31302"
},
{
"name": "JavaScript",
"bytes": "441840"
},
{
"name": "Python",
"bytes": "1079517"
},
{
"name": "Shell",
"bytes": "2451"
}
],
"symlink_target": ""
} |
"""Test hassbian config."""
import asyncio
import os
from unittest.mock import patch
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.components.config.hassbian import (
HassbianSuitesView, HassbianSuiteInstallView)
def test_setup_check_env_prevents_load(hass, loop):
"""Test it does not set up hassbian if environment var not present."""
with patch.dict(os.environ, clear=True), \
patch.object(config, 'SECTIONS', ['hassbian']), \
patch('homeassistant.components.http.'
'HomeAssistantHTTP.register_view') as reg_view:
loop.run_until_complete(async_setup_component(hass, 'config', {}))
assert 'config' in hass.config.components
assert reg_view.called is False
def test_setup_check_env_works(hass, loop):
"""Test it sets up hassbian if environment var present."""
with patch.dict(os.environ, {'FORCE_HASSBIAN': '1'}), \
patch.object(config, 'SECTIONS', ['hassbian']), \
patch('homeassistant.components.http.'
'HomeAssistantHTTP.register_view') as reg_view:
loop.run_until_complete(async_setup_component(hass, 'config', {}))
assert 'config' in hass.config.components
assert len(reg_view.mock_calls) == 2
assert isinstance(reg_view.mock_calls[0][1][0], HassbianSuitesView)
assert isinstance(reg_view.mock_calls[1][1][0], HassbianSuiteInstallView)
@asyncio.coroutine
def test_get_suites(hass, aiohttp_client):
"""Test getting suites."""
with patch.dict(os.environ, {'FORCE_HASSBIAN': '1'}), \
patch.object(config, 'SECTIONS', ['hassbian']):
yield from async_setup_component(hass, 'config', {})
client = yield from aiohttp_client(hass.http.app)
resp = yield from client.get('/api/config/hassbian/suites')
assert resp.status == 200
result = yield from resp.json()
assert 'mosquitto' in result
info = result['mosquitto']
assert info['state'] == 'failed'
assert info['description'] == \
'Installs the Mosquitto package for setting up a local MQTT server'
@asyncio.coroutine
def test_install_suite(hass, aiohttp_client):
"""Test getting suites."""
with patch.dict(os.environ, {'FORCE_HASSBIAN': '1'}), \
patch.object(config, 'SECTIONS', ['hassbian']):
yield from async_setup_component(hass, 'config', {})
client = yield from aiohttp_client(hass.http.app)
resp = yield from client.post(
'/api/config/hassbian/suites/openzwave/install')
assert resp.status == 200
result = yield from resp.json()
assert result == {"status": "ok"}
| {
"content_hash": "b43123a842149a2f96b7682dccf90699",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 39.279411764705884,
"alnum_prop": 0.6712841632347435,
"repo_name": "persandstrom/home-assistant",
"id": "85fbf0c2e5a899875326d714c12a04d8f54c2d53",
"size": "2671",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/components/config/test_hassbian.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
from ....testing import assert_equal
from ..utils import MRIsCalc
def test_MRIsCalc_inputs():
input_map = dict(action=dict(argstr='%s',
mandatory=True,
position=-2,
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file1=dict(argstr='%s',
mandatory=True,
position=-3,
),
in_file2=dict(argstr='%s',
position=-1,
xor=[u'in_float', u'in_int'],
),
in_float=dict(argstr='%f',
position=-1,
xor=[u'in_file2', u'in_int'],
),
in_int=dict(argstr='%d',
position=-1,
xor=[u'in_file2', u'in_float'],
),
out_file=dict(argstr='-o %s',
mandatory=True,
),
subjects_dir=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = MRIsCalc.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MRIsCalc_outputs():
output_map = dict(out_file=dict(),
)
outputs = MRIsCalc.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "f4da256ae42ee3c05eb72b4244278f9c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.5917910447761194,
"repo_name": "carolFrohlich/nipype",
"id": "d7160510a73a28f4cecd414c00e289efa8c8fd54",
"size": "1394",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
__author__ = 'PAY.ON'
from . import abstract_payments_test_case | {
"content_hash": "4e220369d0a89f666ceaabd277013da3",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 41,
"avg_line_length": 31.5,
"alnum_prop": 0.7142857142857143,
"repo_name": "OpenPaymentPlatform/python",
"id": "d1189b4daf8fba9b27922642c2c35864c40d27ec",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136569"
}
],
"symlink_target": ""
} |
from string import Template
from datetime import date
grumpycoinDir = "./";
inFile = grumpycoinDir+"/share/qt/Info.plist"
outFile = "GrumpyCoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = grumpycoinDir+"grumpycoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created" | {
"content_hash": "f0b267aa600bfde2ad51582889664a80",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.791666666666668,
"alnum_prop": 0.7166416791604198,
"repo_name": "grumpycoin/grumpycoin-v.1.2",
"id": "06d905fbec706e1f6ba21d3dc31e8b00163c5e9d",
"size": "912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "78622"
},
{
"name": "C++",
"bytes": "1375855"
},
{
"name": "IDL",
"bytes": "11702"
},
{
"name": "Objective-C",
"bytes": "2463"
},
{
"name": "Python",
"bytes": "36361"
},
{
"name": "Shell",
"bytes": "17733"
},
{
"name": "TypeScript",
"bytes": "3810608"
}
],
"symlink_target": ""
} |
from django.test import TestCase
class webeditTest(TestCase):
"""
Tests for django-webedit
"""
def test_webedit(self):
pass | {
"content_hash": "cc631d2abcc92b57805645d2571fbddb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 32,
"avg_line_length": 16.555555555555557,
"alnum_prop": 0.6375838926174496,
"repo_name": "callowayproject/django-webedit",
"id": "b333d5196a9b5ba33f4a54639af120dad7cc30a2",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webedit/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "38563"
},
{
"name": "Python",
"bytes": "25462"
}
],
"symlink_target": ""
} |
"""The command group for the RuntimeConfig CLI."""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class RuntimeConfig(base.Group):
"""Manage runtime configuration resources."""
| {
"content_hash": "ad0ad2888eea317448f815e9179a791a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 27.625,
"alnum_prop": 0.7828054298642534,
"repo_name": "KaranToor/MA450",
"id": "9221ede20ddee3d1a1a70de40e79bad4c62b850c",
"size": "817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/deployment_manager/runtime_configs/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
from office365.entity import Entity
class DirectoryAudit(Entity):
"""Represents the directory audit items and its collection."""
pass
| {
"content_hash": "26370d286b4847fe36badfd2687d0764",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 24,
"alnum_prop": 0.75,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "309097984b7b1099204ad33d92516403d514dc68",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/directory/audit/directory_audit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
from devinclude import *
# from bustime.models import *
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.endpoints import serverFromString
from autobahn.wamp import types
from autobahn.twisted import wamp, websocket
from autobahn.websocket.compress import PerMessageDeflateOffer, PerMessageDeflateOfferAccept
import zmq
import zerorpc
PORT = 9002
RC = "tcp://127.0.0.1:9001"
C = zerorpc.Client
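# Each rpc_* helper below opens a short-lived zerorpc connection to the backend at RC,
# forwards the call, closes the connection and returns the result.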
def rpc_bdata(*args, **kwargs):
c = C()
c.connect(RC)
r = c.rpc_bdata(*args, **kwargs)
c.close()
return r
def rpc_bootstrap_amounts(*args, **kwargs):
c = C()
c.connect(RC)
r = c.rpc_bootstrap_amounts(*args, **kwargs)
c.close()
return r
def rpc_passenger(*args, **kwargs):
c = C()
c.connect(RC)
r = c.rpc_passenger(*args, **kwargs)
c.close()
return r
def rpc_tcard(*args, **kwargs):
c = C()
c.connect(RC)
r = c.rpc_tcard(*args, **kwargs)
c.close()
return r
def rpc_stop_ids(ids):
c = C()
c.connect(RC)
    r = c.rpc_stop_ids(ids)
c.close()
return r
class MyBackendComponent(wamp.ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
# regs = []
yield self.register(rpc_bdata, u'ru.bustime.rpc_bdata')
yield self.register(rpc_passenger, u'ru.bustime.rpc_passenger')
# mobile support only
yield self.register(rpc_bootstrap_amounts,
u'ru.bustime.rpc_bootstrap_amounts')
yield self.register(rpc_tcard, u'ru.bustime.rpc_tcard')
yield self.register(rpc_stop_ids, u'ru.bustime.rpc_stop_ids')
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
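# Note: 'accept' is not wired up below; presumably it is meant to be passed to the WebSocket
# factory (e.g. via setProtocolOptions(perMessageCompressionAccept=accept)) to enable
# permessage-deflate compression.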
if __name__ == '__main__':
router_factory = wamp.RouterFactory()
session_factory = wamp.RouterSessionFactory(router_factory)
component_config = types.ComponentConfig(realm="realm1")
component_session = MyBackendComponent(component_config)
session_factory.add(component_session)
transport_factory = websocket.WampWebSocketServerFactory(session_factory,
debug=False,
debug_wamp=False)
server = serverFromString(reactor, "tcp:%s" % PORT)
server.listen(transport_factory)
reactor.run()
| {
"content_hash": "5dac81d413c1ea80a92876661182b90a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 26.943181818181817,
"alnum_prop": 0.6296921130324757,
"repo_name": "norn/bustime",
"id": "709c5ae89e07d80580621d698f0dc1da98888d67",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zbusd/zwebsocket-pure-rpc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56066"
},
{
"name": "HTML",
"bytes": "158846"
},
{
"name": "JavaScript",
"bytes": "166643"
},
{
"name": "PLpgSQL",
"bytes": "2143"
},
{
"name": "Python",
"bytes": "161770"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
} |
import asyncio
import socket
import threading
import typing
from asyncpg import cluster
class StopServer(Exception):
pass
class TCPFuzzingProxy:
def __init__(self, *, listening_addr: str='127.0.0.1',
listening_port: typing.Optional[int]=None,
backend_host: str, backend_port: int,
settings: typing.Optional[dict]=None) -> None:
self.listening_addr = listening_addr
self.listening_port = listening_port
self.backend_host = backend_host
self.backend_port = backend_port
self.settings = settings or {}
self.loop = None
self.connectivity = None
self.connectivity_loss = None
self.stop_event = None
self.connections = {}
self.sock = None
self.listen_task = None
async def _wait(self, work):
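        # Race the given awaitable against the proxy's stop event: raise StopServer if the
        # proxy is shutting down, otherwise return the work's result; cancel whichever remains.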
work_task = asyncio.ensure_future(work)
stop_event_task = asyncio.ensure_future(self.stop_event.wait())
try:
await asyncio.wait(
[work_task, stop_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.stop_event.is_set():
raise StopServer()
else:
return work_task.result()
finally:
if not work_task.done():
work_task.cancel()
if not stop_event_task.done():
stop_event_task.cancel()
def start(self):
started = threading.Event()
self.thread = threading.Thread(
target=self._start_thread, args=(started,))
self.thread.start()
if not started.wait(timeout=2):
raise RuntimeError('fuzzer proxy failed to start')
def stop(self):
self.loop.call_soon_threadsafe(self._stop)
self.thread.join()
def _stop(self):
self.stop_event.set()
def _start_thread(self, started_event):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.connectivity = asyncio.Event()
self.connectivity.set()
self.connectivity_loss = asyncio.Event()
self.stop_event = asyncio.Event()
if self.listening_port is None:
self.listening_port = cluster.find_available_port()
self.sock = socket.socket()
self.sock.bind((self.listening_addr, self.listening_port))
self.sock.listen(50)
self.sock.setblocking(False)
try:
self.loop.run_until_complete(self._main(started_event))
finally:
self.loop.close()
async def _main(self, started_event):
self.listen_task = asyncio.ensure_future(self.listen())
# Notify the main thread that we are ready to go.
started_event.set()
try:
await self.listen_task
finally:
for c in list(self.connections):
c.close()
await asyncio.sleep(0.01)
if hasattr(self.loop, 'remove_reader'):
self.loop.remove_reader(self.sock.fileno())
self.sock.close()
async def listen(self):
while True:
try:
client_sock, _ = await self._wait(
self.loop.sock_accept(self.sock))
backend_sock = socket.socket()
backend_sock.setblocking(False)
await self._wait(self.loop.sock_connect(
backend_sock, (self.backend_host, self.backend_port)))
except StopServer:
break
conn = Connection(client_sock, backend_sock, self)
conn_task = self.loop.create_task(conn.handle())
self.connections[conn] = conn_task
def trigger_connectivity_loss(self):
self.loop.call_soon_threadsafe(self._trigger_connectivity_loss)
def _trigger_connectivity_loss(self):
self.connectivity.clear()
self.connectivity_loss.set()
def restore_connectivity(self):
self.loop.call_soon_threadsafe(self._restore_connectivity)
def _restore_connectivity(self):
self.connectivity.set()
self.connectivity_loss.clear()
def reset(self):
self.restore_connectivity()
def _close_connection(self, connection):
conn_task = self.connections.pop(connection, None)
if conn_task is not None:
conn_task.cancel()
def close_all_connections(self):
for conn in list(self.connections):
self.loop.call_soon_threadsafe(self._close_connection, conn)
class Connection:
def __init__(self, client_sock, backend_sock, proxy):
self.client_sock = client_sock
self.backend_sock = backend_sock
self.proxy = proxy
self.loop = proxy.loop
self.connectivity = proxy.connectivity
self.connectivity_loss = proxy.connectivity_loss
self.proxy_to_backend_task = None
self.proxy_from_backend_task = None
self.is_closed = False
def close(self):
if self.is_closed:
return
self.is_closed = True
if self.proxy_to_backend_task is not None:
self.proxy_to_backend_task.cancel()
self.proxy_to_backend_task = None
if self.proxy_from_backend_task is not None:
self.proxy_from_backend_task.cancel()
self.proxy_from_backend_task = None
self.proxy._close_connection(self)
async def handle(self):
self.proxy_to_backend_task = asyncio.ensure_future(
self.proxy_to_backend())
self.proxy_from_backend_task = asyncio.ensure_future(
self.proxy_from_backend())
try:
await asyncio.wait(
[self.proxy_to_backend_task, self.proxy_from_backend_task],
return_when=asyncio.FIRST_COMPLETED)
finally:
if self.proxy_to_backend_task is not None:
self.proxy_to_backend_task.cancel()
if self.proxy_from_backend_task is not None:
self.proxy_from_backend_task.cancel()
# Asyncio fails to properly remove the readers and writers
# when the task doing recv() or send() is cancelled, so
# we must remove the readers and writers manually before
# closing the sockets.
self.loop.remove_reader(self.client_sock.fileno())
self.loop.remove_writer(self.client_sock.fileno())
self.loop.remove_reader(self.backend_sock.fileno())
self.loop.remove_writer(self.backend_sock.fileno())
self.client_sock.close()
self.backend_sock.close()
async def _read(self, sock, n):
read_task = asyncio.ensure_future(
self.loop.sock_recv(sock, n))
conn_event_task = asyncio.ensure_future(
self.connectivity_loss.wait())
try:
await asyncio.wait(
[read_task, conn_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.connectivity_loss.is_set():
return None
else:
return read_task.result()
finally:
if not self.loop.is_closed():
if not read_task.done():
read_task.cancel()
if not conn_event_task.done():
conn_event_task.cancel()
async def _write(self, sock, data):
write_task = asyncio.ensure_future(
self.loop.sock_sendall(sock, data))
conn_event_task = asyncio.ensure_future(
self.connectivity_loss.wait())
try:
await asyncio.wait(
[write_task, conn_event_task],
return_when=asyncio.FIRST_COMPLETED)
if self.connectivity_loss.is_set():
return None
else:
return write_task.result()
finally:
if not self.loop.is_closed():
if not write_task.done():
write_task.cancel()
if not conn_event_task.done():
conn_event_task.cancel()
async def proxy_to_backend(self):
buf = None
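        # 'buf' holds a chunk that was read just as connectivity was lost, so it can be
        # replayed once connectivity is restored instead of being dropped.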
try:
while True:
await self.connectivity.wait()
if buf is not None:
data = buf
buf = None
else:
data = await self._read(self.client_sock, 4096)
if data == b'':
break
if self.connectivity_loss.is_set():
if data:
buf = data
continue
await self._write(self.backend_sock, data)
except ConnectionError:
pass
finally:
if not self.loop.is_closed():
self.loop.call_soon(self.close)
async def proxy_from_backend(self):
buf = None
try:
while True:
await self.connectivity.wait()
if buf is not None:
data = buf
buf = None
else:
data = await self._read(self.backend_sock, 4096)
if data == b'':
break
if self.connectivity_loss.is_set():
if data:
buf = data
continue
await self._write(self.client_sock, data)
except ConnectionError:
pass
finally:
if not self.loop.is_closed():
self.loop.call_soon(self.close)
| {
"content_hash": "921dc3a1e2fe6aa9113c42078d18ef27",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 75,
"avg_line_length": 32.070234113712374,
"alnum_prop": 0.5471894879549484,
"repo_name": "MagicStack/asyncpg",
"id": "887456462fffd0dd028dd54335c9964ac319683c",
"size": "9804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asyncpg/_testbase/fuzzer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "25495"
},
{
"name": "Cython",
"bytes": "220040"
},
{
"name": "Makefile",
"bytes": "904"
},
{
"name": "Python",
"bytes": "620096"
}
],
"symlink_target": ""
} |
from .prod import *
from ._admin import *
| {
"content_hash": "bd574d03f92a94d9144cdd4b436ba20c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 21,
"avg_line_length": 21,
"alnum_prop": 0.6904761904761905,
"repo_name": "sunForest/AviPost",
"id": "6184fcb15e7959d8294566eb0b65f8877451858e",
"size": "42",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avipost/avipost/settings/prod_admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "3188"
},
{
"name": "Python",
"bytes": "35993"
},
{
"name": "Shell",
"bytes": "2516"
}
],
"symlink_target": ""
} |
''' main '''
import argparse
import json
import logging
import os
import pkgutil
import re
import sys
import time
import uuid
from httplib import HTTPConnection
from ..common import status
from heron.common.src.python.utils import log
# The location of default configure file
DEFAULT_TEST_CONF_FILE = "integration_test/src/python/test_runner/resources/test.json"
RETRY_ATTEMPTS = 15
#seconds
RETRY_INTERVAL = 10
class FileBasedExpectedResultsHandler(object):
def __init__(self, file_path):
self.file_path = file_path
def fetch_results(self):
# Read expected result from the expected result file
try:
if not os.path.exists(self.file_path):
raise status.TestFailure("Expected results file %s does not exist" % self.file_path)
else:
with open(self.file_path, "r") as expected_result_file:
return expected_result_file.read().rstrip()
except Exception as e:
raise status.TestFailure("Failed to read expected result file %s" % self.file_path, e)
class HttpBasedExpectedResultsHandler(object):
def __init__(self, server_host_port, topology_name, task_count):
self.server_host_port = server_host_port
self.topology_name = topology_name
self.task_count = task_count
# pylint: disable=unnecessary-lambda
def fetch_results(self):
try:
result = []
decoder = json.JSONDecoder(strict=False)
for i in range(0, self.task_count):
task_result = fetch_from_server(self.server_host_port, self.topology_name,
'expected results',
'/state/%s_tuples_emitted_%d' % (self.topology_name, i))
json_result = decoder.decode(task_result)
logging.info("Found %d tuples emitted from spout task %d", len(json_result), i)
result = result + json_result
if len(result) == 0:
raise status.TestFailure(
"Expected result set is empty for topology %s" % self.topology_name)
# need to convert from a list of json objects to a string of a python list,
# without the unicode using double quotes, not single quotes.
return str(map(lambda x: str(x), result)).replace("'", '"')
except Exception as e:
raise status.TestFailure(
"Fetching expected result failed for %s topology" % self.topology_name, e)
class HttpBasedActualResultsHandler(object):
def __init__(self, server_host_port, topology_name):
self.server_host_port = server_host_port
self.topology_name = topology_name
def fetch_results(self):
try:
return fetch_from_server(self.server_host_port, self.topology_name,
'results', '/results/%s' % self.topology_name)
except Exception as e:
raise status.TestFailure("Fetching result failed for %s topology" % self.topology_name, e)
# pylint: disable=unnecessary-lambda
class ExactlyOnceResultsChecker(object):
"""Compares what results we found against what was expected. Verifies and exact match"""
def __init__(self, topology_name, expected_results_handler, actual_results_handler):
self.topology_name = topology_name
self.expected_results_handler = expected_results_handler
self.actual_results_handler = actual_results_handler
def check_results(self):
""" Checks the topology results from the server with the expected results from the file """
actual_result = self.actual_results_handler.fetch_results()
expected_result = self.expected_results_handler.fetch_results()
# Build a new instance of json decoder since the default one could not ignore "\n"
decoder = json.JSONDecoder(strict=False)
    # Heron doesn't guarantee the order of messages in any case, so we should sort the results.
    # Notice: here we treat every message as unique even if two are identical,
    # since we cannot judge here whether two messages are duplicates or not.
    # Users may emit messages along with a MESSAGE_ID or remove duplicates in the topology.
actual_results = sorted(decoder.decode(actual_result))
expected_results = sorted(decoder.decode(expected_result))
return self._compare(expected_results, actual_results)
def _compare(self, expected_results, actual_results):
# Compare the actual and expected result
if actual_results == expected_results:
return status.TestSuccess(
"Topology %s result matches expected result: %s expected tuples found exactly once" %
(len(expected_results), self.topology_name))
else:
failure = status.TestFailure("Actual result did not match expected result")
# lambda required below to remove the unicode 'u' from the output
logging.info("Actual result ---------- \n" + str(map(lambda x: str(x), actual_results)))
logging.info("Expected result ---------- \n" + str(map(lambda x: str(x), expected_results)))
raise failure
class AtLeastOnceResultsChecker(ExactlyOnceResultsChecker):
"""Compares what results we found against what was expected. Verifies and exact match"""
def _compare(self, expected_results, actual_results):
expected_counts = _frequency_dict(expected_results)
actual_counts = _frequency_dict(actual_results)
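    # At-least-once semantics: every expected value must occur in the actual results at least
    # as many times as expected; extra (duplicate) occurrences are allowed.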
missed_counts = {}
for expected_value in expected_counts:
expected_count = expected_counts[expected_value]
if expected_value in actual_counts:
actual_count = actual_counts[expected_value]
if actual_count < expected_count:
missed_counts[expected_value] = expected_count
else:
missed_counts[expected_value] = expected_count
if len(missed_counts) == 0:
return status.TestSuccess(
"Topology %s result matches expected result: %s expected tuples found at least once" %
(self.topology_name, len(expected_counts)))
else:
failure = status.TestFailure("Actual result did not match expected result")
# lambda required below to remove the unicode 'u' from the output
logging.info("Actual value frequencies ---------- \n" + ', '.join(
map(lambda (k, v): "%s(%s)" % (str(k), v), actual_counts.iteritems())))
logging.info("Expected value frequencies ---------- \n" + ', '.join(
map(lambda (k, v): "%s(%s)" % (str(k), v), expected_counts.iteritems())))
raise failure
def _frequency_dict(values):
frequency = {}
for value in values:
count = 0
if value in frequency:
count = frequency[value]
frequency[value] = count + 1
return frequency
def run_test(topology_name, classpath, results_checker,
params, http_server_host_port, update_args, extra_topology_args):
''' Runs the test for one topology '''
#submit topology
try:
args = "-r http://%s/results -t %s %s" %\
(http_server_host_port, topology_name, extra_topology_args)
submit_topology(params.heron_cli_path, params.cli_config_path, params.cluster, params.role,
params.env, params.tests_bin_path, classpath,
params.release_package_uri, args)
except Exception as e:
raise status.TestFailure("Failed to submit %s topology" % topology_name, e)
logging.info("Successfully submitted %s topology", topology_name)
try:
if update_args:
# wait for the topology to be started before triggering an update
poll_state_server(http_server_host_port, topology_name, "topology_started")
logging.info("Verified topology successfully started, proceeding to update it")
update_topology(params.heron_cli_path, params.cli_config_path, params.cluster,
params.role, params.env, topology_name, update_args)
# update state server to trigger more emits from the topology
logging.info("Topology successfully updated, updating state server")
update_state_server(http_server_host_port, topology_name, "topology_updated", "true")
return results_checker.check_results()
except Exception as e:
raise status.TestFailure("Checking result failed for %s topology" % topology_name, e)
finally:
kill_topology(params.heron_cli_path, params.cli_config_path, params.cluster,
params.role, params.env, topology_name)
def poll_state_server(server_host_port, topology_name, key):
return fetch_from_server(
server_host_port, topology_name, key, '/state/%s_%s' % (topology_name, key))
def update_state_server(http_server_host_port, topology_name, key, value):
connection = HTTPConnection(http_server_host_port)
connection.request('POST', '/state/%s_%s' % (topology_name, key), '"%s"' % value)
response = connection.getresponse()
return response.status == 200
def fetch_from_server(server_host_port, topology_name, data_name, path):
''' Make a http get request to fetch actual results from http server '''
for i in range(0, RETRY_ATTEMPTS):
logging.info("Fetching %s for topology %s, retry count: %d", data_name, topology_name, i)
response = get_http_response(server_host_port, path)
if response.status == 200:
return response.read()
    elif i != RETRY_ATTEMPTS - 1:
logging.info("Fetching %s failed with status: %s; reason: %s; body: %s",
data_name, response.status, response.reason, response.read())
time.sleep(RETRY_INTERVAL)
raise status.TestFailure("Failed to fetch %s after %d attempts" % (data_name, RETRY_ATTEMPTS))
def get_http_response(server_host_port, path):
''' get HTTP response '''
for _ in range(0, RETRY_ATTEMPTS):
try:
connection = HTTPConnection(server_host_port)
connection.request('GET', path)
response = connection.getresponse()
return response
except Exception:
time.sleep(RETRY_INTERVAL)
continue
raise status.TestFailure("Failed to get HTTP Response after %d attempts" % RETRY_ATTEMPTS)
def cluster_token(cluster, role, env):
if cluster == "local":
return cluster
return "%s/%s/%s" % (cluster, role, env)
def submit_topology(heron_cli_path, cli_config_path, cluster, role,
env, jar_path, classpath, pkg_uri, args=None):
''' Submit topology using heron-cli '''
# Form the command to submit a topology.
# Note the single quote around the arg for heron.package.core.uri.
# This is needed to prevent shell expansion.
cmd = "%s submit --config-path=%s %s %s %s %s" %\
(heron_cli_path, cli_config_path, cluster_token(cluster, role, env),
jar_path, classpath, args)
if pkg_uri is not None:
cmd = "%s --config-property heron.package.core.uri='%s'" %(cmd, pkg_uri)
logging.info("Submitting topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Unable to submit the topology")
def kill_topology(heron_cli_path, cli_config_path, cluster, role, env, topology_name):
''' Kill a topology using heron-cli '''
cmd = "%s kill --config-path=%s %s %s" %\
(heron_cli_path, cli_config_path, cluster_token(cluster, role, env), topology_name)
logging.info("Killing topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Failed to kill topology %s" % topology_name)
logging.info("Successfully killed topology %s", topology_name)
def update_topology(heron_cli_path, cli_config_path, cluster,
role, env, topology_name, update_args):
cmd = "%s update --config-path=%s %s %s %s --verbose" %\
(heron_cli_path, cli_config_path,
cluster_token(cluster, role, env), update_args, topology_name)
logging.info("Update topology: %s", cmd)
if os.system(cmd) != 0:
raise status.TestFailure("Failed to update topology %s" % topology_name)
logging.info("Successfully updated topology %s", topology_name)
def filter_test_topologies(test_topologies, test_pattern):
initial_topologies = test_topologies
if test_pattern:
pattern = re.compile(test_pattern)
test_topologies = filter(lambda x: pattern.match(x['topologyName']), test_topologies)
if len(test_topologies) == 0:
logging.error("Test filter '%s' did not match any configured test names:\n%s",
test_pattern, '\n'.join(map(lambda x: x['topologyName'], initial_topologies)))
sys.exit(1)
return test_topologies
def run_tests(conf, args):
''' Run the test for each topology specified in the conf file '''
successes = []
failures = []
timestamp = time.strftime('%Y%m%d%H%M%S')
http_server_host_port = "%s:%d" % (args.http_server_hostname, args.http_server_port)
if args.tests_bin_path.endswith("scala-integration-tests.jar"):
test_topologies = filter_test_topologies(conf["scalaTopologies"], args.test_topology_pattern)
topology_classpath_prefix = conf["topologyClasspathPrefix"]
extra_topology_args = "-s http://%s/state" % http_server_host_port
elif args.tests_bin_path.endswith("integration-tests.jar"):
test_topologies = filter_test_topologies(conf["javaTopologies"], args.test_topology_pattern)
topology_classpath_prefix = conf["topologyClasspathPrefix"]
extra_topology_args = "-s http://%s/state" % http_server_host_port
elif args.tests_bin_path.endswith("heron_integ_topology.pex"):
test_topologies = filter_test_topologies(conf["pythonTopologies"], args.test_topology_pattern)
topology_classpath_prefix = ""
extra_topology_args = ""
else:
raise ValueError("Unrecognized binary file type: %s" % args.tests_bin_path)
current = 1
for topology_conf in test_topologies:
topology_name = ("%s_%s_%s") % (timestamp, topology_conf["topologyName"], str(uuid.uuid4()))
classpath = topology_classpath_prefix + topology_conf["classPath"]
# if the test includes an update we need to pass that info to the topology so it can send
# data accordingly. This flag causes the test spout to emit, then check the state of this
# token, then emit more.
update_args = ""
topology_args = extra_topology_args
if "updateArgs" in topology_conf:
update_args = topology_conf["updateArgs"]
if "topologyArgs" in topology_conf:
if topology_conf["topologyArgs"] == "emit_util" and update_args == "":
raise ValueError("Specifying a test with emit_until spout wrapper without updateArgs "
+ "will cause the spout to emit indefinitely. Not running topology "
+ topology_name)
topology_args = "%s %s" % (topology_args, topology_conf["topologyArgs"])
results_checker = load_result_checker(
topology_name, topology_conf,
load_expected_result_handler(topology_name, topology_conf, args, http_server_host_port),
HttpBasedActualResultsHandler(http_server_host_port, topology_name))
logging.info("==== Starting test %s of %s: %s ====",
current, len(test_topologies), topology_name)
start_secs = int(time.time())
try:
result = run_test(topology_name, classpath, results_checker,
args, http_server_host_port, update_args, topology_args)
test_tuple = (topology_name, int(time.time()) - start_secs)
if isinstance(result, status.TestSuccess):
successes += [test_tuple]
elif isinstance(result, status.TestFailure):
failures += [test_tuple]
else:
logging.error("Unrecognized test response returned for test %s: %s",
topology_name, str(result))
failures += [test_tuple]
except status.TestFailure:
test_tuple = (topology_name, int(time.time()) - start_secs)
failures += [test_tuple]
current += 1
return successes, failures
def load_result_checker(topology_name, topology_conf,
expected_result_handler, actual_result_handler):
  # the task count setting is used to trigger the emit until spout wrapper, which is
# currently only used in at least once tests. if that changes we need to expand our config
# settings
if "expectedHttpResultTaskCount" in topology_conf:
return AtLeastOnceResultsChecker(
topology_name, expected_result_handler, actual_result_handler)
else:
return ExactlyOnceResultsChecker(
topology_name, expected_result_handler, actual_result_handler)
def load_expected_result_handler(topology_name, topology_conf, args, http_server_host_port):
if "expectedResultRelativePath" in topology_conf:
expected_result_file_path =\
args.topologies_path + "/" + topology_conf["expectedResultRelativePath"]
return FileBasedExpectedResultsHandler(expected_result_file_path)
elif "expectedHttpResultTaskCount" in topology_conf:
return HttpBasedExpectedResultsHandler(
http_server_host_port, topology_name, topology_conf["expectedHttpResultTaskCount"])
else:
raise status.TestFailure("Either expectedResultRelativePath or expectedHttpResultTaskCount "
+ "must be specified for test %s " % topology_name)
def main():
''' main '''
log.configure(level=logging.DEBUG)
conf_file = DEFAULT_TEST_CONF_FILE
# Read the configuration file from package
conf_string = pkgutil.get_data(__name__, conf_file)
decoder = json.JSONDecoder(strict=False)
# Convert the conf file to a json format
conf = decoder.decode(conf_string)
# Parse the arguments passed via command line
parser = argparse.ArgumentParser(description='This is the heron integration test framework')
parser.add_argument('-hc', '--heron-cli-path', dest='heron_cli_path',
default=conf['heronCliPath'])
parser.add_argument('-tb', '--tests-bin-path', dest='tests_bin_path')
parser.add_argument('-cl', '--cluster', dest='cluster', default=conf['cluster'])
parser.add_argument('-ev', '--env', dest='env', default=conf['env'])
parser.add_argument('-rl', '--role', dest='role', default=conf['role'])
parser.add_argument('-rh', '--http-server-hostname', dest='http_server_hostname')
parser.add_argument('-rp', '--http-server-port', dest='http_server_port', type=int,
default=conf['resultsServerPort'])
parser.add_argument('-tp', '--topologies-path', dest='topologies_path')
parser.add_argument('-ts', '--test-topology-pattern', dest='test_topology_pattern', default=None)
parser.add_argument('-pi', '--release-package-uri', dest='release_package_uri', default=None)
parser.add_argument('-cd', '--cli-config-path', dest='cli_config_path',
default=conf['cliConfigPath'])
#parser.add_argument('-dt', '--disable-topologies', dest='disabledTopologies', default='',
# help='comma separated test case(classpath) name that will not be run')
#parser.add_argument('-et', '--enable-topologies', dest='enableTopologies', default=None,
# help='comma separated test case(classpath) name that will be run only')
args, unknown_args = parser.parse_known_args()
if unknown_args:
logging.error('Unknown argument passed to %s: %s', sys.argv[0], unknown_args[0])
sys.exit(1)
(successes, failures) = run_tests(conf, args)
total = len(failures) + len(successes)
if not failures:
logging.info("SUCCESS: %s (all) tests passed:", len(successes))
for test in successes:
logging.info(" - %s: %s", ("[%ss]" % test[1]).ljust(8), test[0])
sys.exit(0)
else:
logging.error("FAILURE: %s/%s tests failed:", len(failures), total)
for test in failures:
logging.error(" - %s: %s", ("[%ss]" % test[1]).ljust(8), test[0])
sys.exit(1)
if __name__ == '__main__':
main()
| {
"content_hash": "0d18d20e11e16b129f439716aad7698f",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 99,
"avg_line_length": 44.53793103448276,
"alnum_prop": 0.6770930112521937,
"repo_name": "ashvina/heron",
"id": "f5d4d1cad10d4cf3ddd0aa179fc1ab3f4ac4de0b",
"size": "19374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration_test/src/python/test_runner/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14063"
},
{
"name": "C++",
"bytes": "1710172"
},
{
"name": "CSS",
"bytes": "109554"
},
{
"name": "HCL",
"bytes": "2114"
},
{
"name": "HTML",
"bytes": "156836"
},
{
"name": "Java",
"bytes": "4695767"
},
{
"name": "JavaScript",
"bytes": "1112006"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "2143"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1635113"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "83215"
},
{
"name": "Shell",
"bytes": "162792"
},
{
"name": "Smarty",
"bytes": "528"
}
],
"symlink_target": ""
} |
"""
NFINDR function
"""
import math
import random
import numpy as np
import scipy as sp
from . import eea
def NFINDR(data, q, transform=None, maxit=None, ATGP_init=False):
"""
N-FINDR endmembers induction algorithm.
Parameters:
data: `numpy array`
        Column data matrix [nsamples x nvariables].
q: `int`
Number of endmembers to be induced.
transform: `numpy array [default None]`
The transformed 'data' matrix by MNF (N x components). In this
case the number of components must == q-1. If None, the built-in
call to PCA is used to transform the data.
maxit: `int [default None]`
Maximum number of iterations. Default = 3*q.
ATGP_init: `boolean [default False]`
Use ATGP to generate the first endmembers set instead
of a random selection.
    Returns: `tuple: numpy array, numpy array, numpy array, int`
* Set of induced endmembers (N x p)
* Set of transformed induced endmembers (N x p)
* Array of indices into the array data corresponding to the
induced endmembers
* The number of iterations.
References:
Winter, M. E., "N-FINDR: an algorithm for fast autonomous spectral
end-member determination in hyperspectral data", presented at the Imaging
Spectrometry V, Denver, CO, USA, 1999, vol. 3753, pgs. 266-275.
"""
# data size
nsamples, nvariables = data.shape
    if maxit is None:
        maxit = 3*q
    if transform is None:
        # transform the data to shape (N x q-1) with the built-in PCA
        transform = eea._PCA_transform(data, q-1)
# Initialization
# TestMatrix is a square matrix, the first row is set to 1
TestMatrix = np.zeros((q, q), dtype=np.float32, order='F')
TestMatrix[0,:] = 1
IDX = None
    if ATGP_init:
induced_em, idx = eea.ATGP(transform, q)
IDX = np.array(idx, dtype=np.int64)
for i in range(q):
TestMatrix[1:q, i] = induced_em[i]
else:
IDX = np.zeros((q), dtype=np.int64)
for i in range(q):
idx = int(math.floor(random.random()*nsamples))
TestMatrix[1:q, i] = transform[idx]
IDX[i] = idx
actualVolume = 0
it = 0
v1 = -1.0
v2 = actualVolume
while it <= maxit and v2 > v1:
for k in range(q):
for i in range(nsamples):
TestMatrix[1:q, k] = transform[i]
volume = math.fabs(sp.linalg._flinalg.sdet_c(TestMatrix)[0])
if volume > actualVolume:
actualVolume = volume
IDX[k] = i
TestMatrix[1:q, k] = transform[IDX[k]]
it = it + 1
v1 = v2
v2 = actualVolume
E = np.zeros((len(IDX), nvariables), dtype=np.float32)
Et = np.zeros((len(IDX), q-1), dtype=np.float32)
for j in range(len(IDX)):
E[j] = data[IDX[j]]
Et[j] = transform[IDX[j]]
return E, Et, IDX, it
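# Usage sketch (not part of the original module). It runs only via
# `python -m pysptools.eea.nfindr` because of the relative import above; the
# random matrix is a stand-in for real [nsamples x nvariables] spectral data.
if __name__ == '__main__':
    test_data = np.random.rand(200, 20).astype(np.float32)
    E, Et, IDX, n_it = NFINDR(test_data, q=4)
    print('Induced endmember indices: %s after %i iterations' % (IDX, n_it))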
| {
"content_hash": "536b1fd366bbcb1413aded98e471bde7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 30.941176470588236,
"alnum_prop": 0.5541825095057035,
"repo_name": "ctherien/pysptools",
"id": "2cafdf288548f6a8b8a9e7a04145c453d3e28993",
"size": "3989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysptools/eea/nfindr.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7264"
},
{
"name": "HTML",
"bytes": "8140529"
},
{
"name": "Jupyter Notebook",
"bytes": "4526823"
},
{
"name": "Makefile",
"bytes": "7765"
},
{
"name": "Python",
"bytes": "490805"
}
],
"symlink_target": ""
} |
from project.models.fields.Field import Field
from project.models.fields.exceptions import FieldValidException
MESSAGE_MIN_LENGTH = 0
MESSAGE_MAX_LENGTH = 10000
class MessageField(Field):
def __init__(self, message):
self.set(message)
def set(self, message):
if not isinstance(message, str):
raise FieldValidException("message must be string")
        if not MESSAGE_MIN_LENGTH <= len(message) <= MESSAGE_MAX_LENGTH:
            raise FieldValidException(
                "message length should be from " + str(MESSAGE_MIN_LENGTH) + " to " + str(MESSAGE_MAX_LENGTH))
self._message = message
def get(self):
return self._message | {
"content_hash": "b517729f717bbaeba61bd5277607ab15",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 108,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.6666666666666666,
"repo_name": "AbramovVitaliy/Abramov-RIS-13",
"id": "5780223f015aa093acc0f661a872620074a446d5",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab4_5_6/project/models/fields/MessageField.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "919"
},
{
"name": "HTML",
"bytes": "3401"
},
{
"name": "Python",
"bytes": "18261"
},
{
"name": "Smarty",
"bytes": "1125"
}
],
"symlink_target": ""
} |
"""Generates default implementations of operator<< for enum types."""
import codecs
import os
import re
import string
import sys
_ENUM_START_RE = re.compile(r'\benum\b\s+(class\s+)?(\S+)\s+:?.*\{(\s+// private)?')
_ENUM_VALUE_RE = re.compile(r'([A-Za-z0-9_]+)(.*)')
_ENUM_END_RE = re.compile(r'^\s*\};$')
_ENUMS = {}
_NAMESPACES = {}
_ENUM_CLASSES = {}
def Confused(filename, line_number, line):
sys.stderr.write('%s:%d: confused by:\n%s\n' % (filename, line_number, line))
raise Exception("giving up!")
def ProcessFile(filename):
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
in_enum = False
is_enum_class = False
line_number = 0
namespaces = []
enclosing_classes = []
for raw_line in lines:
line_number += 1
if not in_enum:
# Is this the start of a new enum?
m = _ENUM_START_RE.search(raw_line)
if m:
# Yes, so add an empty entry to _ENUMS for this enum.
# Except when it's private
if m.group(3) is not None:
continue
is_enum_class = m.group(1) is not None
enum_name = m.group(2)
if len(enclosing_classes) > 0:
enum_name = '::'.join(enclosing_classes) + '::' + enum_name
_ENUMS[enum_name] = []
_NAMESPACES[enum_name] = '::'.join(namespaces)
_ENUM_CLASSES[enum_name] = is_enum_class
in_enum = True
continue
# Is this the start or end of a namespace?
m = re.compile(r'^namespace (\S+) \{').search(raw_line)
if m:
namespaces.append(m.group(1))
continue
m = re.compile(r'^\}\s+// namespace').search(raw_line)
if m:
namespaces = namespaces[0:len(namespaces) - 1]
continue
# Is this the start or end of an enclosing class or struct?
m = re.compile(r'^(?:class|struct)(?: MANAGED)? (\S+).* \{').search(raw_line)
if m:
enclosing_classes.append(m.group(1))
continue
m = re.compile(r'^\};').search(raw_line)
if m:
enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
continue
continue
# Is this the end of the current enum?
m = _ENUM_END_RE.search(raw_line)
if m:
if not in_enum:
Confused(filename, line_number, raw_line)
in_enum = False
continue
# The only useful thing in comments is the <<alternate text>> syntax for
# overriding the default enum value names. Pull that out...
enum_text = None
m_comment = re.compile(r'// <<(.*?)>>').search(raw_line)
if m_comment:
enum_text = m_comment.group(1)
# ...and then strip // comments.
line = re.sub(r'//.*', '', raw_line)
# Strip whitespace.
line = line.strip()
# Skip blank lines.
if len(line) == 0:
continue
# Since we know we're in an enum type, and we're not looking at a comment
# or a blank line, this line should be the next enum value...
m = _ENUM_VALUE_RE.search(line)
if not m:
Confused(filename, line_number, raw_line)
enum_value = m.group(1)
# By default, we turn "kSomeValue" into "SomeValue".
if enum_text == None:
enum_text = enum_value
if enum_text.startswith('k'):
enum_text = enum_text[1:]
# Lose literal values because we don't care; turn "= 123, // blah" into ", // blah".
rest = m.group(2).strip()
m_literal = re.compile(r'= (0x[0-9a-f]+|-?[0-9]+|\'.\')').search(rest)
if m_literal:
rest = rest[(len(m_literal.group(0))):]
# With "kSomeValue = kOtherValue," we take the original and skip later synonyms.
# TODO: check that the rhs is actually an existing value.
if rest.startswith('= k'):
continue
# Remove any trailing comma and whitespace
if rest.startswith(','):
rest = rest[1:]
rest = rest.strip()
# There shouldn't be anything left.
if len(rest):
Confused(filename, line_number, raw_line)
if len(enclosing_classes) > 0:
if is_enum_class:
enum_value = enum_name + '::' + enum_value
else:
enum_value = '::'.join(enclosing_classes) + '::' + enum_value
_ENUMS[enum_name].append((enum_value, enum_text))
def main():
local_path = sys.argv[1]
header_files = []
for header_file in sys.argv[2:]:
header_files.append(header_file)
ProcessFile(header_file)
print('#include <iostream>')
print('')
for header_file in header_files:
header_file = header_file.replace(local_path + '/', '')
print('#include "%s"' % header_file)
print('')
for enum_name in _ENUMS:
print('// This was automatically generated by %s --- do not edit!' % sys.argv[0])
namespaces = _NAMESPACES[enum_name].split('::')
for namespace in namespaces:
print('namespace %s {' % namespace)
print('std::ostream& operator<<(std::ostream& os, const %s& rhs) {' % enum_name)
print(' switch (rhs) {')
for (enum_value, enum_text) in _ENUMS[enum_name]:
print(' case %s: os << "%s"; break;' % (enum_value, enum_text))
if not _ENUM_CLASSES[enum_name]:
print(' default: os << "%s[" << static_cast<int>(rhs) << "]"; break;' % enum_name)
print(' }')
print(' return os;')
print('}')
for namespace in reversed(namespaces):
print('} // namespace %s' % namespace)
print('')
sys.exit(0)
if __name__ == '__main__':
main()
| {
"content_hash": "5894e154ac46e21a4585b2c57fc5c991",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 91,
"avg_line_length": 29.277173913043477,
"alnum_prop": 0.5832559866344904,
"repo_name": "treadstoneproject/artinst",
"id": "f666ad154b592ed0ab757ea7435a3297efcdd2d5",
"size": "6012",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/generate-operator-out.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "806950"
},
{
"name": "C",
"bytes": "48281"
},
{
"name": "C++",
"bytes": "22875315"
},
{
"name": "CMake",
"bytes": "10721"
},
{
"name": "Java",
"bytes": "2823986"
},
{
"name": "LLVM",
"bytes": "15556"
},
{
"name": "Makefile",
"bytes": "289495"
},
{
"name": "Objective-J",
"bytes": "1784"
},
{
"name": "Python",
"bytes": "10480"
},
{
"name": "Shell",
"bytes": "146364"
}
],
"symlink_target": ""
} |
def extractFalinmer(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
match = re.search('(\\d+)\\-(\\d+)', item['title'])
if not vol and match:
vol = match.group(1)
chp = match.group(2)
    if item['title'].lower().startswith('mcm') and 'raw' not in item['title'].lower():
return buildReleaseMessageWithType(item, 'Magi Craft Meister', vol, chp, frag=frag, postfix=postfix)
return False
| {
"content_hash": "5ff4ee0a2b637fb165ad3013fd557c19",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 102,
"avg_line_length": 37,
"alnum_prop": 0.6718146718146718,
"repo_name": "fake-name/ReadableWebProxy",
"id": "0b5c1187b2fc2b7ba2dffac7c726b303d0478940",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractFalinmer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
'''
Copyright (c) 2009, Patrick Maupin, Austin, Texas
A wrapper around subprocess that performs two functions:
1) Adds non-blocking I/O
2) Adds process killability and timeouts
Currently only works under Linux.
'''
import sys
import subprocess
import select
import os
import time
import textwrap
from signal import SIGTERM, SIGKILL
import traceback
class BaseExec(object):
''' BaseExec is designed to be subclassed.
It wraps subprocess.Popen, and adds the
ability to kill a process and to manage
timeouts. By default, it uses pipes for
the new process, but doesn't do anything
with them.
'''
is_python_proc = False
defaults = dict(
bufsize=0,
executable=None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=None, # Callable object in child process
close_fds=False,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
startupinfo=None,
creationflags=0,
timeout=500.0, # Time in seconds before termination
killdelay=20.0, # Time in seconds after termination before kill
python_proc=None,
)
def before_init(self, keywords):
# Replace this in subclass to do more setup
pass
def after_init(self):
# Replace this in subclass to execute code after
# process creation
pass
def wrap_python_exec(self, preexec_fn):
# Don't let anything in our buffer wrap back into new process
# Otherwise, it might (will!) come out twice...
sys.stdout.flush()
sys.stderr.flush()
self.is_python_proc = True
def wrapper():
sys.argv = self.args
try:
preexec_fn()
except Exception:
sys.stdout.flush()
print >> sys.stderr, traceback.format_exc()
sys.stderr.write(chr(1))
except SystemExit, s:
sys.stdout.flush()
code = s.code
try:
code = int(code)
except:
pass
if code:
print >> sys.stderr, code
sys.stderr.write(chr(1))
else:
sys.stdout.flush()
sys.stderr.flush()
return wrapper
def __init__(self, *args, **kw):
# Allow flexible args handling.
if len(args) < 2:
try:
args[0] + ''
except TypeError:
args = args[0]
else:
args = args[0].split()
self.args = args
# Handle defaults
keywords = self.defaults.copy()
keywords.update(kw)
# Get our timeout information, and call
# subclass to get other parameters
self.timeout = keywords.pop('timeout') + time.time()
self.killdelay = keywords.pop('killdelay')
self.before_init(keywords)
# Handle any special Python proc
python_proc = keywords.pop('python_proc')
if python_proc is not None:
assert keywords.pop('preexec_fn') is None
keywords['preexec_fn'] = self.wrap_python_exec(python_proc)
args = ['true']
# Start the process and let subclass execute
proc = subprocess.Popen(args, **keywords)
self.proc = proc
self.after_init()
def kill(self, force=False):
action = force and SIGKILL or SIGTERM
os.kill(self.proc.pid, action)
return action
def checktimeout(self):
# Poll to decide if subprocess needs to be killed
now = time.time()
if now < self.timeout:
return 0
killdelay, self.killdelay = self.killdelay, 0
self.timeout = now + killdelay
return self.kill(not killdelay)
class PipeReader(object):
''' PipeReader is an iterator class designed to read from
the next ready pipe.
It can handle as many pipes at a time as desired,
and each call to next() will yield one of the following:
pipe, data -- After reading data from pipe
pipe, None -- When pipe is closing
None, None -- On timeout if no data
It raises StopIteration if no pipes are still open.
A logical extension would be to handle output pipes as well,
such as the subprocess's stdin, but the initial version is
input pipes only (the subprocess's stdout and stderr).
'''
TIMEOUT = 1.0 # Poll interval in seconds
BUFSIZE = 100000
def __init__(self, *pipes, **kw):
self.timeout = kw.pop('timeout', self.TIMEOUT)
self.bufsize = kw.pop('bufsize', self.BUFSIZE)
self.by_pipenum = {} # Dictionary of read functions
self.ready = [] # List of ready pipes
assert not kw, kw # Check for mispelings :)
for pipe in pipes:
self.addpipe(pipe)
def addpipe(self, pipe):
pipenum = pipe.fileno()
bufsize = self.bufsize
by_pipenum = self.by_pipenum
def getdata():
chunk = os.read(pipenum, bufsize)
if chunk:
return pipe, chunk
else:
# Here, we're done. Remove ourselves from
# the dictionary and return None as a notification
del by_pipenum[pipenum]
return pipe, None
assert by_pipenum.setdefault(pipenum, getdata) is getdata
def __iter__(self):
return self
def next(self):
ready = self.ready
if not ready:
allpipes = list(self.by_pipenum)
if not allpipes:
raise StopIteration
ready[:] = select.select(allpipes,[],[],self.timeout)[0]
if not ready:
return None, None # Allow code to execute after timeout
return self.by_pipenum[ready.pop()]()
class LineSplitter(object):
''' LineSplitter takes arbitrary string
data and splits it into text lines.
It manages the case where a single
line of data returned from a pipe is
split across multiple reads.
'''
def __init__(self, prefix):
self.prefix = prefix
self.leftovers = ''
self.lines = []
def __call__(self, chunk):
if not chunk:
if self.leftovers:
chunk = '\n'
else:
return self
chunk = chunk.replace('\r\n', '\n').replace('\r', '\n')
chunk = self.leftovers + chunk
newlines = chunk.split('\n')
self.leftovers = newlines.pop()
oldlines = self.lines
oldlines.reverse()
oldlines.extend(newlines)
oldlines.reverse()
return self
def __iter__(self):
return self
def next(self):
try:
return self.prefix, self.lines.pop()
except IndexError:
raise StopIteration
class TextOutExec(BaseExec):
''' TextOutExec is used for when an executed subprocess's
stdout and stderr are line-oriented text output.
This class is its own iterator. Each line from
the subprocess is yielded from here, with a prefix:
' ' -- line written by subprocess to stdout
'* ' -- line written by subprocess to stderr
'** ' -- line represents subprocess exit code
NB: Current implementation is probably not that secure,
in that it assumes that once the pipes are closed,
the process should be terminating itself shortly.
If this proves to be a problem in real life, we
can add timeout checking to the "wait for things
to finish up" logic.
'''
defaults = dict(
pollinterval=1.0,
readbufsize=100000,
)
defaults.update(BaseExec.defaults)
def before_init(self, keywords):
self.pollinterval = keywords.pop('pollinterval')
self.bufsize = keywords.pop('readbufsize')
def after_init(self):
proc = self.proc
self.pipes = PipeReader(proc.stdout, proc.stderr,
timeout=self.pollinterval, bufsize=self.bufsize)
self.pipedir = {proc.stdout : LineSplitter(' '),
proc.stderr : LineSplitter('*')}
self.lines = []
self.finished = False
def __iter__(self):
return self
def next(self):
lines = self.lines
while not lines:
self.checktimeout()
for pipe, data in self.pipes:
if pipe is not None:
lines.extend(self.pipedir[pipe](data))
lines.reverse()
break
else:
if self.finished:
raise StopIteration
else:
self.finished = True
lines.append(('**', str(self.proc.wait())))
return '%s %s' % lines.pop()
def elapsedtime(when=time.time()):
mins, secs = divmod(round(time.time() - when, 1), 60)
hrs, mins = divmod(mins, 60)
hrs = hrs and ('%02d:' % int(round(hrs))) or ''
mins = mins and ('%02d:' % int(round(mins))) or ''
secs = '%04.1f' % secs
units = hrs and 'hours' or mins and 'minutes' or 'seconds'
return '%s%s%s %s' % (hrs, mins, secs, units)
def default_logger(resultlist, data=None, data2=None):
if data is not None:
resultlist.append(data)
if data2 is None:
data2 = data
print data2
def textexec(*arg, **kw):
''' Exec a subprocess, print lines, and also return
them to caller
'''
logger = kw.pop('logger', default_logger)
formatcmd = textwrap.TextWrapper(initial_indent=' ',
subsequent_indent=' ',
break_long_words=False).fill
subproc = TextOutExec(*arg, **kw)
args = subproc.args
procname = args[0]
starttime = time.time()
result = []
logger(result,
'Process "%s" started on %s\n\n%s\n\n' % (
procname, time.asctime(), formatcmd(' '.join(args))))
errcode = 0
badexit = '* ' + chr(1)
for line in subproc:
if line == badexit and subproc.is_python_proc:
errcode = 1
continue
if not line.startswith('**'):
logger(result, line)
continue
errcode = errcode or int(line.split()[-1])
status = errcode and 'FAIL' or 'PASS'
logger(result,
'\nProgram %s exit code: %s (%d) elapsed time: %s\n' %
(procname, status, errcode, elapsedtime(starttime)))
logger(result, None,
'Cumulative execution time is %s\n' % elapsedtime())
return errcode, result
if __name__ == '__main__':
def goodfunc():
print "Good func", sys.argv
def badfunc():
assert 0, "Boo! %s" % sys.argv
#raise SystemExit('I am bad')
if len(sys.argv) > 1:
print "Starting subprocess"
sys.stdout.flush()
for i in range(10):
time.sleep(0.2)
print "This is line", i
sys.stdout.flush()
print >> sys.stderr, "This is an error message"
print "Ending subprocess"
if sys.argv[1] == 'die':
raise SystemExit('Deliberately croaking')
else:
print 'Calling good python_proc 1'
textexec('goodfunc', '1', python_proc=goodfunc)
print 'Calling bad python_proc 1'
textexec('badfunc', '1', python_proc=badfunc)
print 'Calling good python_proc 2'
textexec('goodfunc', '2', python_proc=goodfunc)
print 'Calling bad python_proc 2'
textexec('badfunc', '2', python_proc=badfunc)
print "Calling myself"
textexec(__file__, 'subprocess')
print "Calling myself with kill time"
textexec(__file__, 'subprocess', timeout=0.8)
print "Calling myself with forced error exit"
textexec(__file__, 'die')
print 'All Done'
| {
"content_hash": "34aaa918b1732fced97506d5d15b7ae1",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 81,
"avg_line_length": 32.286842105263155,
"alnum_prop": 0.5513896813106203,
"repo_name": "lamby/pkg-rst2pdf",
"id": "5fc863061f415958a8abb00c21038ae785eee788",
"size": "12535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rst2pdf/tests/execmgr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1120983"
},
{
"name": "Shell",
"bytes": "37634"
}
],
"symlink_target": ""
} |
class Node(object):
def __init__(self, data):
self.data = data
self.next = None
def set_next(self, next):
self.next = next
def return_next(self):
return self.next
def set_data(self, data):
self.data = data
def return_data(self):
return self.data
class LinkedList(object):
def __init__(self, data):
self.root = Node(data)
def add_next(self, data):
node = self.root
new_item = Node(data)
while node.return_next() is not None:
node = node.return_next()
node.set_next(new_item)
def add_many(self, array):
for i in array:
self.add_next(i)
def add_after(self, data, key):
node = self.root
temp = self.root
item = Node(data)
temp = temp.return_next()
while node is not None:
if node.return_data() == key:
item.set_next(temp)
node.set_next(item)
break
else:
node = node.return_next()
temp = temp.return_next()
def add_before(self, data, key):
node = self.root
temp = self.root
item = Node(data)
if node.return_data() == key:
item.set_next(self.root)
self.root = item
else:
node = node.return_next()
while node is not None:
if node.return_data() == key:
item.set_next(temp.return_next())
temp.set_next(item)
break
else:
node = node.return_next()
temp = temp.return_next()
def print_list(self):
array = []
node = self.root
while node is not None:
array.append(node.return_data())
node = node.return_next()
print(array)
def contain_data(self, data):
node = self.root
while node is not None:
if node.return_data() == data:
return True
else:
                node = node.return_next()
        return False
    def remove_data(self, data):
        node = self.root
        if node.return_data() == data:
            self.root = node.return_next()
            return
        while node.return_next() is not None:
            if node.return_next().return_data() == data:
                node.set_next(node.return_next().return_next())
                break
            node = node.return_next()
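# Usage sketch (not part of the original module): exercises the operations
# defined above; runs only when the file is executed directly.
if __name__ == "__main__":
    numbers = LinkedList(1)
    numbers.add_many([2, 4, 5])
    numbers.add_after(3, 2)    # insert 3 after the node holding 2
    numbers.add_before(0, 1)   # insert 0 before the current head
    numbers.remove_data(5)
    numbers.print_list()       # expected: [0, 1, 2, 3, 4]
    print(numbers.contain_data(3))  # True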
| {
"content_hash": "ffe63856ff8bcf94e68fda7b67da2700",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 58,
"avg_line_length": 27.282608695652176,
"alnum_prop": 0.48167330677290837,
"repo_name": "miki4920/Python-Projects",
"id": "1439a50db722c505d8861ec6d835ade9afd7e54f",
"size": "2510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SmallProjects/LinkedList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "16688"
},
{
"name": "Python",
"bytes": "98900"
}
],
"symlink_target": ""
} |
"""MPSSE.py: A python wrapper for the FTDI-provided libMPSSE DLL (I2C only)
__author__ = "Jason M. Battle"
"""
import ctypes
from collections import OrderedDict
dll_loc = r'C:\Python27\Lib\site-packages\libMPSSE.dll'
try:
dll = ctypes.CDLL(dll_loc)
except:
print '%s not found' % dll_loc.split('\\')[-1]
#####GLOBALS###################################################################
# Clock Rate Defines
I2C_CLOCK_STANDARD_MODE = 100000 # 100kHz Mode
I2C_CLOCK_FAST_MODE = 400000 # 400kHz Mode
I2C_CLOCK_FAST_MODE_PLUS = 1000000 # 1MHz Mode
I2C_CLOCK_HIGH_SPEED_MODE = 3400000 # 3.4MHz Mode
# Latency Defines
I2C_LATENCY_TIMER = 255 # 255ms default; valid range of 0 - 255ms
# Hardware Option Defines
# Three-phase clocking ON and open-drain drive ON -> I2C_DISABLE_3PHASE_CLK & I2C_DISABLE_DRIVE_ONLY_ZERO
# Three-phase clocking OFF and open-drain drive ON -> I2C_DISABLE_3PHASE_CLK
# Three-phase clocking ON and open-drain drive OFF -> I2C_DISABLE_DRIVE_ONLY_ZERO
# Three-phase clocking OFF and open-drain drive OFF -> I2C_DISABLE_3PHASE_CLK | I2C_DISABLE_DRIVE_ONLY_ZERO
I2C_DISABLE_3PHASE_CLK = 1
I2C_DISABLE_DRIVE_ONLY_ZERO = 2
# I2C Transfer Option Defines
I2C_TRANSFER_OPTIONS_START_BIT = 1 # Default I2C transaction option
I2C_TRANSFER_OPTIONS_STOP_BIT = 2 # Default I2C transaction option
I2C_TRANSFER_OPTIONS_NACK_LAST_BYTE = 8 # Default I2C transaction option
I2C_TRANSFER_OPTIONS_FAST_TRANSFER_BYTES = 16
I2C_TRANSFER_OPTIONS_FAST_TRANSFER_BITS = 32
I2C_TRANSFER_OPTIONS_NO_ADDRESS = 64
# Status Codes
STATUS_CODES = {0: 'FT_OK',
1: 'FT_INVALID_HANDLE',
2: 'FT_DEVICE_NOT_FOUND',
3: 'FT_DEVICE_NOT_OPENED',
4: 'FT_INVALID_HANDLE',
5: 'FT_IO_ERROR',
6: 'FT_INVALID_PARAMETER',
7: 'FT_INVALID_BAUD_RATE',
8: 'FT_DEVICE_NOT_OPENED_FOR_ERASE',
9: 'FT_DEVICE_NOT_OPENED_FOR_WRITE',
10: 'FT_FAILED_TO_WRITE_DEVICE',
11: 'FT_EEPROM_READ_FAILED',
12: 'FT_EEPROM_WRITE_FAILED',
13: 'FT_EEPROM_ERASE_FAILED',
14: 'FT_EEPROM_NOT_PRESENT',
15: 'FT_EEPROM_NOT_PROGRAMMED',
16: 'FT_INVALID_ARGS',
17: 'FT_NOT_SUPPORTED',
18: 'FT_OTHER_ERROR',
19: 'FT_DEVICE_LIST_NOT_READY'}
# Device Types
DEVICE_TYPES = {0: 'FT_DEVICE_BM',
                1: 'FT_DEVICE_AM',
2: 'FT_DEVICE_100AX',
3: 'FT_DEVICE_UNKNOWN',
4: 'FT_DEVICE_2232C',
5: 'FT_DEVICE_232R',
6: 'FT_DEVICE_2232H',
7: 'FT_DEVICE_4232H',
8: 'FT_DEVICE_232H',
9: 'FT_DEVICE_X_SERIES'}
#####STRUCTS###################################################################
class FT_DEVICE_LIST_INFO_NODE(ctypes.Structure):
_fields_ = [
('Flags', ctypes.c_ulong),
('Type', ctypes.c_ulong),
('ID', ctypes.c_ulong),
('LocID', ctypes.c_ulong),
('SerialNumber', ctypes.c_ubyte*16),
('Description', ctypes.c_ubyte*64),
('ftHandle', ctypes.c_ulong)]
class CHANNEL_CONFIG(ctypes.Structure):
_fields_ = [
('ClockRate', ctypes.c_ulong),
('LatencyTimer', ctypes.c_ubyte),
('Options', ctypes.c_ulong)]
#####CLASSES###################################################################
class I2CMaster():
def __init__(self):
pass
# I2C_GetNumChannels(uint32 *numChannels)
def GetNumChannels(self):
dll.I2C_GetNumChannels.argtypes = [ctypes.POINTER(ctypes.c_ulong)]
dll.I2C_GetNumChannels.restype = ctypes.c_ulong
self._numchannels = ctypes.c_ulong()
if dll.I2C_GetNumChannels(ctypes.byref(self._numchannels)) != 0:
print STATUS_CODES[dll.I2C_GetNumChannels(ctypes.byref(self._numchannels))]
else:
print 'Number of Channels: %i' % self._numchannels.value
return self._numchannels.value
# I2C_GetChannelInfo(uint32 index, FT_DEVICE_LIST_INFO_NODE *chanInfo)
def GetChannelInfo(self):
dll.I2C_GetChannelInfo.argtypes = [ctypes.c_ulong, ctypes.POINTER(FT_DEVICE_LIST_INFO_NODE)]
dll.I2C_GetChannelInfo.restype = ctypes.c_ulong
self._chaninfo = FT_DEVICE_LIST_INFO_NODE()
self._fulldevlist = OrderedDict()
for idx in range(self._numchannels.value):
self._index = ctypes.c_ulong(idx)
if dll.I2C_GetChannelInfo(self._index, ctypes.byref(self._chaninfo)) != 0:
print STATUS_CODES[dll.I2C_GetChannelInfo(self._index, ctypes.byref(self._chaninfo))]
else:
self._Type = DEVICE_TYPES[self._chaninfo.Type]
self._SerialNumber = ''.join(map(chr, self._chaninfo.SerialNumber)).split('\x00')[0] # Remove non-ASCII characters
self._Description = ''.join(map(chr, self._chaninfo.Description)).split('\x00')[0] # Remove non-ASCII characters
print 'Flags: %i' % self._chaninfo.Flags
print 'Type: %s' % self._Type
print 'ID: %i' % self._chaninfo.ID
print 'LocID: %i' % self._chaninfo.LocID
print 'SerialNumber: %s' % self._SerialNumber
print 'Description: %s' % self._Description
print 'Handle: %i' % self._chaninfo.ftHandle
devinfolist = OrderedDict([('Flags', self._chaninfo.Flags), ('Type', self._Type), ('ID', self._chaninfo.ID), ('LocID', self._chaninfo.LocID), ('SerialNumber', self._SerialNumber), ('Description', self._Description), ('Handle', self._chaninfo.ftHandle)])
self._fulldevlist['Dev%i' % idx] = devinfolist
return self._fulldevlist
# I2C_OpenChannel(uint32 index, FT_HANDLE *handle)
def OpenChannel(self):
dll.I2C_OpenChannel.argtypes = [ctypes.c_ulong, ctypes.POINTER(ctypes.c_ulong)]
dll.I2C_OpenChannel.restype = ctypes.c_ulong
for idx, device in enumerate(self._fulldevlist.values()):
if device['Type'] == 'FT_DEVICE_232H':
self._index = ctypes.c_ulong(idx)
if device['Handle'] == 0:
self._handle = ctypes.c_ulong()
else:
self._handle = ctypes.c_ulong(device['Handle'])
else:
continue
break
if self._handle.value == 0:
if dll.I2C_OpenChannel(self._index, ctypes.byref(self._handle)) != 0:
print STATUS_CODES[dll.I2C_OpenChannel(self._index, ctypes.byref(self._handle))]
else:
print 'Successfully opened device channel %i with handle %i' % (self._index.value, self._handle.value)
else:
print 'Device channel %i is already open with handle %i' % (self._index.value, self._handle.value)
# I2C_InitChannel(FT_HANDLE handle, ChannelConfig *config)
def InitChannel(self, mode='Standard'):
dll.I2C_InitChannel.argtypes = [ctypes.c_ulong, ctypes.POINTER(CHANNEL_CONFIG)]
dll.I2C_InitChannel.restype = ctypes.c_ulong
if mode == 'Standard': # All modes default to open-drain drive with three-phase clocking
self._config = CHANNEL_CONFIG(I2C_CLOCK_STANDARD_MODE, I2C_LATENCY_TIMER, I2C_DISABLE_3PHASE_CLK & I2C_DISABLE_DRIVE_ONLY_ZERO)
elif mode == 'Fast':
self._config = CHANNEL_CONFIG(I2C_CLOCK_FAST_MODE, I2C_LATENCY_TIMER, I2C_DISABLE_3PHASE_CLK & I2C_DISABLE_DRIVE_ONLY_ZERO)
elif mode == 'FastPlus':
self._config = CHANNEL_CONFIG(I2C_CLOCK_FAST_MODE_PLUS, I2C_LATENCY_TIMER, I2C_DISABLE_3PHASE_CLK & I2C_DISABLE_DRIVE_ONLY_ZERO)
elif mode == 'HighSpeed':
self._config = CHANNEL_CONFIG(I2C_CLOCK_HIGH_SPEED_MODE, I2C_LATENCY_TIMER, I2C_DISABLE_3PHASE_CLK & I2C_DISABLE_DRIVE_ONLY_ZERO)
else:
self._config = CHANNEL_CONFIG(I2C_CLOCK_STANDARD_MODE, I2C_LATENCY_TIMER, I2C_DISABLE_3PHASE_CLK & I2C_DISABLE_DRIVE_ONLY_ZERO)
print 'Mode not recognized. Defaulted to standard mode'
if dll.I2C_InitChannel(self._handle, ctypes.byref(self._config)) != 0:
print STATUS_CODES[dll.I2C_InitChannel(self._handle, ctypes.byref(self._config))]
else:
print 'Successfully initialized device channel %i with handle %i' % (self._index.value, self._handle.value)
if (self._config.Options == 0 or self._config.Options == 2 ):
print 'Clock Rate: %i' % int(self._config.ClockRate / 1.5) # libMPSSE DLL increases base clock by 1.5x when Three-phase clocking enabled
else:
print self._config.ClockRate
print 'Latency Timer: %i' % self._config.LatencyTimer
print 'Options: %i' % self._config.Options
# I2C_CloseChannel(FT_HANDLE handle)
def CloseChannel(self):
dll.I2C_CloseChannel.argtypes = [ctypes.c_ulong]
dll.I2C_CloseChannel.restype = ctypes.c_ulong
if dll.I2C_CloseChannel(self._handle) != 0:
print STATUS_CODES[dll.I2C_CloseChannel(self._handle)]
else:
print 'Successfully closed device channel %i with handle %i' % (self._index.value, self._handle.value)
# I2C_DeviceRead(FT_HANDLE handle, uint32 deviceAddress, uint32 sizeToTransfer, uint8 *buffer, uint32 *sizeTransfered, uint32 options)
def DeviceRead(self, devaddress, regaddress, numbytes, fastbytes=False):
dll.I2C_DeviceWrite.argtypes = [ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(ctypes.c_ubyte), ctypes.POINTER(ctypes.c_ulong), ctypes.c_ulong] # Buffer argtype is single byte only (register address)
dll.I2C_DeviceWrite.restype = ctypes.c_ulong
dll.I2C_DeviceRead.argtypes = [ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(ctypes.c_ubyte*numbytes), ctypes.POINTER(ctypes.c_ulong), ctypes.c_ulong] # Buffer argtype is specified read length
dll.I2C_DeviceRead.restype = ctypes.c_ulong
        self._writebuffer = (ctypes.c_ubyte)(regaddress) # Single-byte buffer holding the register address to read from
        self._writebytes = ctypes.c_ulong(1) # One byte (the register address) is written before the read
self._devaddress = ctypes.c_ulong(devaddress) # Slave address of target device
self._readbytes = ctypes.c_ulong(numbytes) # Number of bytes to read is user-specified (passed to function)
self._readbuffer = (ctypes.c_ubyte*numbytes)() # Buffer size is set from number of bytes to read
self._numsent = ctypes.c_ulong() # Number of bytes transmitted is number of bytes to read
if fastbytes == True:
self._options = ctypes.c_ulong(I2C_TRANSFER_OPTIONS_START_BIT | I2C_TRANSFER_OPTIONS_STOP_BIT | I2C_TRANSFER_OPTIONS_NACK_LAST_BYTE | I2C_TRANSFER_OPTIONS_FAST_TRANSFER_BYTES)
else:
self._options = ctypes.c_ulong(I2C_TRANSFER_OPTIONS_START_BIT | I2C_TRANSFER_OPTIONS_STOP_BIT | I2C_TRANSFER_OPTIONS_NACK_LAST_BYTE)
if dll.I2C_DeviceWrite(self._handle, self._devaddress, self._writebytes, ctypes.byref(self._writebuffer), ctypes.byref(self._numsent), self._options) != 0:
print STATUS_CODES[dll.I2C_DeviceWrite(self._handle, self._devaddress, self._writebytes, ctypes.byref(self._writebuffer), ctypes.byref(self._numsent), self._options)]
if dll.I2C_DeviceRead(self._handle, self._devaddress, self._readbytes, ctypes.byref(self._readbuffer), ctypes.byref(self._numsent), self._options) != 0:
print STATUS_CODES[dll.I2C_DeviceRead(self._handle, self._devaddress, self._readbytes, ctypes.byref(self._readbuffer), ctypes.byref(self._numsent), self._options)]
else:
print 'I2C read transaction complete'
print 'Device Address: 0x%02X' % self._devaddress.value
print 'Register Address: 0x%02X' % regaddress
for idx, byte in enumerate(self._readbuffer[:]):
print 'Data Byte %i: 0x%02X' % (idx+1, byte)
print 'Data Length: %i' % self._numsent.value
return self._readbuffer[:]
# I2C_DeviceWrite(FT_HANDLE handle, uint32 deviceAddress, uint32 sizeToTransfer, uint8 *buffer, uint32 *sizeTransfered, uint32 options)
def DeviceWrite(self, devaddress, regaddress, data, fastbytes=False):
data.insert(0, regaddress) # Prepend data array with register address
dll.I2C_DeviceWrite.argtypes = [ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(ctypes.c_ubyte*len(data)), ctypes.POINTER(ctypes.c_ulong), ctypes.c_ulong] # Buffer argtype is total data length (register address + data)
dll.I2C_DeviceWrite.restype = ctypes.c_ulong
self._devaddress = ctypes.c_ulong(devaddress) # Slave address of target device
self._writebytes = ctypes.c_ulong(len(data)) # Number of bytes to write is total data length (register address + data)
self._buffer = (ctypes.c_ubyte*len(data))(*data) # Buffer size is set from total data length. Pass data to buffer as variable length argument
self._numsent = ctypes.c_ulong() # Number of bytes transmitted is total data length (register address + data)
if fastbytes == True:
self._options = ctypes.c_ulong(I2C_TRANSFER_OPTIONS_START_BIT | I2C_TRANSFER_OPTIONS_STOP_BIT | I2C_TRANSFER_OPTIONS_NACK_LAST_BYTE | I2C_TRANSFER_OPTIONS_FAST_TRANSFER_BYTES)
else:
self._options = ctypes.c_ulong(I2C_TRANSFER_OPTIONS_START_BIT | I2C_TRANSFER_OPTIONS_STOP_BIT | I2C_TRANSFER_OPTIONS_NACK_LAST_BYTE)
if dll.I2C_DeviceWrite(self._handle, self._devaddress, self._writebytes, ctypes.byref(self._buffer), ctypes.byref(self._numsent), self._options) != 0:
print STATUS_CODES[dll.I2C_DeviceWrite(self._handle, self._devaddress, self._writebytes, ctypes.byref(self._buffer), ctypes.byref(self._numsent), self._options)]
else:
print 'I2C write transaction complete'
print 'Device Address: 0x%02X' % self._devaddress.value
print 'Register Address: 0x%02X' % regaddress
for idx, byte in enumerate(self._buffer[:]):
print 'Data Byte %i: 0x%02X' % (idx+1, byte)
print 'Data Length: %i' % self._numsent.value
# FT_WriteGPIO(FT_HANDLE handle, uint8 dir, uint8 value)
def WriteGPIO(self, direction, value):
dll.FT_WriteGPIO.argtypes = [ctypes.c_ulong, ctypes.c_ubyte, ctypes.c_ubyte]
dll.FT_WriteGPIO.restype = ctypes.c_ulong
self._direction = ctypes.c_ubyte(direction) # 1 is output, 0 is input for 8-bit GPIO port; valid range of 0-255
self._value = ctypes.c_ubyte(value) # 1 is logic high, 0 is logic low for 8-bit GPIO port; valid range of 0-255
if dll.FT_WriteGPIO(self._handle, self._direction, self._value) != 0:
print STATUS_CODES[dll.FT_WriteGPIO(self._handle, self._direction, self._value)]
else:
print 'GPIO write transaction complete'
print 'P.0: Direction = %s, State = %s' % ('Output' if (self._direction.value & 1) else 'Input', 'High' if (self._value.value & 1) else 'Low')
print 'P.1: Direction = %s, State = %s' % ('Output' if (self._direction.value & 2) else 'Input', 'High' if (self._value.value & 2) else 'Low')
print 'P.2: Direction = %s, State = %s' % ('Output' if (self._direction.value & 4) else 'Input', 'High' if (self._value.value & 4) else 'Low')
print 'P.3: Direction = %s, State = %s' % ('Output' if (self._direction.value & 8) else 'Input', 'High' if (self._value.value & 8) else 'Low')
print 'P.4: Direction = %s, State = %s' % ('Output' if (self._direction.value & 16) else 'Input', 'High' if (self._value.value & 16) else 'Low')
print 'P.5: Direction = %s, State = %s' % ('Output' if (self._direction.value & 32) else 'Input', 'High' if (self._value.value & 32) else 'Low')
print 'P.6: Direction = %s, State = %s' % ('Output' if (self._direction.value & 64) else 'Input', 'High' if (self._value.value & 64) else 'Low')
print 'P.7: Direction = %s, State = %s' % ('Output' if (self._direction.value & 128) else 'Input', 'High' if (self._value.value & 128) else 'Low')
# FT_ReadGPIO(FT_HANDLE handle, uint8 *value)
def ReadGPIO(self):
dll.FT_ReadGPIO.argtypes = [ctypes.c_ulong, ctypes.POINTER(ctypes.c_ubyte)]
dll.FT_ReadGPIO.restype = ctypes.c_ulong
self._value = ctypes.c_ubyte() # 1 is logic high, 0 is logic low for 8-bit GPIO port; valid range of 0-255
if dll.FT_ReadGPIO(self._handle, ctypes.byref(self._value)) != 0:
print STATUS_CODES[dll.FT_ReadGPIO(self._handle, ctypes.byref(self._value))]
else:
print 'GPIO read transaction complete'
print 'P.0: State = %s' % ('High' if (self._value.value & 1) else 'Low')
print 'P.1: State = %s' % ('High' if (self._value.value & 2) else 'Low')
print 'P.2: State = %s' % ('High' if (self._value.value & 4) else 'Low')
print 'P.3: State = %s' % ('High' if (self._value.value & 8) else 'Low')
print 'P.4: State = %s' % ('High' if (self._value.value & 16) else 'Low')
print 'P.5: State = %s' % ('High' if (self._value.value & 32) else 'Low')
print 'P.6: State = %s' % ('High' if (self._value.value & 64) else 'Low')
print 'P.7: State = %s' % ('High' if (self._value.value & 128) else 'Low')
return self._value.value
# Init_libMPSSE(void)
def Init_libMPSSE(self):
dll.Init_libMPSSE.argtypes = []
dll.Init_libMPSSE.restype = None
dll.Init_libMPSSE()
# Cleanup_libMPSSE(void)
def Cleanup_libMPSSE(self):
dll.Cleanup_libMPSSE.argtypes = []
dll.Cleanup_libMPSSE.restype = None
dll.Cleanup_libMPSSE()
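# Usage sketch (not part of the original module): a hypothetical transaction
# with a device at slave address 0x48, register 0x00. The slave address,
# register map and chosen mode are placeholders, not a real part.
if __name__ == '__main__':
    i2c = I2CMaster()
    if i2c.GetNumChannels() > 0:
        i2c.GetChannelInfo()
        i2c.OpenChannel()
        i2c.InitChannel(mode='Fast')
        i2c.DeviceWrite(0x48, 0x01, [0xAB])       # hypothetical configuration write
        readback = i2c.DeviceRead(0x48, 0x00, 2)  # read two bytes back
        i2c.CloseChannel()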
| {
"content_hash": "f87c52096a157018da5688d912b783ee",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 269,
"avg_line_length": 59.8562091503268,
"alnum_prop": 0.6088665647521293,
"repo_name": "jmbattle/pyMPSSE",
"id": "0aa3ad985c669da10fc2dc535bd0e31f16626ce0",
"size": "18341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MPSSE.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19384"
}
],
"symlink_target": ""
} |
"""
sphinx.errors
~~~~~~~~~~~~~
Contains SphinxError and a few subclasses (in an extra module to avoid
circular import problems).
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import Any
class SphinxError(Exception):
"""Base class for Sphinx errors.
This is the base class for "nice" exceptions. When such an exception is
raised, Sphinx will abort the build and present the exception category and
message to the user.
Extensions are encouraged to derive from this exception for their custom
errors.
Exceptions *not* derived from :exc:`SphinxError` are treated as unexpected
and shown to the user with a part of the traceback (and the full traceback
saved in a temporary file).
.. attribute:: category
Description of the exception "category", used in converting the
exception to a string ("category: message"). Should be set accordingly
in subclasses.
"""
category = 'Sphinx error'
class SphinxWarning(SphinxError):
"""Warning, treated as error."""
category = 'Warning, treated as error'
class ApplicationError(SphinxError):
"""Application initialization error."""
category = 'Application error'
class ExtensionError(SphinxError):
"""Extension error."""
def __init__(self, message: str, orig_exc: Exception = None, modname: str = None) -> None:
super().__init__(message)
self.message = message
self.orig_exc = orig_exc
self.modname = modname
@property
def category(self) -> str: # type: ignore
if self.modname:
return 'Extension error (%s)' % self.modname
else:
return 'Extension error'
def __repr__(self) -> str:
if self.orig_exc:
return '%s(%r, %r)' % (self.__class__.__name__,
self.message, self.orig_exc)
return '%s(%r)' % (self.__class__.__name__, self.message)
def __str__(self) -> str:
parent_str = super().__str__()
if self.orig_exc:
return '%s (exception: %s)' % (parent_str, self.orig_exc)
return parent_str
class BuildEnvironmentError(SphinxError):
"""BuildEnvironment error."""
category = 'BuildEnvironment error'
class ConfigError(SphinxError):
"""Configuration error."""
category = 'Configuration error'
class DocumentError(SphinxError):
"""Document error."""
category = 'Document error'
class ThemeError(SphinxError):
"""Theme error."""
category = 'Theme error'
class VersionRequirementError(SphinxError):
"""Incompatible Sphinx version error."""
category = 'Sphinx version error'
class SphinxParallelError(SphinxError):
"""Sphinx parallel build error."""
category = 'Sphinx parallel build error'
def __init__(self, message: str, traceback: Any) -> None:
self.message = message
self.traceback = traceback
def __str__(self) -> str:
return self.message
class PycodeError(Exception):
"""Pycode Python source code analyser error."""
def __str__(self) -> str:
res = self.args[0]
if len(self.args) > 1:
res += ' (exception was: %r)' % self.args[1]
return res
class NoUri(Exception):
"""Raised by builder.get_relative_uri() or from missing-reference handlers
if there is no URI available."""
pass
class FiletypeNotFoundError(Exception):
"""Raised by get_filetype() if a filename matches no source suffix."""
pass
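# Usage sketch (not part of the original module): extensions derive their own
# errors from SphinxError so a failed build reports "category: message" rather
# than an unexpected traceback. Demonstrated only when this file is run directly.
if __name__ == '__main__':
    class MyExtensionError(SphinxError):
        """Hypothetical error raised by a third-party extension."""
        category = 'My extension error'
    try:
        raise MyExtensionError('something the user can act on')
    except MyExtensionError as exc:
        print('%s: %s' % (exc.category, exc))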
| {
"content_hash": "6965f0d76098b4379274344d46d1cbd6",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 94,
"avg_line_length": 26.977443609022558,
"alnum_prop": 0.6340579710144928,
"repo_name": "sonntagsgesicht/regtest",
"id": "d84d8c4c676902c00f89baab1e57182758f0167d",
"size": "3588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/sphinx/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
} |