commit (string, 40 chars) | subject (string, 1-3.25k) | old_file (string, 4-311) | new_file (string, 4-311) | old_contents (string, 0-26.3k) | lang (3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k)
---|---|---|---|---|---|---|---|
0fa30986e1f97331f96444e0b3b0f86cbe20c68a | Add tests for JsonBackend __init__ and commit methods | shadho/backend/json/tests/test_db.py | shadho/backend/json/tests/test_db.py | Python | 0.000002 | @@ -0,0 +1,2158 @@
import pytest

from shadho.backend.base.tests.test_db import TestBaseBackend
from shadho.backend.json.db import JsonBackend

import json
import os
import shutil
import tempfile


class TestJsonBackend(object):

    def test_init(self):
        """Ensure that initialization sets up the db and filepath."""
        # Test default initialization
        b = JsonBackend()
        assert b.path == os.path.join(os.getcwd(), 'shadho.json')
        assert b.db == {'models': {},
                        'domains': {},
                        'results': {},
                        'values': {}}
        assert b.commit_frequency == 10
        assert b.update_frequency == 10

        # Test custom initialization
        b = JsonBackend(path='foo.bar',
                        commit_frequency=42,
                        update_frequency=42)
        assert b.path == os.path.join(os.getcwd(), 'foo.bar')
        assert b.db == {'models': {},
                        'domains': {},
                        'results': {},
                        'values': {}}
        assert b.commit_frequency == 42
        assert b.update_frequency == 42

        # Test without specifying a file name
        b = JsonBackend(path='/tmp')
        assert b.path == os.path.join('/tmp', 'shadho.json')
        assert b.db == {'models': {},
                        'domains': {},
                        'results': {},
                        'values': {}}
        assert b.commit_frequency == 10
        assert b.update_frequency == 10

    def test_commit(self):
        """Ensure that commit writes to file and the file is loadable."""
        temp = tempfile.mkdtemp()
        fpath = os.path.join(temp, 'shadho.json')

        # Test saving and loading
        b = JsonBackend(path=temp)
        b.commit()  # write the (empty) db to disk
        assert os.path.isfile(fpath)
        with open(fpath, 'r') as f:
            db = json.load(f)
        assert db == {'models': {},
                      'domains': {},
                      'results': {},
                      'values': {}}

        shutil.rmtree(temp)

    def test_count(self):
        """Ensure that the correct counts are returned for object classes"""
|
|
871f79a0b2bd235df457e3a1dc502d5c18bd934a | Add some generic python utilities as a basis for scripts | tools/build/common_utils.py | tools/build/common_utils.py | Python | 0.000001 | @@ -0,0 +1,1678 @@
from __future__ import print_function

import os

def game_root_path():
    file_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(file_path, '..', '..'))

def files_with_type(root, type):
    all_files = [os.path.join(root, filename) for filename in os.listdir(root)]
    typed_files = [path for path in all_files if path.endswith('.' + type)]
    return typed_files

def sha1_of_file(filepath):
    import hashlib
    if not os.path.exists(filepath):
        return ''
    with open(filepath, 'rb') as f:
        return hashlib.sha1(f.read()).hexdigest()

def fetch_file(url, target_path, sha1):
    if sha1_of_file(target_path) == sha1:
        return True  # Already downloaded
    import urllib
    if hasattr(urllib, 'urlretrieve'):
        # Python 2
        urllib.urlretrieve(url, target_path)
    else:
        # Python 3
        import urllib.request
        urllib.request.urlretrieve(url, target_path)
    if sha1 is None:
        print('sha1 of ' + target_path + ': ' + sha1_of_file(target_path))
    elif sha1_of_file(target_path) != sha1:
        if os.path.exists(target_path):
            os.remove(target_path)
        return False
    return True

def python27_path():
    import sys
    exe = ''
    if sys.version_info.minor == 7 and sys.version_info.major == 2:
        exe = sys.executable
    elif sys.platform.startswith("linux"):
        exe = '/usr/local/bin/python2.7'
    elif sys.platform == "darwin":
        exe = '/usr/local/bin/python2.7'
    elif sys.platform == "win32":
        exe = r'C:\Python27\python.exe'
    return exe

if __name__ == '__main__':
    print('Game root path: ' + game_root_path())
|
|
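A brief usage sketch for fetch_file above; the URL and file name are placeholders, and passing sha1=None deliberately triggers the digest printout used to pin a real checksum:

# Hypothetical download; example.com stands in for a real artifact host.
ok = fetch_file('https://example.com/toolchain.zip', 'toolchain.zip', None)
print('downloaded:', ok)  # also prints the sha1 to copy into your script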
7d22c38348ccd411871942ef0dd43ed57794de16 | include benchmark code | bench.py | bench.py | Python | 0.000001 | @@ -0,0 +1,1383 @@
from statistics import mean
import heapq
import importlib
import time

import numpy as np


r = np.random.random(1000*1000)


mergers = {
    'heapq': ('merge', 'nlargest', 'nsmallest'),
    'cyheapq': ('merge', 'nlargest', 'nsmallest'),
    'cytoolz': ('merge_sorted', 'topk', None),
}

mods = list(mergers.keys())
name_max_len = max(map(len, mods))


def test(runs, loops, f, *args):
    times = []
    for _ in range(runs):
        start = time.monotonic()
        for _ in range(loops):
            f(*args)
        stop = time.monotonic()
        times.append(stop - start)
    times.sort()
    return mean(times[1:-2])


for t in ('merge', 'nlargest', 'nsmallest'):
    print('---', t, '---')
    for mod, (merge, nlargest, nsmallest) in sorted(mergers.items()):
        module = importlib.import_module(mod)
        merge = getattr(module, merge)
        nlargest = getattr(module, nlargest)
        nsmallest = getattr(module, nsmallest) if nsmallest else None

        a = list(r)
        b = list(r)
        if t == 'merge':
            print(mod.rjust(name_max_len), 'merge', test(5, 100000, merge, a, a, b, b))
        elif t == 'nlargest':
            print(mod.rjust(name_max_len), 'nlargest', test(5, 5, nlargest, 10, a))
        elif t == 'nsmallest' and nsmallest:
            print(mod.rjust(name_max_len), 'nsmallest', test(5, 5, nsmallest, 10, a))
|
|
5b2aebb9b9f9fafe291f0890f03c44abd661ca68 | add celery_work | celery_work.py | celery_work.py | Python | 0.999978 | @@ -0,0 +1,1330 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
from celery import Celery, platforms
from app import create_app


def make_celery(app):
    """Create the celery process."""

    # Init the celery object via app's configuration.
    celery = Celery(
        app.import_name,
        backend=app.config['CELERY_RESULT_BACKEND'],
        broker=app.config['CELERY_BROKER_URL'])

    # Flask-Celery-Helper to auto-setup the config.
    celery.conf.update(app.config)
    TaskBase = celery.Task
    platforms.C_FORCE_ROOT = True

    class ContextTask(TaskBase):

        abstract = True

        def __call__(self, *args, **kwargs):
            """Executed whenever a ContextTask instance is called."""

            # Wrap task execution in the Flask app context so that the
            # app's extensions (producer side) are available inside the
            # celery worker (consumer side).
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    # Include the app context in celery.Task so that other Flask
    # extensions can be called normally from within tasks.
    celery.Task = ContextTask
    return celery

flask_app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# 1. Each celery process needs to create an instance of the Flask application.
# 2. Register the celery object into the app object.
celery = make_celery(flask_app)
|
|
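Because ContextTask wraps every invocation in app.app_context(), tasks defined against this celery object can touch Flask state directly. A minimal sketch; the task name and body are hypothetical:

from flask import current_app

@celery.task
def report_broker(tag):
    # Runs inside the app context, so config lookups resolve normally.
    return tag, current_app.config['CELERY_BROKER_URL']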
771d44902a6b1fddc98e7801b58d718bf1b030b5 | use formatted title so we make sure all browsers will correctly read them | gunicorn/http/request.py | gunicorn/http/request.py | # -*- coding: utf-8 -*-
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import logging
import os
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import sys
from urllib import unquote
from gunicorn import __version__
from gunicorn.http.parser import Parser
from gunicorn.http.tee import TeeInput
from gunicorn.util import CHUNK_SIZE
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
class RequestError(Exception):
pass
class Request(object):
SERVER_VERSION = "gunicorn/%s" % __version__
DEFAULTS = {
"wsgi.url_scheme": 'http',
"wsgi.input": StringIO(),
"wsgi.errors": sys.stderr,
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
"SCRIPT_NAME": "",
"SERVER_SOFTWARE": "gunicorn/%s" % __version__
}
def __init__(self, socket, client_address, server_address, conf):
self.debug = conf['debug']
self.conf = conf
self._sock = socket
self.client_address = client_address
self.server_address = server_address
self.response_status = None
self.response_headers = []
self._version = 11
self.parser = Parser.parse_request()
self.start_response_called = False
self.log = logging.getLogger(__name__)
self.response_chunked = False
def read(self):
environ = {}
headers = []
buf = StringIO()
data = self._sock.recv(CHUNK_SIZE)
buf.write(data)
buf2 = self.parser.filter_headers(headers, buf)
if not buf2:
while True:
data = self._sock.recv(CHUNK_SIZE)
if not data:
break
buf.write(data)
buf2 = self.parser.filter_headers(headers, buf)
if buf2:
break
self.log.debug("%s", self.parser.status)
self.log.debug("Headers:\n%s" % headers)
if self.parser.headers_dict.get('Expect','').lower() == "100-continue":
self._sock.send("HTTP/1.1 100 Continue\r\n\r\n")
if not self.parser.content_len and not self.parser.is_chunked:
wsgi_input = TeeInput(self._sock, self.parser, StringIO(),
self.conf)
content_length = "0"
else:
wsgi_input = TeeInput(self._sock, self.parser, buf2, self.conf)
content_length = str(wsgi_input.len)
# This value should evaluate true if an equivalent application
# object may be simultaneously invoked by another process, and
# should evaluate false otherwise. In debug mode we fall to one
# worker so we comply to pylons and other paster app.
wsgi_multiprocess = (self.debug == False)
# authors should be aware that REMOTE_HOST and REMOTE_ADDR
# may not qualify the remote addr:
# http://www.ietf.org/rfc/rfc3875
client_address = self.client_address or "127.0.0.1"
forward_adress = self.parser.headers_dict.get('X-Forwarded-For',
client_address)
if isinstance(forward_adress, basestring):
            # we only take the last one
# http://en.wikipedia.org/wiki/X-Forwarded-For
if "," in forward_adress:
forward_adress = forward_adress.split(",")[-1].strip()
remote_addr = forward_adress.split(":")
if len(remote_addr) == 1:
remote_addr.append('')
else:
remote_addr = forward_adress
        # Try to get the server address from the headers
server_address = self.parser.headers_dict.get('Host',
self.server_address)
if isinstance(server_address, basestring):
server_address = server_address.split(":")
if len(server_address) == 1:
server_address.append('')
script_name = self.parser.headers_dict.get("SCRIPT_NAME",
os.environ.get("SCRIPT_NAME", ""))
path_info = self.parser.path
if script_name:
path_info = path_info.split(script_name, 1)[-1]
environ = {
"wsgi.url_scheme": 'http',
"wsgi.input": wsgi_input,
"wsgi.errors": sys.stderr,
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": wsgi_multiprocess,
"wsgi.run_once": False,
"SCRIPT_NAME": script_name,
"SERVER_SOFTWARE": self.SERVER_VERSION,
"REQUEST_METHOD": self.parser.method,
"PATH_INFO": unquote(path_info),
"QUERY_STRING": self.parser.query_string,
"RAW_URI": self.parser.raw_path,
"CONTENT_TYPE": self.parser.headers_dict.get('Content-Type', ''),
"CONTENT_LENGTH": content_length,
"REMOTE_ADDR": remote_addr[0],
"REMOTE_PORT": str(remote_addr[1]),
"SERVER_NAME": server_address[0],
"SERVER_PORT": str(server_address[1]),
"SERVER_PROTOCOL": self.parser.raw_version
}
for key, value in self.parser.headers:
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
return environ
def start_response(self, status, response_headers, exc_info=None):
if exc_info:
try:
if self.start_response_called:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif self.start_response_called:
raise AssertionError("Response headers already set!")
self.response_status = status
for name, value in response_headers:
if name.lower() == "transfer-encoding":
if value.lower() == "chunked":
self.response_chunked = True
if not isinstance(value, basestring):
value = str(value)
self.response_headers.append((name, value.strip()))
self.start_response_called = True
| Python | 0 | @@ -6415,24 +6415,32 @@
-        self.response_headers.append((name, value.strip()))
+        self.response_headers.append((name.title(), value.strip()))
|
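The change above is a single str.title() call, which normalizes response header names to the Hyphenated-Capitalized form browsers expect; a standalone illustration:

for name in ('content-type', 'x-forwarded-for', 'SET-COOKIE'):
    print(name.title())
# Content-Type
# X-Forwarded-For
# Set-Cookie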
caba92d9370a230d561675284979cb888964d9da | split only once | gunicorn/http/request.py | gunicorn/http/request.py | # -*- coding: utf-8 -*-
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import logging
import os
import re
import StringIO
import sys
from urllib import unquote
from gunicorn import __version__
from gunicorn.http.parser import Parser
from gunicorn.http.tee import TeeInput
from gunicorn.util import CHUNK_SIZE, read_partial, normalize_name
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
class RequestError(Exception):
pass
class Request(object):
SERVER_VERSION = "gunicorn/%s" % __version__
DEFAULTS = {
"wsgi.url_scheme": 'http',
"wsgi.input": StringIO.StringIO(),
"wsgi.errors": sys.stderr,
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
"SCRIPT_NAME": "",
"SERVER_SOFTWARE": "gunicorn/%s" % __version__
}
def __init__(self, socket, client_address, server_address, debug=False):
self.debug = debug
self.socket = socket
self.client_address = client_address
self.server_address = server_address
self.response_status = None
self.response_headers = []
self._version = 11
self.parser = Parser()
self.start_response_called = False
self.log = logging.getLogger(__name__)
def read(self):
environ = {}
headers = []
buf = ""
buf = read_partial(self.socket, CHUNK_SIZE)
i = self.parser.filter_headers(headers, buf)
if i == -1 and buf:
while True:
data = read_partial(self.socket, CHUNK_SIZE)
if not data: break
buf += data
i = self.parser.filter_headers(headers, buf)
if i != -1: break
self.log.debug("%s", self.parser.status)
self.log.debug("Headers:\n%s" % headers)
if self.parser.headers_dict.get('Expect','').lower() == "100-continue":
self.socket.send("100 Continue\n")
if not self.parser.content_len and not self.parser.is_chunked:
wsgi_input = StringIO.StringIO()
else:
wsgi_input = TeeInput(self.socket, self.parser, buf[i:])
# This value should evaluate true if an equivalent application
# object may be simultaneously invoked by another process, and
# should evaluate false otherwise. In debug mode we fall to one
# worker so we comply to pylons and other paster app.
wsgi_multiprocess = (self.debug == False)
# authors should be aware that REMOTE_HOST and REMOTE_ADDR
# may not qualify the remote addr:
# http://www.ietf.org/rfc/rfc3875
client_address = self.client_address or "127.0.0.1"
forward_adress = self.parser.headers_dict.get('X-Forwarded-For',
client_address)
if isinstance(forward_adress, basestring):
            # we only take the last one
# http://en.wikipedia.org/wiki/X-Forwarded-For
if "," in forward_adress:
forward_adress = forward_adress.split(",")[-1].strip()
remote_addr = forward_adress.split(":")
if len(remote_addr) == 1:
remote_addr.append('')
else:
remote_addr = forward_adress
        # Try to get the server address from the headers
server_address = self.parser.headers_dict.get('Host',
self.server_address)
if isinstance(server_address, basestring):
server_address = server_address.split(":")
if len(server_address) == 1:
server_address.append('')
script_name = self.parser.headers_dict.get("SCRIPT_NAME",
os.environ.get("SCRIPT_NAME", ""))
path_info = self.parser.path
if script_name:
path_info = path_info.split(script_name)[-1]
environ = {
"wsgi.url_scheme": 'http',
"wsgi.input": wsgi_input,
"wsgi.errors": sys.stderr,
"wsgi.version": (1, 0),
"wsgi.multithread": False,
"wsgi.multiprocess": wsgi_multiprocess,
"wsgi.run_once": False,
"SCRIPT_NAME": script_name,
"SERVER_SOFTWARE": self.SERVER_VERSION,
"REQUEST_METHOD": self.parser.method,
"PATH_INFO": unquote(path_info),
"QUERY_STRING": self.parser.query_string,
"RAW_URI": self.parser.raw_path,
"CONTENT_TYPE": self.parser.headers_dict.get('Content-Type', ''),
"CONTENT_LENGTH": str(wsgi_input.len),
"REMOTE_ADDR": remote_addr[0],
"REMOTE_PORT": remote_addr[1],
"SERVER_NAME": server_address[0],
"SERVER_PORT": server_address[1],
"SERVER_PROTOCOL": self.parser.raw_version
}
for key, value in self.parser.headers:
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
return environ
def start_response(self, status, response_headers, exc_info=None):
if exc_info:
try:
if self.start_response_called:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif self.start_response_called:
raise AssertionError("Response headers already set!")
self.response_status = status
for name, value in response_headers:
name = normalize_name(name)
if not isinstance(value, basestring):
value = str(value)
self.response_headers.append((name, value.strip()))
self.start_response_called = True
| Python | 0.000001 | @@ -4085,16 +4085,19 @@
-            path_info = path_info.split(script_name)[-1]
+            path_info = path_info.split(script_name, 1)[-1]
|
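Why the maxsplit argument in the diff above matters: without it, a PATH_INFO that happens to repeat the script name is cut at every occurrence; a standalone sketch:

script_name, path_info = '/app', '/app/users/app/1'
print(path_info.split(script_name)[-1])     # '/1'  (wrong tail)
print(path_info.split(script_name, 1)[-1])  # '/users/app/1'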
9c53e59ee0c4e5418b54d47c932454b7b907dc03 | Revert escape nickname, desc, etc in user profile | seahub/profile/forms.py | seahub/profile/forms.py | # encoding: utf-8
from django import forms
from django.utils.html import escape
from seahub.profile.models import Profile, DetailedProfile
class ProfileForm(forms.Form):
nickname = forms.CharField(max_length=64, required=False)
intro = forms.CharField(max_length=256, required=False)
def save(self, username):
nickname = escape(self.cleaned_data['nickname'])
intro = escape(self.cleaned_data['intro'])
Profile.objects.add_or_update(username, nickname, intro)
class DetailedProfileForm(ProfileForm):
department = forms.CharField(max_length=512, required=False)
telephone = forms.CharField(max_length=100, required=False)
def save(self, username):
super(DetailedProfileForm, self).save(username)
department = escape(self.cleaned_data['department'])
telephone = escape(self.cleaned_data['telephone'])
DetailedProfile.objects.add_or_update(username, department, telephone)
| Python | 0 | @@ -39,45 +39,8 @@
-from django.utils.html import escape
@@
     def save(self, username):
-        nickname = escape(self.cleaned_data['nickname'])
-        intro = escape(self.cleaned_data['intro'])
+        nickname = self.cleaned_data['nickname']
+        intro = self.cleaned_data['intro']
         Profile.objects.add_or_update(username, nickname, intro)
@@
         super(DetailedProfileForm, self).save(username)
-        department = escape(self.cleaned_data['department'])
-        telephone = escape(self.cleaned_data['telephone'])
+        department = self.cleaned_data['department']
+        telephone = self.cleaned_data['telephone']
         DetailedProfile.objects.add_or_update(username, department, telephone)
|
b23ec502b89ab70b9e8edd1868f4e9717392b7b2 | Add missing migrations | account/migrations/0004_auto_20170416_1821.py | account/migrations/0004_auto_20170416_1821.py | Python | 0.000029 | @@ -0,0 +1,488 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-16 18:21
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('account', '0003_passwordexpiry_passwordhistory'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='passwordhistory',
            options={'verbose_name': 'password history', 'verbose_name_plural': 'password histories'},
        ),
    ]
|
|
ae94990bc8b790b5307ccaee992f09fefc045692 | add Tester lockedNormal | python/medic/plugins/Tester/lockedNormal.py | python/medic/plugins/Tester/lockedNormal.py | Python | 0 | @@ -0,0 +1,1824 @@
from medic.core import testerBase
from maya import OpenMaya


class LockedNormal(testerBase.TesterBase):
    Name = "LockedNormal"
    Description = "vertex(s) which has locked normal"
    Fixable = True

    def __init__(self):
        super(LockedNormal, self).__init__()

    def Match(self, node):
        return node.object().hasFn(OpenMaya.MFn.kMesh)

    def Test(self, node):
        it = None
        mesh = None
        try:
            it = OpenMaya.MItMeshVertex(node.object())
            mesh = OpenMaya.MFnMesh(node.object())
        except:
            return (False, None)

        result = False

        comp = OpenMaya.MFnSingleIndexedComponent()
        comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)

        while (not it.isDone()):
            normal_indices = OpenMaya.MIntArray()
            it.getNormalIndices(normal_indices)

            for i in range(normal_indices.length()):
                if mesh.isNormalLocked(normal_indices[i]):
                    result = True
                    comp.addElement(it.index())
                    break

            it.next()

        return (result, comp_obj if result else None)

    def Fix(self, node, component, parameterParser):
        if node.dg().isFromReferencedFile():
            return False

        target_normal_indices = OpenMaya.MIntArray()

        mesh = OpenMaya.MFnMesh(node.object())
        it = OpenMaya.MItMeshVertex(node.getPath(), component)
        while (not it.isDone()):
            normal_indices = OpenMaya.MIntArray()
            it.getNormalIndices(normal_indices)

            for i in range(normal_indices.length()):
                target_normal_indices.append(normal_indices[i])
            it.next()

        mesh.unlockVertexNormals(target_normal_indices)

        return True


Tester = LockedNormal
|
|
088ec16cf33d4be4b396976d9e9ab1a5f17045fc | make contrib an app | adhocracy4/contrib/apps.py | adhocracy4/contrib/apps.py | Python | 0.000012 | @@ -0,0 +1,130 @@
from django.apps import AppConfig


class OrganisationsConfig(AppConfig):
    name = 'adhocracy4.contrib'
    label = 'a4contrib'
|
|
e020f81593268899a04cce726823c512b8b54762 | copy over the PlotContainerEditor to the more appropriately named and located ComponentEditor. | enthought/enable2/component_editor.py | enthought/enable2/component_editor.py | Python | 0 | @@ -0,0 +1,2645 @@
""" Defines a Traits editor for displaying an Enable component.
"""
#-------------------------------------------------------------------------------
#  Written by: David C. Morrill
#  Date: 01/26/2007
#  (c) Copyright 2007 by Enthought, Inc.
#----------------------------------------------------------------------------

from enthought.enable2.api import ColorTrait

from enthought.etsconfig.api import ETSConfig

from enthought.traits.ui.api import BasicEditorFactory

if ETSConfig.toolkit == 'wx':
    from enthought.traits.ui.wx.editor import Editor
    from enthought.enable2.wx_backend.api import Window
elif ETSConfig.toolkit == 'qt4':
    from enthought.traits.ui.qt4.editor import Editor
    from enthought.enable2.qt4_backend.api import Window
else:
    Editor = object
    Window = None

class _ComponentEditor( Editor ):

    #---------------------------------------------------------------------------
    # Trait definitions:
    #---------------------------------------------------------------------------

    # The plot editor is scrollable (overrides Traits UI Editor).
    scrollable = True

    #---------------------------------------------------------------------------
    # Finishes initializing the editor by creating the underlying toolkit
    # widget:
    #---------------------------------------------------------------------------
    def init( self, parent ):
        """ Finishes initializing the editor by creating the underlying toolkit
            widget.
        """
        self._window = Window( parent, component=self.value )
        self.control = self._window.control
        self._window.bg_color = self.factory.bgcolor

    #---------------------------------------------------------------------------
    # Updates the editor when the object trait changes externally to the editor:
    #---------------------------------------------------------------------------
    def update_editor( self ):
        """ Updates the editor when the object trait changes externally to the
            editor.
        """
        pass


class ComponentEditor( BasicEditorFactory ):
    """ wxPython editor factory for Enable components.
    """
    #---------------------------------------------------------------------------
    # Trait definitions:
    #---------------------------------------------------------------------------

    # The class used to create all editor styles (overrides BasicEditorFactory).
    klass = _ComponentEditor

    # The background color for the window
    bgcolor = ColorTrait('sys_window')
|
|
06164dbeb1ec113b24ca25a41e624793d878875f | implement a transferrable voting algorithm | instant_runoff_voting.py | instant_runoff_voting.py | Python | 0 | @@ -0,0 +1,2056 @@
from collections import defaultdict, Counter
def runoff(voters):
    """
    a function that calculates an election winner from a list of voter selections using an
    Instant Runoff Voting algorithm. https://en.wikipedia.org/wiki/Instant-runoff_voting

    Each voter selects several candidates in order of preference.
    The votes are tallied from each voter's first choice.
    If the first-place candidate has more than half the total votes, they win.
    Otherwise, find the candidate who got the least votes and remove them from each person's voting list.
    In case of a tie for least, remove all of the tying candidates.
    In case of a complete tie between every candidate, return None.
    Continue until somebody has more than half the votes; they are the winner.

    The function takes a list of voter ballots; each ballot will be a list of candidates
    in descending order of preference.
    Returns the symbol corresponding to the winning candidate.
    """
    final_tally = defaultdict(int)
    removed_candidates = []
    for this_round in range(len(voters[0])):
        this_round_votes = [voter[this_round] for voter in voters if voter[this_round] not in removed_candidates]
        tally = dict(Counter(this_round_votes))
        for candidate in tally:
            final_tally[candidate] += tally[candidate]
        leader = max(final_tally, key=tally.get)
        total_votes = sum([final_tally[i] for i in final_tally])
        if final_tally[leader] >= total_votes / 2.0:
            return leader
        # no clear winner
        knockout_candidate = min(tally, key=tally.get)
        knockout_candidate_votes = tally[knockout_candidate]
        for candidate in tally:
            if tally[candidate] == knockout_candidate_votes:
                removed_candidates.append(candidate)
        del final_tally[knockout_candidate]


voters = [
    ['c', 'a', 'b', 'd', 'e'],
    ['b', 'e', 'd', 'c', 'a'],
    ['b', 'e', 'c', 'a', 'd'],
    ['d', 'b', 'c', 'a', 'e'],
    ['c', 'b', 'd', 'a', 'e'],
]

assert(runoff(voters) == "b")
|
|
5d6ef1cf969bac9fb53db0224eebdeb4a1bb6ff0 | Update app/exceptions/__init__.py | app/exceptions/__init__.py | app/exceptions/__init__.py | Python | 0 | @@ -0,0 +1,157 @@
class BadConfigurationError(Exception):
    pass


class ClientUnavailableError(Exception):
    pass


class ClusterNotConfiguredError(Exception):
    pass
|
|
8affeda715b1facf12de1dab1d445bbe54616306 | Fix JSON serialisation problem with AJAX basket | oscar/core/ajax.py | oscar/core/ajax.py | import six
from django.contrib import messages
from six.moves import map
class FlashMessages(object):
"""
Intermediate container for flash messages.
This is useful as, at the time of creating the message, we don't know
whether the response is an AJAX response or not.
"""
def __init__(self):
self.msgs = {}
def add_message(self, level, message):
self.msgs.setdefault(level, []).append(message)
def add_messages(self, level, messages):
for msg in messages:
self.add_message(level, msg)
def info(self, message):
self.add_message(messages.INFO, message)
def warning(self, message):
self.add_message(messages.WARNING, message)
def error(self, message):
self.add_message(messages.ERROR, message)
def success(self, message):
self.add_message(messages.SUCCESS, message)
def to_json(self):
payload = {}
for level, msgs in self.msgs.items():
tag = messages.DEFAULT_TAGS.get(level, 'info')
payload[tag] = map(six.text_type, msgs)
return payload
def apply_to_request(self, request):
for level, msgs in self.msgs.items():
for msg in msgs:
messages.add_message(request, level, msg)
| Python | 0 | @@ -1063,12 +1063,9 @@
-            payload[tag] = map(six.text_type, msgs)
+            payload[tag] = [six.text_type(msg) for msg in msgs]
|
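The rationale for the diff above: under Python 3, six.moves.map returns a lazy iterator, which the stdlib json encoder cannot serialize, while a plain list works; a standalone sketch:

import json

msgs = ['Added to basket', 'Stock running low']
try:
    json.dumps({'info': map(str, msgs)})     # Python 3: raises TypeError
except TypeError as exc:
    print(exc)
print(json.dumps({'info': [str(m) for m in msgs]}))  # the fixed form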
63d22058d15a11fad7232683630976d472997c33 | Add planetary time recipe | recipes/planetarytime.py | recipes/planetarytime.py | Python | 0.999993 | @@ -0,0 +1,793 @@
"""
    Author: João Ventura <[email protected]>

    This recipe shows sample code for handling
    planetary times.

"""

from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos
from flatlib.tools import planetarytime


# Build a date and location
date = Datetime('2015/03/13', '17:00', '+00:00')
pos = GeoPos('38n32', '8w54')

# Get the planetary hour table
hourTable = planetarytime.getHourTable(date, pos)
print(hourTable.dayRuler())    # Venus
print(hourTable.nightRuler())  # Mars
print(hourTable.hourRuler())   # Saturn

# Use the info Dict to print hour number information
info = hourTable.currInfo()
print(info['hourNumber'])   # 11
print(info['start'])        # <2015/03/13 16:42:10 00:00:00>
print(info['end'])          # <2015/03/13 17:41:20 00:00:00>
|
|
98c863c9d45bdfb328a5e79d0928c8b9694bc753 | Remove redundant import | check_mesos.py | check_mesos.py | #!/usr/bin/env python
import nagiosplugin
import urllib2
import argparse
import logging
import re
INFINITY = float('inf')
HEALTHY = 1
UNHEALTHY = -1
try:
from urllib2 import *
except ImportError:
from urllib.request import *
from urllib.error import HTTPError
try:
import json
except ImportError:
import simplejson as json
class MesosMaster(nagiosplugin.Resource):
def __init__(self, baseuri, frameworks):
self.baseuri = baseuri
self.frameworks = frameworks
def probe(self):
logging.info('Base URI is %s', self.baseuri)
try:
response = urlopen(self.baseuri + '/health')
logging.debug('Response from %s is %s', response.geturl(), response)
if response.getcode() in [200, 204]:
yield nagiosplugin.Metric('master health', HEALTHY)
else:
yield nagiosplugin.Metric('master health', UNHEALTHY)
except HTTPError, e:
logging.debug('HTTP error %s', e)
yield nagiosplugin.Metric('master health', UNHEALTHY)
response = urlopen(self.baseuri + '/master/state.json')
logging.debug('Response from %s is %s', response.geturl(), response)
state = json.load(response)
has_leader = len(state.get('leader', '')) > 0
yield nagiosplugin.Metric('active slaves', state['activated_slaves'])
yield nagiosplugin.Metric('active leader', 1 if has_leader else 0)
for framework_regex in self.frameworks:
framework = None
for candidate in state['frameworks']:
if re.search(framework_regex, candidate['name']) is not None:
framework = candidate
unregistered_time = INFINITY
if framework is not None:
unregistered_time = framework['unregistered_time']
if not framework['active'] and unregistered_time == 0:
unregistered_time = INFINITY
yield nagiosplugin.Metric('framework ' + framework_regex, unregistered_time, context='framework')
@nagiosplugin.guarded
def main():
argp = argparse.ArgumentParser()
argp.add_argument('-H', '--host', required=True,
help='The hostname of a Mesos master to check')
argp.add_argument('-P', '--port', default=5050,
help='The Mesos master HTTP port - defaults to 5050')
argp.add_argument('-n', '--slaves', default=1,
help='The minimum number of slaves the cluster must be running')
argp.add_argument('-F', '--framework', default=[], action='append',
help='Check that a framework is registered matching the given regex, may be specified multiple times')
argp.add_argument('-v', '--verbose', action='count', default=0,
help='increase output verbosity (use up to 3 times)')
args = argp.parse_args()
unhealthy_range = nagiosplugin.Range('%d:%d' % (HEALTHY - 1, HEALTHY + 1))
slave_range = nagiosplugin.Range('%s:' % (args.slaves,))
check = nagiosplugin.Check(
MesosMaster('http://%s:%d' % (args.host, args.port), args.framework),
nagiosplugin.ScalarContext('master health', unhealthy_range, unhealthy_range),
nagiosplugin.ScalarContext('active slaves', slave_range, slave_range),
nagiosplugin.ScalarContext('active leader', '1:1', '1:1'),
nagiosplugin.ScalarContext('framework', '0:0', '0:0'))
check.main(verbose=args.verbose)
if __name__ == '__main__':
main()
| Python | 0.001503 | @@ -39,23 +39,8 @@
 import nagiosplugin
-import urllib2
 import argparse
|
92c8afbb5131374611fb21b4da0b0af1a2f37a45 | add dummy test | tests/dummy.py | tests/dummy.py | Python | 0.999602 | @@ -0,0 +1,123 @@
import pytest

from pyannote.database import get_databases

def test_dummy():
    assert isinstance(get_databases(), list)
|
|
0c8b7fa865df535f5baa33025c184bbf4234b7b1 | Create script to transform shapefile into csv distance matrix | shp_to_csv_distances.py | shp_to_csv_distances.py | Python | 0 | @@ -0,0 +1,1543 @@
"""Create a csv matrix of distances between shapefile geometry objects.

Requirements: fiona, shapely

Written by: Taylor Denouden
Date: November 25, 2015
"""

import random
import fiona
from shapely.geometry import shape
from scripts.printer import print_progress


def main():
    """Main script execution."""
    outfile = open("out.csv", "w")
    ids = extract_ids("data/high_polys.shp")

    # Write header
    print "Writing Header"
    outfile.write("NODE")
    for i in ids:
        outfile.write("," + i)
    outfile.write("\n")

    # Write rows
    print "Writing Rows"
    for i, j in enumerate(ids):
        print_progress(i/len(ids))
        outfile.write(j)
        write_row_distances(j, ids, "data/high_polys.shp", outfile)
        outfile.write("\n")
    print_progress(1)
    print


def extract_ids(input_file):
    """Extract all polygon ids from input shapefile."""
    with fiona.open(input_file, 'r') as source:
        return [shp['id'] for shp in source]


def write_row_distances(i, ids, input_file, outfile):
    """Write distances between shape with id i and all other shapes in ids."""
    with fiona.open(input_file, 'r') as source:
        source = list(source)
        i_shp = shape(source[int(i)]['geometry'])

        for j in ids:
            j_shp = shape(source[int(j)]['geometry'])
            if i_shp.is_valid and j_shp.is_valid:
                dist = i_shp.distance(j_shp)
            else:
                dist = -1

            outfile.write("," + str(dist))

if __name__ == "__main__":
    main()
|
|
9a4dd1c0c51cf2732b50d5594b2a4bf661b8262f | Add geoip_lookup.py | geoip_lookup.py | geoip_lookup.py | Python | 0.000027 | @@ -0,0 +1,763 @@
import sys

if len(sys.argv) < 2:
    print """geoip_lookup.py ---
    "resolve" IP addresses to approximate geo-information

Usage:
    python geoip_lookup.py IP [ GEOIP_SERVER ]

where IP is the address to resolve, and
GEOIP_SERVER is an optional GeoIP server to contact.

(The Seattle network testbed provides two GeoIP servers,
http://geoipserver.poly.edu:12679 and http://geoipserver2.poly.edu:12679 )
"""
    sys.exit(0)

from repyportability import *
add_dy_support(locals())

geoip_client = dy_import_module("geoip_client.r2py")

try:
    geoipserver = sys.argv[2]
    geoip_client.geoip_init_client(url=geoipserver)
except IndexError:
    geoip_client.geoip_init_client()


ip = sys.argv[1]

print "Address", ip, "is located in", geoip_client.geoip_record_by_addr(ip)
|
|
24d1162740aa9a9948665d97dc082a555a1ccf13 | Rename initial_args to standard argv. | grip/command.py | grip/command.py | """\
grip.command
~~~~~~~~~~~~
Implements the command-line interface for Grip.
Usage:
grip [options] [<path>] [<address>]
grip -h | --help
grip --version
Where:
<path> is a file to render or a directory containing a README.md file
<address> is what to listen on, of the form <host>[:<port>], or just <port>
Options:
--gfm Use GitHub-Flavored Markdown, e.g. comments or issues
--context=<repo> The repository context, only taken into account with --gfm
"""
import sys
from path_and_address import resolve, split_address
from docopt import docopt
from .server import serve
from . import __version__
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
def main(initial_args=None):
"""The entry point of the application."""
if initial_args is None:
initial_args = sys.argv[1:]
version = 'Grip ' + __version__
# Parse options
args = docopt(usage, argv=initial_args, version=version)
# Parse arguments
path, address = resolve(args['<path>'], args['<address>'])
host, port = split_address(address)
# Validate address
if address and not host and not port:
print 'Error: Invalid address', repr(address)
# Run server
try:
serve(path, host, port, args['--gfm'], args['--context'])
return 0
except ValueError, ex:
print 'Error:', ex
return 1
| Python | 0 | @@ -687,28 +687,20 @@
-def main(initial_args=None):
+def main(argv=None):
     """The entry point of the application."""
-    if initial_args is None:
-        initial_args = sys.argv[1:]
+    if argv is None:
+        argv = sys.argv[1:]
@@
-    args = docopt(usage, argv=initial_args, version=version)
+    args = docopt(usage, argv=argv, version=version)
|
cf357e46b3d9664325ca69f3b7c0393c89ad44a7 | Add some function tests. | tests/test_func.py | tests/test_func.py | Python | 0 | @@ -0,0 +1,770 @@
from .utils import assert_eval


def test_simple_func():
    assert_eval('(def @a $a 8) (@a)', 1, 8)


def test_simple_func_args():
    assert_eval(
        '(def @a $a $a)'
        '(@a 1)'
        '(@a 2)'
        '(@a 5)',
        1,
        1,
        2,
        5)


def test_func_args_overwrite_globals():
    assert_eval(
        '(def @a $a 3)'
        '(set $a 10)'
        '$a'
        '(@a 8)'
        '$a',
        1,
        10,
        10,
        3,
        8,
    )


def test_func_args_with_offset():
    assert_eval(
        '(def @a $d (+ $d $i))'
        '(def @b $i (+ $i $j))'
        '(@a 1 2 3)'
        '(@b 8 9 10)'
        '$a\n$b\n$c\n$d\n$e\n$i\n$j\n$k\n',
        1, 1,
        4,
        17,
        0, 0, 0, 1, 2, 8, 9, 10,
    )
|
|
79f57f27824caa423ff873fdee3a9b8916ed410b | extract speech information for reps | import/parse/speeches.py | import/parse/speeches.py | Python | 0.998676 | @@ -0,0 +1,956 @@
"""
parse data from govtrack.us

from: data/crawl/govtrack/people.xml
"""

import web
from xml.sax import make_parser, handler

class SpeechesXML(handler.ContentHandler):
    def __init__(self, callback):
        self.callback = callback
        self.current = None

    def startElement(self, name, attrs):
        if name == 'representative':
            self.current = web.storage(attrs)
            self.current.speech_data = (self.current.id, self.current.Speeches,
                                        self.current.WordsPerSpeech)

    def endElement(self, name):
        if name == 'representative':
            self.callback(self.current)
            self.current = None

def callback(rep):
    if rep.get('Speeches') != '':
        print rep.speech_data

def main(callback):
    parser = make_parser()
    parser.setContentHandler(SpeechesXML(callback))
    parser.parse('speeches.xml')

if __name__ == "__main__": main(callback)
|
|
ec6dff24e3049ddaab392f0bc5b8d8b724e41e20 | Print the trending Python repos on GitHub | trending_python.py | trending_python.py | Python | 0.998715 | @@ -0,0 +1,341 @@
#!/usr/bin/env python3

import bs4
import requests

url = 'https://github.com/trending?l=Python'
soup = bs4.BeautifulSoup(requests.get(url).content, 'lxml')  # or 'html5lib'
repos = soup.find('ol', class_="repo-list").find_all('a', href=True)
repos = (r.text.strip().replace(' ', '') for r in repos if '/' in r.text)
print('\n'.join(repos))
|
|
37691851b6e21a6a51140f512fd9802e964b0785 | Create beta_pythons_dynamic_classes_3.py | Solutions/beta/beta_pythons_dynamic_classes_3.py | Solutions/beta/beta_pythons_dynamic_classes_3.py | Python | 0.000067 | @@ -0,0 +1,434 @@
def create_class(class_name, secrets=None):

    if not class_name: return None

    class NewClass(object):
        pass

    NewClass.__name__ = class_name

    if not secrets: return NewClass

    for i in secrets:
        if 'function' in str(type(secrets[i])):
            setattr(NewClass, i, classmethod(secrets[i]))
        else:
            setattr(NewClass, i, secrets[i])

    return NewClass
|
|
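A usage sketch for create_class above; the class name and secrets are invented for illustration. Callables in secrets become classmethods, everything else a plain attribute:

Greeter = create_class('Greeter', {
    'language': 'en',
    'greet': lambda cls, name: 'hello, ' + name,  # wrapped as a classmethod
})
print(Greeter.__name__)        # Greeter
print(Greeter.language)        # en
print(Greeter.greet('world'))  # hello, world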
32d9a97336c786660a838dc69cfab2ebe3436343 | update viafReconciliationPeople.py | viafReconciliationPeople.py | viafReconciliationPeople.py | Python | 0 | @@ -0,0 +1,2030 @@
import requests
import csv
from fuzzywuzzy import fuzz
import json
import urllib

baseURL = 'http://viaf.org/viaf/search/viaf?query=local.personalNames+%3D+%22'
f = csv.writer(open('viafPeopleResults.csv', 'wb'))
f.writerow(['search']+['result']+['viaf']+['lc']+['isni']+['ratio']+['partialRatio']+['tokenSort']+['tokenSet']+['avg'])
with open('people.txt') as txt:
    for row in txt:
        print row
        rowEdited = urllib.quote(row.decode('utf-8-sig').encode('utf-8').strip())
        url = baseURL+rowEdited+'%22+and+local.sources+%3D+%22lc%22&sortKeys=holdingscount&maximumRecords=1&httpAccept=application/rdf+json'
        response = requests.get(url).content
        try:
            response = response[response.index('<recordData xsi:type="ns1:stringOrXmlFragment">')+47:response.index('</recordData>')].replace('&quot;', '"')
            response = json.loads(response)
            label = response['mainHeadings']['data'][0]['text']
            viafid = response['viafID']
        except:
            label = ''
            viafid = ''
        ratio = fuzz.ratio(row, label)
        partialRatio = fuzz.partial_ratio(row, label)
        tokenSort = fuzz.token_sort_ratio(row, label)
        tokenSet = fuzz.token_set_ratio(row, label)
        avg = (ratio+partialRatio+tokenSort+tokenSet)/4

        if viafid != '':
            links = json.loads(requests.get('http://viaf.org/viaf/'+viafid+'/justlinks.json').text)
            viafid = 'http://viaf.org/viaf/'+viafid
            try:
                lc = 'http://id.loc.gov/authorities/names/'+json.dumps(links['LC'][0]).replace('"', '')
            except:
                lc = ''
            try:
                isni = 'http://isni.org/isni/'+json.dumps(links['ISNI'][0]).replace('"', '')
            except:
                isni = ''
        else:
            lc = ''
            isni = ''
        f = csv.writer(open('viafPeopleResults.csv', 'a'))
        f.writerow([row.strip()]+[label]+[viafid]+[lc]+[isni]+[ratio]+[partialRatio]+[tokenSort]+[tokenSet]+[avg])
|
|
9d1f8f6bfd59cf2e083276ef095618f8545c5167 | Add test to check packages for Python2.6 compatibility, as well as core. | lib/spack/spack/test/python_version.py | lib/spack/spack/test/python_version.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This test ensures that all Spack files are Python version 2.6 or less.
Spack was originally 2.7, but enough systems in 2014 are still using
2.6 on their frontend nodes that we need 2.6 to get adopted.
"""
import unittest
import os
import re
import llnl.util.tty as tty
from external import pyqver2
import spack
spack_max_version = (2,6)
class PythonVersionTest(unittest.TestCase):
def spack_python_files(self):
# first file is the spack script.
yield spack.spack_file
# Next files are all the source files and package files.
search_paths = [spack.lib_path, spack.var_path]
# Iterate through the whole spack source tree.
for path in search_paths:
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.match(r'^[^.#].*\.py$', filename):
yield os.path.join(root, filename)
def test_python_versions(self):
# dict version -> filename -> reasons
all_issues = {}
for fn in self.spack_python_files():
with open(fn) as pyfile:
versions = pyqver2.get_versions(pyfile.read())
for ver, reasons in versions.items():
if ver > spack_max_version:
if not ver in all_issues:
all_issues[ver] = {}
all_issues[ver][fn] = reasons
if all_issues:
tty.error("Spack must run on Python version %d.%d"
% spack_max_version)
for v in sorted(all_issues.keys(), reverse=True):
msgs = []
for fn in sorted(all_issues[v].keys()):
short_fn = fn
if fn.startswith(spack.prefix):
short_fn = fn[len(spack.prefix):]
reasons = [r for r in set(all_issues[v][fn]) if r]
for r in reasons:
msgs.append(("%s:%s" % ('spack' + short_fn, r[0]), r[1]))
tty.error("These files require version %d.%d:" % v)
maxlen = max(len(f) for f, prob in msgs)
fmt = "%%-%ds%%s" % (maxlen+3)
print fmt % ('File', 'Reason')
print fmt % ('-' * (maxlen), '-' * 20)
for msg in msgs:
print fmt % msg
self.assertTrue(len(all_issues) == 0)
| Python | 0 | @@ -2155,20 +2155,168 @@
-    def test_python_versions(self):
+    def all_package_py_files(self):
+        for name in spack.db.all_package_names():
+            yield spack.db.filename_for_package_name(name)
+
+
+    def check_python_versions(self, files):
         # dict version -> filename -> reasons
         all_issues = {}
-        for fn in self.spack_python_files():
+        for fn in files:
@@
         self.assertTrue(len(all_issues) == 0)
+
+
+    def test_core_module_compatibility(self):
+        self.check_python_versions(self.spack_python_files())
+
+
+    def test_package_module_compatibility(self):
+        self.check_python_versions(self.all_package_py_files())
|
3ea69c783393b6c62f3428c6ec83a24fe7634b6c | add grader in Python | 8-kyu/grader.py | 8-kyu/grader.py | Python | 0.000128 | @@ -0,0 +1,178 @@
def grader(score):
    if score < 0.6 or score > 1:
        return 'F'
    elif score < 0.7:
        return 'D'
    elif score < 0.8:
        return 'C'
    elif score < 0.9:
        return 'B'
    else:
        return 'A'
|
|
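Spot checks of the boundaries implemented above:

assert grader(0.59) == 'F' and grader(1.01) == 'F'   # out of range
assert grader(0.6) == 'D' and grader(0.79) == 'C'
assert grader(0.85) == 'B' and grader(0.95) == 'A'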
37dda1d235017bebb9bb0f6eff150dd12222762f | remove organisation from db | migrations/versions/0162_remove_org.py | migrations/versions/0162_remove_org.py | Python | 0 | @@ -0,0 +1,1812 @@
"""

Revision ID: 0162_remove_org
Revises: 0161_email_branding
Create Date: 2018-02-06 17:08:11.879844

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

revision = '0162_remove_org'
down_revision = '0161_email_branding'


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('services', 'organisation_id')
    op.drop_column('services_history', 'organisation_id')

    op.drop_table('organisation')

    op.alter_column('service_email_branding', 'email_branding_id', nullable=False)
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('services_history', sa.Column('organisation_id', postgresql.UUID(), autoincrement=False, nullable=True))  # noqa
    op.add_column('services', sa.Column('organisation_id', postgresql.UUID(), autoincrement=False, nullable=True))

    op.create_table(
        'organisation',
        sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column('colour', sa.VARCHAR(length=7), autoincrement=False, nullable=True),
        sa.Column('logo', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
        sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id', name='organisation_pkey')
    )

    op.create_index('ix_services_history_organisation_id', 'services_history', ['organisation_id'], unique=False)
    op.create_foreign_key('services_organisation_id_fkey', 'services', 'organisation', ['organisation_id'], ['id'])
    op.create_index('ix_services_organisation_id', 'services', ['organisation_id'], unique=False)

    op.alter_column('service_email_branding', 'email_branding_id', nullable=True)
|
|
d1e8a8bb6ffc852bf07c40968029c5def7dc0a96 | Correct the dict | nclxd/nova/virt/lxd/host_utils.py | nclxd/nova/virt/lxd/host_utils.py | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_fs_info(path):
"""get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
           :total: How big the filesystem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
used = (hddinfo.f_blocks - hddinfo.f_bfree) * hddinfo.f_frsize
    available = hddinfo.f_bavail * hddinfo.f_frsize
return {'total': total,
'available': free,
'used': used}
def get_memory_mb_usage():
"""Get the used memory size(MB) of the host.
"returns: the total usage of memory(MB)
"""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
| Python | 0.999145 | @@ -1190,19 +1190,24 @@
-            'available': free,
+            'available': available,
|
85060c7653a04f18e6f5cd016e113327ba3a2878 | Add support for Sercomm IP camera discovery. (#238) | netdisco/discoverables/sercomm.py | netdisco/discoverables/sercomm.py | Python | 0 | @@ -0,0 +1,492 @@
"""
Discover Sercomm network cameras.
These are rebranded as iControl and many others, and are usually
distributed as part of an ADT or Comcast/Xfinity monitoring package.
https://github.com/edent/Sercomm-API
"""
from . import SSDPDiscoverable


class Discoverable(SSDPDiscoverable):
    """Add support for discovering camera services."""

    def get_entries(self):
        """Get all Sercomm iControl devices."""
        return self.find_by_device_description({'manufacturer': 'iControl'})
|
|
1295f2867eb7348959d86618b8e80c001cc41ff7 | Add 'lib' init module. | akhet/paster_templates/akhet/+package+/lib/__init__.py | akhet/paster_templates/akhet/+package+/lib/__init__.py | Python | 0 | @@ -0,0 +1,55 @@
"""Miscellaneous support packages for {{project}}.
"""
|
|
c491b9379966e772c0ab4649584a8d5a0773c403 | Update repositoryInstaller.py | scripts/devSetup/repositoryInstaller.py | scripts/devSetup/repositoryInstaller.py | from __future__ import print_function
__author__ = u'schmatz'
import configuration
import errors
import subprocess
import os
import sys
from which import which
#git clone https://github.com/nwinter/codecombat.git coco
class RepositoryInstaller():
def __init__(self,config):
self.config = config
assert isinstance(self.config,configuration.Configuration)
if not self.checkIfGitExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Git is missing. Please install it (try 'sudo apt-get install git')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing git.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Git is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
#http://stackoverflow.com/questions/9329243/xcode-4-4-and-later-install-command-line-tools
if not self.checkIfCurlExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Curl is missing. Please install it(try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Curl is missing. Please install the Xcode command line tools.")
            raise errors.CoCoError(u"Curl is missing. Please install curl.")
def checkIfGitExecutableExists(self):
gitPath = which(u"git")
if gitPath:
return True
else:
return False
#TODO: Refactor this into a more appropriate file
def checkIfCurlExecutableExists(self):
curlPath = which("curl")
if curlPath:
return True
else:
return False
def cloneRepository(self):
print(u"Cloning repository...")
#TODO: CHANGE THIS BEFORE LAUNCH
return_code = True
git_folder = self.config.directory.root_install_directory + os.sep + "coco"
print("Installing into " + git_folder)
return_code = subprocess.call("git clone " + self.config.repository_url +" coco",cwd=self.config.directory.root_install_directory,shell=True)
        #TODO: remove this on windows
        subprocess.call("chmod -R 0777 " + git_folder, shell=True)
if return_code and self.config.system.operating_system != u"windows":
#raise errors.CoCoError("Failed to clone git repository")
import shutil
#import sys
#sys.stdout.flush()
raw_input(u"Copy it now")
#shutil.copytree(u"/Users/schmatz/coco",self.config.directory.root_install_directory + os.sep + u"coco")
print(u"Copied tree just for you")
#print("FAILED TO CLONE GIT REPOSITORY")
#input("Clone the repository and click any button to continue")
elif self.config.system.operating_system == u"windows":
raise errors.CoCoError(u"Windows doesn't support automated installations of npm at this point.")
else:
print(u"Cloned git repository")
def install_node_packages(self):
print(u"Installing node packages...")
#TODO: "Replace npm with more robust package
#npm_location = self.config.directory.bin_directory + os.sep + "node" + os.sep + "bin" + os.sep + "npm"
npm_location = u"npm"
if sys.version_info[0] == 2:
py_cmd = "python"
else:
            py_cmd = subprocess.check_output(['which', 'python2']).decode('utf-8').strip()
return_code = subprocess.call([npm_location, u"install",
"--python=" + py_cmd],
cwd=self.config.directory.root_dir +
os.sep + u"coco")
if return_code:
raise errors.CoCoError(u"Failed to install node packages")
else:
print(u"Installed node packages!")
| Python | 0.000001 | @@ -1231,16 +1231,17 @@
-                raise errors.CoCoError("Curl is missing. Please install it(try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
+                raise errors.CoCoError("Curl is missing. Please install it (try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
|
8fe99eedd4e1a1604277c42ed8f2ea0dc2e622de | add simple csv utility module | mediacloud/mediawords/util/csv.py | mediacloud/mediawords/util/csv.py | Python | 0 | @@ -0,0 +1,659 @@
"""Utility functions for dealing with csvs."""

import csv
import io


def get_csv_string_from_dicts(dicts: list) -> str:
    """Given a list of dicts, return a representative csv string."""
    if len(dicts) < 1:
        return ''

    csvio = io.StringIO()

    csvwriter = csv.DictWriter(csvio, fieldnames=dicts[0].keys())

    csvwriter.writeheader()
    [csvwriter.writerow(d) for d in dicts]

    return csvio.getvalue()


def get_dicts_from_csv_string(csvstring: str) -> list:
    """Given a csv string, return a list of dicts."""
    if len(csvstring) < 1:
        return []

    csvio = io.StringIO(csvstring)

    return list(csv.DictReader(csvio))
|
|
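The two helpers above round-trip; a minimal usage sketch with made-up rows:

rows = [{'id': '1', 'name': 'alice'}, {'id': '2', 'name': 'bob'}]
csv_string = get_csv_string_from_dicts(rows)
assert get_dicts_from_csv_string(csv_string) == rows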
f6c2d5e37685b149cfd447545c58ce1fc4d836b9 | Add function to create view for Span candidate subclasses | snorkel/models/views.py | snorkel/models/views.py | Python | 0 | @@ -0,0 +1,1313 @@
def create_serialized_candidate_view(session, C, verbose=True):
    """Creates a view in the database for a Candidate sub-class C defined over
    Span contexts, which are direct children of a single sentence.

    Creates VIEW with schema:
        candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*

    NOTE: This limited functionality should be expanded for arbitrary context
    trees. Also this should be made more dialect-independent.
    """
    selects, froms, joins = [], [], []
    for i, arg in enumerate(C.__argnames__):
        selects.append("span{0}.*".format(i))
        froms.append("span AS span{0}".format(i))
        joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))

    sql = """
        CREATE VIEW {0}_serialized AS
        SELECT
            candidate.id,
            candidate.split,
            {1},
            sentence.*
        FROM
            candidate,
            {0},
            {2},
            sentence
        WHERE
            candidate.id = {0}.id
            AND sentence.id = span0.sentence_id
            AND {3}
        """.format(
        C.__tablename__,
        ", ".join(selects),
        ", ".join(froms),
        " AND ".join(joins)
    )
    if verbose:
        print("Creating view...")
        print(sql)
    session.execute(sql)
|
|
135324dd3346f7830abbe64cb5eadf82d1ca963c | add module for generating data sets | versus/src/data.py | versus/src/data.py | | Python | 0 | @@ -0,0 +1,3205 @@
+%22%22%22%0AModule for loading datasets%0A%22%22%22%0A%0Aimport gzip%0Aimport theano.tensor as T%0Aimport theano%0Aimport numpy%0Aimport cPickle%0Aimport os%0A%0A%0Adef load_MNIST(dataset):%0A ''' Loads the dataset%0A%0A :type dataset: string%0A :param dataset: the path to the dataset (here MNIST)%0A '''%0A%0A #############%0A # LOAD DATA #%0A #############%0A%0A # Download the MNIST dataset if it is not present%0A data_dir, data_file = os.path.split(dataset)%0A if data_dir == %22%22 and not os.path.isfile(dataset):%0A # Check if dataset is in the data directory.%0A new_path = os.path.join(os.path.split(__file__)%5B0%5D, %22..%22, %22data%22, dataset)%0A if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':%0A dataset = new_path%0A%0A if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':%0A import urllib%0A origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'%0A print 'Downloading data from %25s' %25 origin%0A urllib.urlretrieve(origin, dataset)%0A%0A print '... loading data'%0A%0A # Load the dataset%0A f = gzip.open(dataset, 'rb')%0A train_set, valid_set, test_set = cPickle.load(f)%0A f.close()%0A #train_set, valid_set, test_set format: tuple(input, target)%0A #input is an numpy.ndarray of 2 dimensions (a matrix)%0A #witch row's correspond to an example. target is a%0A #numpy.ndarray of 1 dimensions (vector)) that have the same length as%0A #the number of rows in the input. It should give the target%0A #target to the example with the same index in the input.%0A%0A def shared_dataset(data_xy, borrow=True):%0A %22%22%22 Function that loads the dataset into shared variables%0A%0A The reason we store our dataset in shared variables is to allow%0A Theano to copy it into the GPU memory (when code is run on GPU).%0A Since copying data into the GPU is slow, copying a minibatch everytime%0A is needed (the default behaviour if the data is not in a shared%0A variable) would lead to a large decrease in performance.%0A %22%22%22%0A data_x, data_y = data_xy%0A shared_x = theano.shared(numpy.asarray(data_x,%0A dtype=theano.config.floatX),%0A borrow=borrow)%0A shared_y = theano.shared(numpy.asarray(data_y,%0A dtype=theano.config.floatX),%0A borrow=borrow)%0A # When storing data on the GPU it has to be stored as floats%0A # therefore we will store the labels as %60%60floatX%60%60 as well%0A # (%60%60shared_y%60%60 does exactly that). But during our computations%0A # we need them as ints (we use labels as index, and if they are%0A # floats it doesn't make sense) therefore instead of returning%0A # %60%60shared_y%60%60 we will have to cast it to int. This little hack%0A # lets ous get around this issue%0A return shared_x, T.cast(shared_y, 'int32')%0A%0A test_set_x, test_set_y = shared_dataset(test_set)%0A valid_set_x, valid_set_y = shared_dataset(valid_set)%0A train_set_x, train_set_y = shared_dataset(train_set)%0A%0A rval = %5B(train_set_x, train_set_y), (valid_set_x, valid_set_y),%0A (test_set_x, test_set_y)%5D%0A return rval
|
|
9732c401fb51ae0b757be5108835b71e7c389850 | Add tests | django_comments_xtd/tests/test_get_version.py | django_comments_xtd/tests/test_get_version.py | Python | 0.000001 | @@ -0,0 +1,604 @@
+try:%0A from unittest.mock import patch%0Aexcept ImportError:%0A from mock import patch%0A%0Afrom django.test import TestCase%0A%0A%0Aclass GetVersionTestCase(TestCase):%0A%0A @patch('django_comments_xtd.VERSION', (2, 8, 0, 'f', 0))%0A def test_get_version_when_patch_equal_to_zero(self):%0A from django_comments_xtd import get_version%0A self.assertEqual(get_version(), '2.8.0')%0A%0A @patch('django_comments_xtd.VERSION', (2, 8, 1, 'f', 0))%0A def test_get_version_when_patch_greater_than_zero(self):%0A from django_comments_xtd import get_version%0A self.assertEqual(get_version(), '2.8.1')%0A
|
|
2eb163c5dd675c2e7a9cedb5d6868545833cbf34 | Add lemma rules | spacy/en/lemma_rules.py | spacy/en/lemma_rules.py | Python | 0.000237 | @@ -0,0 +1,702 @@
+# encoding: utf8%0Afrom __future__ import unicode_literals%0A%0A%0ALEMMA_RULES = %7B%0A %22noun%22: %5B%0A %5B%22s%22, %22%22%5D,%0A %5B%22ses%22, %22s%22%5D,%0A %5B%22ves%22, %22f%22%5D,%0A %5B%22xes%22, %22x%22%5D,%0A %5B%22zes%22, %22z%22%5D,%0A %5B%22ches%22, %22ch%22%5D,%0A %5B%22shes%22, %22sh%22%5D,%0A %5B%22men%22, %22man%22%5D,%0A %5B%22ies%22, %22y%22%5D%0A %5D,%0A%0A %22verb%22: %5B%0A %5B%22s%22, %22%22%5D,%0A %5B%22ies%22, %22y%22%5D,%0A %5B%22es%22, %22e%22%5D,%0A %5B%22es%22, %22%22%5D,%0A %5B%22ed%22, %22e%22%5D,%0A %5B%22ed%22, %22%22%5D,%0A %5B%22ing%22, %22e%22%5D,%0A %5B%22ing%22, %22%22%5D%0A %5D,%0A%0A %22adj%22: %5B%0A %5B%22er%22, %22%22%5D,%0A %5B%22est%22, %22%22%5D,%0A %5B%22er%22, %22e%22%5D,%0A %5B%22est%22, %22e%22%5D%0A %5D,%0A%0A %22punct%22: %5B%0A %5B%22%E2%80%9C%22, %22%5C%22%22%5D,%0A %5B%22%E2%80%9D%22, %22%5C%22%22%5D,%0A %5B%22%5Cu2018%22, %22'%22%5D,%0A %5B%22%5Cu2019%22, %22'%22%5D%0A %5D%0A%7D%0A
|
|
45628f2abd6ec66ad48679732d600174a3a7de26 | add a script | jython/surfaceMapToDs.py | jython/surfaceMapToDs.py | Python | 0.000003 | @@ -0,0 +1,1595 @@
+#!/bin/env jython%0A%0Aimport sys%0Aimport java.io%0Aimport org.gavrog%0A%0A%0Adef dsymbolFromCyclicAdjacencies(adjs):%0A vertexToChamber = %7B%7D%0A edgeToChamber = %7B%7D%0A chamberToVertex = %7B%7D%0A%0A size = 0%0A%0A for v in adjs:%0A vertexToChamber%5Bv%5D = size%0A for w in adjs%5Bv%5D:%0A if w == v:%0A raise RuntimeException(%22found a loop at vertex %25s%22 %25 v)%0A else:%0A edgeToChamber%5Bv, w%5D = size%0A chamberToVertex%5Bsize%5D = v%0A chamberToVertex%5Bsize + 1%5D = v%0A size += 2%0A%0A ds = org.gavrog.joss.dsyms.basic.DynamicDSymbol(2)%0A elms = ds.grow(size)%0A%0A for v, w in edgeToChamber:%0A D = edgeToChamber%5Bv, w%5D%0A E = edgeToChamber%5Bw, v%5D%0A if E is None:%0A print (%22# WARNING: missing %25s in adjacencies for %25s%22 %25 (v, w))%0A ds.redefineOp(0, elms%5BD%5D, elms%5BE + 1%5D)%0A%0A for v in adjs:%0A d = 2 * len(adjs%5Bv%5D)%0A D = vertexToChamber%5Bv%5D%0A for i in range(1, d, 2):%0A ds.redefineOp(1, elms%5BD + i%5D, elms%5BD + (i + 1) %25 d%5D)%0A%0A for D in range(0, size, 2):%0A ds.redefineOp(2, elms%5BD%5D, elms%5BD + 1%5D)%0A%0A for D in range(size):%0A ds.redefineV(0, 1, elms%5BD%5D, 1)%0A ds.redefineV(1, 2, elms%5BD%5D, 1)%0A%0A return org.gavrog.joss.dsyms.basic.DSymbol(ds), chamberToVertex%0A%0A%0Aif __name__ == '__main__':%0A import re%0A%0A text = sys.stdin.read()%0A data = %5B %5B int(s) for s in re.split(r' +', line.strip()) %5D%0A for line in re.split(r'%5Cn+', text.strip()) %5D%0A adjs = dict((a%5B0%5D, a%5B1:%5D) for a in data)%0A%0A ds, _ = dsymbolFromCyclicAdjacencies(adjs)%0A%0A print ds%0A
|
|
e212ad90a8fedb8e29abe3683b99a28d4030b544 | Add process module for Popen compat handling | passpie/process.py | passpie/process.py | Python | 0 | @@ -0,0 +1,1082 @@
+from subprocess import Popen, PIPE%0A%0Afrom ._compat import *%0A%0A%0Aclass Proc(Popen):%0A%0A def communicate(self, **kwargs):%0A if kwargs.get('input') and isinstance(kwargs%5B'input'%5D, basestring):%0A kwargs%5B'input'%5D = kwargs%5B'input'%5D.encode('utf-8')%0A return super(Proc, self).communicate(**kwargs)%0A%0A def __exit__(self, *args, **kwargs):%0A if hasattr(super(Proc, self), '__exit__'):%0A super(Proc, self).__exit__(*args, **kwargs)%0A%0A def __enter__(self, *args, **kwargs):%0A if hasattr(super(Proc, self), '__enter__'):%0A return super(Proc, self).__enter__(*args, **kwargs)%0A return self%0A%0A%0Adef call(*args, **kwargs):%0A kwargs.setdefault('stdout', PIPE)%0A kwargs.setdefault('stderr', PIPE)%0A kwargs.setdefault('stdin', PIPE)%0A kwargs.setdefault('shell', False)%0A kwargs_input = kwargs.pop('input', None)%0A%0A with Proc(*args, **kwargs) as proc:%0A output, error = proc.communicate(input=kwargs_input)%0A if isinstance(output, basestring):%0A output = output.decode('utf-8')%0A return output, error%0A
|
|
53038aea2b439acdc265f81b9f031336ea1f27f3 | Add lc480_sliding_window_median.py | lc480_sliding_window_median.py | lc480_sliding_window_median.py | Python | 0.000044 | @@ -0,0 +1,1429 @@
+%22%22%22Leetcode 480. Sliding Window Median%0A%0AURL: https://leetcode.com/problems/sliding-window-median/%0A%0AHard%0A%0AMedian is the middle value in an ordered integer list. %0AIf the size of the list is even, there is no middle value. %0ASo the median is the mean of the two middle value.%0A%0AExamples: %0A%5B2,3,4%5D , the median is 3%0A%5B2,3%5D, the median is (2 + 3) / 2 = 2.5%0A%0AGiven an array nums, there is a sliding window of size k which is moving from %0Athe very left of the array to the very right. %0AYou can only see the k numbers in the window. %0AEach time the sliding window moves right by one position. %0AYour job is to output the median array for each window in the original array.%0A%0AFor example,%0AGiven nums = %5B1,3,-1,-3,5,3,6,7%5D, and k = 3.%0A%0AWindow position Median%0A--------------- -----%0A%5B1 3 -1%5D -3 5 3 6 7 1%0A 1 %5B3 -1 -3%5D 5 3 6 7 -1%0A 1 3 %5B-1 -3 5%5D 3 6 7 -1%0A 1 3 -1 %5B-3 5 3%5D 6 7 3%0A 1 3 -1 -3 %5B5 3 6%5D 7 5%0A 1 3 -1 -3 5 %5B3 6 7%5D 6%0A%0ATherefore, return the median sliding window as %5B1,-1,-1,3,5,6%5D.%0A%0ANote: %0AYou may assume k is always valid, ie: %0Ak is always smaller than input array's size for non-empty array.%0A%22%22%22%0A%0Aclass Solution(object):%0A def medianSlidingWindow(self, nums, k):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :type k: int%0A :rtype: List%5Bfloat%5D%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
9a6bf30ecfa7b843d8588a8a7b052f87089e44c7 | convert csv to excel | write_excel.py | write_excel.py | Python | 0.999999 | @@ -0,0 +1,636 @@
+%0Adef Excel2CSV(ExcelFile, SheetName, CSVFile):%0A workbook = xlrd.open_workbook(ExcelFile)%0A try:%0A worksheet = workbook.sheet_by_name(SheetName)%0A except xlrd.biffh.XLRDError:%0A print %22Missing portmap for switch %22 + str(SheetName)%0A print %22Exiting program. Check spelling of Sheet name%22%0A quit()%0A%0A csvfile = open(CSVFile, 'wb')%0A wr = csv.writer(csvfile, quotechar=%22'%22, quoting=csv.QUOTE_ALL)%0A%0A for rownum in xrange(worksheet.nrows):%0A wr.writerow(%0A list(x.encode('utf-8') if type(x) == type(u'') else x%0A for x in worksheet.row_values(rownum)))%0A csvfile.close()
|
|
a5ec49a658de23263802c7ddad02a4e34073a2a4 | add example of a go block returning value through a channel | example/go_block.py | example/go_block.py | Python | 0 | @@ -0,0 +1,265 @@
+import csp%0A%0A%0Adef lazy_echo(x):%0A yield csp.wait(0.5)%0A print %22I'm done%22%0A yield csp.stop(x)%0A%0A%0Adef main():%0A chan = csp.go(lazy_echo(1))%0A print (yield csp.take(chan))%0A%0A chan = csp.go(lazy_echo(2))%0A yield csp.wait(2)%0A print (yield csp.take(chan))%0A
|
|
9ba08d5e678f3f088cc94b2dec5e1994be6b2912 | fix reversed flag for updating latest version. | script/upload.py | script/upload.py | #!/usr/bin/env python
import argparse
import errno
import glob
import os
import subprocess
import sys
import tempfile
from lib.util import *
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
ATOM_SHELL_VRESION = get_atom_shell_version()
NODE_VERSION = 'v0.10.15'
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'Release')
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
DIST_NAME = 'atom-shell-{0}-{1}.zip'.format(ATOM_SHELL_VRESION, TARGET_PLATFORM)
def main():
args = parse_args()
if not dist_newer_than_head():
create_dist = os.path.join(SOURCE_ROOT, 'script', 'create-dist.py')
subprocess.check_call([sys.executable, create_dist])
bucket, access_key, secret_key = s3_config()
upload(bucket, access_key, secret_key)
if not args.no_update_version:
update_version(bucket, access_key, secret_key)
def parse_args():
parser = argparse.ArgumentParser(description='upload distribution file')
parser.add_argument('-n', '--no-update-version',
help='Do not update the latest version file',
action='store_false')
return parser.parse_args()
def dist_newer_than_head():
with scoped_cwd(SOURCE_ROOT):
try:
head_time = subprocess.check_output(['git', 'log', '--pretty=format:%at',
'-n', '1']).strip()
dist_time = os.path.getmtime(os.path.join(DIST_DIR, DIST_NAME))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
return dist_time > int(head_time)
def upload(bucket, access_key, secret_key, version=ATOM_SHELL_VRESION):
os.chdir(DIST_DIR)
s3put(bucket, access_key, secret_key, DIST_DIR,
'atom-shell/{0}'.format(version), [DIST_NAME])
s3put(bucket, access_key, secret_key, DIST_DIR,
'atom-shell/dist/{0}'.format(NODE_VERSION), glob.glob('node-*.tar.gz'))
if TARGET_PLATFORM == 'win32':
# Generate the node.lib.
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
subprocess.check_call([sys.executable, build, '-c', 'Release',
'-t', 'generate_node_lib'])
# Upload the 32bit node.lib.
node_lib = os.path.join(OUT_DIR, 'node.lib')
s3put(bucket, access_key, secret_key, OUT_DIR,
'atom-shell/dist/{0}'.format(NODE_VERSION), [node_lib])
# Upload the fake 64bit node.lib.
touch_x64_node_lib()
node_lib = os.path.join(OUT_DIR, 'x64', 'node.lib')
s3put(bucket, access_key, secret_key, OUT_DIR,
'atom-shell/dist/{0}'.format(NODE_VERSION), [node_lib])
def update_version(bucket, access_key, secret_key):
prefix = os.path.join(SOURCE_ROOT, 'dist')
version = os.path.join(prefix, 'version')
s3put(bucket, access_key, secret_key, prefix, 'atom-shell', [version])
def s3_config():
config = (os.environ.get('ATOM_SHELL_S3_BUCKET', ''),
os.environ.get('ATOM_SHELL_S3_ACCESS_KEY', ''),
os.environ.get('ATOM_SHELL_S3_SECRET_KEY', ''))
message = ('Error: Please set the $ATOM_SHELL_S3_BUCKET, '
'$ATOM_SHELL_S3_ACCESS_KEY, and '
'$ATOM_SHELL_S3_SECRET_KEY environment variables')
assert all(len(c) for c in config), message
return config
def s3put(bucket, access_key, secret_key, prefix, key_prefix, files):
args = [
's3put',
'--bucket', bucket,
'--access_key', access_key,
'--secret_key', secret_key,
'--prefix', prefix,
'--key_prefix', key_prefix,
'--grant', 'public-read'
] + files
subprocess.check_call(args)
def touch_x64_node_lib():
x64_dir = os.path.join(OUT_DIR, 'x64')
safe_mkdir(x64_dir)
with open(os.path.join(x64_dir, 'node.lib'), 'w+') as node_lib:
node_lib.write('Invalid library')
if __name__ == '__main__':
import sys
sys.exit(main())
| Python | 0 | @@ -1212,12 +1212,11 @@
ore_
-fals
+tru
e')%0A
|
5007a2910f54c339c50667993c11fd4586412524 | add letter code | wordonhd/Letter.py | wordonhd/Letter.py | Python | 0.978909 | @@ -0,0 +1,402 @@
+class Letter(object):%0A _values = %7B%0A 'ENIOA': 1,%0A 'SDTR': 2,%0A 'MLKPBG': 3,%0A 'ZVUFJH': 4,%0A 'CW': 5,%0A 'XY': 8,%0A 'Q': 10%0A %7D%0A%0A def __init__(self, letter):%0A self.letter = letter%5B-1%5D%0A self.wordon = letter%5B0%5D == '!'%0A%0A @property%0A def value(self):%0A return list(filter(lambda x: self.letter in x%5B0%5D, self._values.items()))%5B0%5D%5B1%5D
|
|
2e44b753a071aeba95b51bd03c5635a1eb4d7f28 | Create gcd.py | CiO/gcd.py | CiO/gcd.py | Python | 0.000001 | @@ -0,0 +1,158 @@
+from fractions import gcd%0A%0A%0Adef greatest_common_divisor(*args):%0A result, *args = args%0A for n in args:%0A result = gcd(result, n)%0A return result%0A
|
|
239488d33f94b0262e642fbf751878894fb7510e | add test for post form admin in articles | opps/articles/tests/test_forms.py | opps/articles/tests/test_forms.py | Python | 0 | @@ -0,0 +1,1974 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Afrom django.test import TestCase%0Afrom django.contrib.sites.models import Site%0Afrom django.contrib.auth import get_user_model%0A%0Afrom opps.channels.models import Channel%0Afrom opps.core.widgets import OppsEditor%0A%0Afrom ..models import Post%0Afrom ..forms import PostAdminForm%0A%0A%0Aclass PostFormTest(TestCase):%0A%0A def setUp(self):%0A User = get_user_model()%0A self.user = User.objects.create(username=u'test', password='test')%0A self.site = Site.objects.filter(name=u'example.com').get()%0A self.channel = Channel.objects.create(name=u'Home', slug=u'home',%0A description=u'home page',%0A site=self.site, user=self.user)%0A%0A def test_init(self):%0A %22%22%22%0A Test successful init without data%0A %22%22%22%0A self.post = Post.objects.create(title=u'test', user=self.user,%0A site=self.site, channel=self.channel)%0A form = PostAdminForm(instance=self.post)%0A self.assertTrue(isinstance(form.instance, Post))%0A self.assertEqual(form.instance.pk, self.post.pk)%0A%0A def test_default_multiupload_link(self):%0A %22%22%22%0A Test default value field multiupload link%0A %22%22%22%0A self.post = Post.objects.create(title=u'test', user=self.user,%0A site=self.site, channel=self.channel)%0A form = PostAdminForm(instance=self.post)%0A self.assertEqual(form.multiupload_link, '/fileupload/image/')%0A%0A def test_editor_widgets(self):%0A %22%22%22%0A Test auto set field widget Editor%0A %22%22%22%0A self.post = Post.objects.create(title=u'test', user=self.user,%0A site=self.site, channel=self.channel)%0A form = PostAdminForm(instance=self.post)%0A self.assertTrue(isinstance(form.fields%5B'content'%5D.widget,%0A OppsEditor))%0A
|
|
94dbda64d07838a7408b94251972d81897536380 | Add listener example file | listeners_example.py | listeners_example.py | Python | 0 | @@ -0,0 +1,436 @@
+import turtle%0A%0Aturtle.penup()%0Aturtle.ht()%0A%0Adef up():%0A print(%22You pressed Up!%22)%0A%0Adef down():%0A print(%22You pressed Down!%22)%0A%0Adef left():%0A print(%22You pressed Left!%22)%0A%0Adef right():%0A print(%22You pressed Right!%22)%0A %0Aturtle.onkey(up, 'Up')%0Aturtle.onkey(down, 'Down')%0Aturtle.onkey(left, 'Left')%0Aturtle.onkey(right, 'Right')%0A%0Adef repeat():%0A turtle.ontimer(repeat, 500)%0A%0Aturtle.listen() # Remember to put this after your listeners!%0A
|
|
1d0aff329c5adb836e7b055c042990de219debe0 | Add rough first implementation of widgets.py | wtforms/widgets.py | wtforms/widgets.py | Python | 0 | @@ -0,0 +1,2072 @@
+%22%22%22%0A wtforms.widgets%0A ~~~~~~~~~~~~~~~%0A %0A The WTForms widget system.%0A %0A :copyright: 2009 by James Crasta, Thomas Johansson.%0A :license: MIT, see LICENSE.txt for details.%0A%22%22%22%0Afrom cgi import escape%0A%0A__all__ = (%0A 'ListWidget', 'TextInput', 'PasswordInput', 'HiddenInput', 'CheckboxInput',%0A 'RadioInput', 'Textarea', 'Select'%0A)%0A%0Adef html_params(**kwargs):%0A %22%22%22%0A Generate HTML parameters for keywords%0A %22%22%22%0A params = %5B%5D%0A keys = kwargs.keys()%0A keys.sort()%0A for k in keys:%0A if k in ('class_', 'class__'):%0A k = k%5B:-1%5D%0A k = unicode(k)%0A v = escape(unicode(kwargs%5Bk%5D), quote=True)%0A params.append(u'%25s=%22%25s%22' %25 (k, v))%0A return str.join(' ', params)%0A%0Aclass Widget(object):%0A %22%22%22%0A Base class for all WTForms widgets.%0A %22%22%22%0A def render(self, field, **kwargs):%0A %22%22%22%0A Renders the widget. All widgets must implement this.%0A %0A %60field%60%0A The field to render.%0A %60**kwargs%60%0A Any parameters used for rendering. Typically used to override or%0A pass extra html attributes.%0A %22%22%22%0A raise NotImplementedError()%0A%0Aclass ListWidget(Widget):%0A def __init__(self, parent_tag='ul', prefix_label=True):%0A assert parent_tag in ('ol', 'ul')%0A self.parent_tag = parent_tag%0A self.prefix_label = prefix_label%0A%0A def render(self, field, **kwargs):%0A html = %5Bu'%3C%25s %25s%3E' %25 (self.parent_tag, html_params(**kwargs))%5D%0A for subfield in field:%0A if self.prefix_label:%0A html.append(u'%3Cli%3E%25s: %25s%3C/li%3E' %25 (subfield.label, subfield()))%0A else:%0A out.append(u'%3Cli%3E%25s%25s%3C/li%3E' %25 (subfield(), subfield.label))%0A html.append(u'%3C/%25s%3E' %25 self.parent_tag)%0A return ''.join(html)%0A%0Aclass Input(Widget):%0A pass%0A%0Aclass TextInput(Input):%0A pass%0A %0Aclass PasswordInput(Input):%0A pass%0A%0Aclass HiddenInput(Input):%0A pass%0A%0Aclass CheckboxInput(Input):%0A pass%0A%0Aclass RadioInput(Input):%0A pass%0A%0Aclass Textarea(Widget):%0A pass%0A%0Aclass Select(Widget):%0A pass%0A
|
|
b1d3f2a9f78cfaa2da275091e56c9287a50b561d | Switch the order of steps in a production deployment | ci/sync-dist.py | ci/sync-dist.py | # This script is used for syncing parts of the rustup dist server
# between the dev environment (dev-static.rlo), the local machine, and
# the prod environment (static.rlo). It's used during the deployment process.
#
# It does only a few things:
#
# * Sync dev bins to local host:
# python sync-dist.py dev-to-local
#
# * Sync local bins to dev archives
# python sync-dist.py local-to-dev-archives 0.2.0
#
# * Sync local bins to prod
# python sync-dist.py local-to-prod
#
# * Sync local bins to prod archives
# python sync-dist.py local-to-prod-archives 0.2.0
import sys
import os
import subprocess
import shutil
def usage():
print ("usage: sync-dist dev-to-local [--live-run]\n"
" sync-dist local-to-dev-archives $version [--live-run]\n"
" sync-dist local-to-prod-archives $version [--live-run]\n"
" sync-dist local-to-prod [--live-run]\n")
sys.exit(1)
command = None
archive_version = None
live_run = False
if len(sys.argv) < 2:
usage()
command = sys.argv[1]
if not command in ["dev-to-local",
"local-to-dev-archives",
"local-to-prod-archives",
"local-to-prod"]:
usage()
if "archives" in command:
if len(sys.argv) < 3:
usage()
archive_version = sys.argv[2]
if "--live-run" in sys.argv:
live_run = True
dev_s3_bucket = "dev-static-rust-lang-org"
prod_s3_bucket = "static-rust-lang-org"
s3_bucket = dev_s3_bucket
if "prod" in command:
s3_bucket = prod_s3_bucket
print "s3 bucket: " + s3_bucket
print "command: " + command
print "archive version: " + str(archive_version)
# First, deal with the binaries
s3cmd = None
if command == "dev-to-local":
if os.path.exists("local-rustup/dist"):
shutil.rmtree("local-rustup/dist")
os.makedirs("local-rustup/dist")
s3cmd = "s3cmd sync s3://{}/rustup/dist/ ./local-rustup/dist/".format(s3_bucket)
elif command == "local-to-dev-archives" \
or command == "local-to-prod-archives":
s3cmd = "s3cmd sync ./local-rustup/dist/ s3://{}/rustup/archive/{}/".format(s3_bucket, archive_version)
elif command == "local-to-prod":
s3cmd = "s3cmd sync ./local-rustup/dist/ s3://{}/rustup/dist/".format(s3_bucket)
else:
sys.exit(1)
print "s3 command: {}".format(s3cmd)
print
def run_s3cmd(command):
s3cmd = command.split(" ")
if not live_run:
s3cmd += ["--dry-run"]
# These are old installer names for compatibility. They don't need to
# be touched ever again.
s3cmd += ["--exclude=*rustup-setup*"]
subprocess.check_call(s3cmd)
run_s3cmd(s3cmd)
# Next deal with the rustup-init.sh script and website
if command == "dev-to-local":
if os.path.exists("local-rustup/rustup-init.sh"):
os.remove("local-rustup/rustup-init.sh")
run_s3cmd("s3cmd get s3://{}/rustup/rustup-init.sh ./local-rustup/rustup-init.sh"
.format(s3_bucket))
if os.path.exists("local-rustup/www"):
shutil.rmtree("local-rustup/www")
os.makedirs("local-rustup/www")
run_s3cmd("s3cmd sync s3://{}/rustup/www/ ./local-rustup/www/"
.format(s3_bucket))
if command == "local-to-prod":
run_s3cmd("s3cmd put ./local-rustup/rustup-init.sh s3://{}/rustup/rustup-init.sh"
.format(s3_bucket))
run_s3cmd("s3cmd sync ./local-rustup/www/ s3://{}/rustup/www/"
.format(s3_bucket))
| Python | 0.000057 | @@ -430,16 +430,25 @@
to prod
+ archives
%0A# pyt
@@ -477,16 +477,31 @@
-to-prod
+-archives 0.2.0
%0A#%0A# * S
@@ -518,33 +518,24 @@
bins to prod
- archives
%0A# python
@@ -560,31 +560,16 @@
-to-prod
--archives 0.2.0
%0A%0Aimport
|
be81dbc33e932e870a66ad0663c23e5d05b01ffa | Create Counter.py | Counter.py | Counter.py | Python | 0.000001 | @@ -0,0 +1,1503 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22 %0A@_ambonilla 2014%0A%0AUsing cocos & piglet libraries, is a small counter program %0Awhere when you push the up key it will add a number to the %0Adisplayed value, and the down key will substract one %0A%0A%22%22%22%0A%0Aimport cocos%0Aimport sys%0Afrom cocos.actions import *%0Aimport pyglet%0Afrom pyglet.window import key%0A%0Aclass TempBackground(cocos.layer.Layer):%0A%0A is_event_handler = True%0A%0A def on_key_press(self, symbol, modifiers):%0A if symbol == key.UP:%0A self.counter = self.counter + 1%0A elif symbol == key.DOWN:%0A self.counter = self.counter - 1%0A elif symbol == key.ESCAPE:%0A SystemExit()%0A %0A self.update_text()%0A %0A def update_text(self):%0A self.label.element.text = str(self.counter)%0A %0A def __init__(self):%0A self.startBackground = super(TempBackground, self).__init__()%0A self.counter = 0%0A self.label = cocos.text.Label(str(self.counter),%0A font_name='Arial',%0A font_size=150,%0A anchor_x='center',%0A anchor_y='center')%0A self.label.position = 320,240%0A self.update_text()%0A self.add(self.label)%0A%0Aif __name__ == %22__main__%22:%0A cocos.director.director.init(resizable=False, fullscreen=False)%0A temp_layer = TempBackground()%0A main_scene = cocos.scene.Scene(temp_layer)%0A cocos.director.director.run(main_scene)%0A
|
|
c80baf708c956a9814ef81213a66da8d443de12a | add migration | apps/bplan/migrations/0002_auto_20170509_1358.py | apps/bplan/migrations/0002_auto_20170509_1358.py | Python | 0.000001 | @@ -0,0 +1,1217 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('meinberlin_bplan', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='statement',%0A name='email',%0A field=models.EmailField(verbose_name='Email address', max_length=254, blank=True),%0A ),%0A migrations.AlterField(%0A model_name='statement',%0A name='name',%0A field=models.CharField(verbose_name='Your Name', max_length=255),%0A ),%0A migrations.AlterField(%0A model_name='statement',%0A name='postal_code_city',%0A field=models.CharField(verbose_name='Postal code, City', max_length=255),%0A ),%0A migrations.AlterField(%0A model_name='statement',%0A name='statement',%0A field=models.TextField(verbose_name='Statement', max_length=17500),%0A ),%0A migrations.AlterField(%0A model_name='statement',%0A name='street_number',%0A field=models.CharField(verbose_name='Street, House number', max_length=255),%0A ),%0A %5D%0A
|
|
2c78290cc569eb70b5b7098d154da3fb7a2247a9 | Add db_mktag.py, command line tag creator. | db_mktag.py | db_mktag.py | Python | 0 | @@ -0,0 +1,240 @@
+#!/usr/bin/env python%0A# -*- coding: iso-8859-1 -*-%0A%0Afrom sys import argv, exit%0Afrom dbclient import dbclient%0A%0Aif len(argv) not in (2, 3):%0A%09print %22Usage:%22, argv%5B0%5D, %22tagname %5Btagtype%5D%22%0A%09exit(1)%0A%0Aclient = dbclient()%0Aclient.add_tag(*argv%5B1:%5D)%0A
|
|
3609c5842b33ca4146ad14b74c76f8954545aaa8 | Add commands for cases and variants | loqusdb/commands/view.py | loqusdb/commands/view.py | Python | 0.000001 | @@ -0,0 +1,1391 @@
+# -*- coding: utf-8 -*-%0Aimport logging%0Aimport click%0A%0Afrom . import base_command%0A%0Alogger = logging.getLogger(__name__)%0A%0A@base_command.command()%[email protected]('-c' ,'--case-id', %0A help='Search for case'%0A)%[email protected]_context%0Adef cases(ctx, case_id):%0A %22%22%22Display all cases in the database.%22%22%22%0A %0A adapter = ctx.obj%5B'adapter'%5D%0A %0A if case_id:%0A case = adapter.case(case_id)%0A if case:%0A click.echo(case)%0A else:%0A logger.info(%22Case %7B0%7D does not exist in database%22.format(case_id))%0A else:%0A i = 0%0A for case in adapter.cases():%0A i += 1%0A click.echo(case)%0A if i == 0:%0A logger.info(%22No cases found in database%22)%0A%0A@base_command.command()%[email protected]('--variant-id', %0A help='Search for a variant'%0A)%[email protected]_context%0Adef variants(ctx, variant_id):%0A %22%22%22Display variants in the database.%22%22%22%0A %0A adapter = ctx.obj%5B'adapter'%5D%0A %0A if variant_id:%0A variant = adapter.get_variant(%7B'_id':variant_id%7D)%0A if variant:%0A click.echo(variant)%0A else:%0A logger.info(%22Variant %7B0%7D does not exist in database%22.format(variant_id))%0A else:%0A i = 0%0A for variant in adapter.get_variants():%0A i += 1%0A click.echo(variant)%0A if i == 0:%0A logger.info(%22No variants found in database%22)%0A
|
|
dd2f332dd1b7a215d5a6aa81819e3d66d46c1b91 | add python solution for 20 | 01-50/20/20.py | 01-50/20/20.py | Python | 0.000077 | @@ -0,0 +1,76 @@
+import math%0Aprint sum(int(c) for c in str(math.factorial(100)).rstrip('L'))%0A
|
|
7f661e24388e82ae2e2872ab11ee6a84d487aac7 | Create py-mysql-select.py | py-mysql-select.py | py-mysql-select.py | Python | 0.000008 | @@ -0,0 +1,555 @@
+#!/usr/bin/env python%0A# --*-- coding:utf-8 --*--%0A%0Aimport MySQLdb #%E6%93%8D%E4%BD%9Cmysql%EF%BC%8C%E9%9C%80%E8%A6%81%E5%8A%A0%E8%BD%BDMySQLdb%E6%A8%A1%E5%9D%97%0A%0A#%E5%88%9B%E5%BB%BA%E8%BF%9E%E6%8E%A5%0Aconn = MySQLdb.connect(host = '127.0.0.1',user = 'root',passwd = '123',db = 'mydb') #%E4%BD%BF%E7%94%A8connect%E6%96%B9%E6%B3%95%E5%AF%B9%E6%95%B0%E6%8D%AE%E5%BA%93%E8%BF%9B%E8%A1%8C%E8%BF%9E%E6%8E%A5%EF%BC%8C%E7%9B%B8%E5%BD%93%E4%BA%8E%E4%B8%80%E4%B8%AA%E9%97%A8%0Acur = conn.cursor() #%E4%BD%BF%E7%94%A8conn.cursor%E6%96%B9%E6%B3%95%EF%BC%8C%E7%9B%B8%E5%BD%93%E4%BA%8E%E6%93%8D%E4%BD%9C%E7%9A%84%E4%B8%80%E5%8F%8C%E6%89%8B%0A%0A#%E6%93%8D%E4%BD%9C%E6%95%B0%E6%8D%AE%E5%BA%93%0AreCount = cur.execute('select * from students') #%E5%8F%AF%E4%BB%A5%E7%9C%8B%E5%88%B0%E4%B8%BB%E5%87%BD%E6%95%B0%E7%9A%84%E6%93%8D%E4%BD%9C%E6%98%AF%E6%9F%A5%E7%9C%8Bstudents%E8%A1%A8%0Atable = cur.fetchall() #%E5%B0%86%E6%93%8D%E4%BD%9C%E6%89%80%E5%BE%97%E5%88%B0%E7%9A%84%E6%95%B0%E6%8D%AE%E5%85%A8%E9%83%A8%E6%8B%BF%E5%87%BA%E6%9D%A5 #%0A%0A#%E5%85%B3%E9%97%AD%E8%BF%9E%E6%8E%A5%0Acur.close() #%E7%BB%93%E6%9D%9F%E6%93%8D%E4%BD%9C%E5%90%8E%EF%BC%8C%E5%B0%86%E6%89%8B%E6%8B%BF%E5%9B%9E%E6%9D%A5%0Aconn.close() #%E5%B0%86%E9%97%A8%E5%85%B3%E4%B8%8A%0Aprint reCount #cur.execute%E8%BF%94%E5%9B%9E%E7%9A%84%E6%98%AF%E6%93%8D%E4%BD%9C%E5%BD%B1%E5%93%8D%E7%9A%84%E8%A1%8C%E6%95%B0%0Aprint data %0A
|
|
48b2b234377d8e66ccb274e4845a835486228166 | Create test_utils.py | utils_test.py | utils_test.py | Python | 0.000001 | @@ -0,0 +1,799 @@
+ import pytest%0A from utils import *%0A%0Adef test_struct_initialization():%0A s = Struct(a=1, b=2)%0A assert s.a == 1%0A assert s.b == 2%0A%0Adef test_struct_assignment():%0A s = Struct(a=1)%0A s.a = 3%0A assert s.a == 3%0A%0Adef test_removeall_list():%0A assert removeall(4, %5B%5D) == %5B%5D%0A assert removeall(4, %5B1,2,3,4%5D) == %5B1,2,3%5D%0A%0Adef test_removeall_string():%0A assert removeall('s', '') == ''%0A assert removeall('s', 'This is a test. Was a test.') == 'Thi i a tet. Wa a tet.'%0A%0Adef test_count_if():%0A is_odd = lambda x: x %25 2%0A assert count_if(is_odd, %5B%5D) == 0%0A assert count_if(is_odd, %5B1, 2, 3, 4, 5%5D) == 3%0A%0Adef test_argmax():%0A assert argmax(%5B-2, 1%5D, lambda x: x**2) == -2%0A%0Adef test_argmin():%0A assert argmin(%5B-2, 1%5D, lambda x: x**2) == 1%0A%0Aif __name__ == '__main__':%0A pytest.main()%0A
|
|
7581fbc397915c1ad72714203fee2349a84e14e9 | add notifiaction push script - pushNotif.py | API/ssc/SscData/pushNotif.py | API/ssc/SscData/pushNotif.py | Python | 0 | @@ -0,0 +1,567 @@
+from urllib2 import *%0Aimport urllib%0Aimport json%0Aimport sys%0A%0AMY_API_KEY=%22AIzaSyCgSjnjxtYBGMOq7jNgnE_tbhpOJjU5nOo%22%0A%0AmessageTitle = sys.argv%5B1%5D%0AmessageBody = sys.argv%5B2%5D%0A%0Adata=%7B%0A %22to%22 : %22/topics/sscapp%22,%0A %22notification%22 : %7B%0A %22body%22 : messageBody,%0A %22title%22 : messageTitle,%0A %22icon%22 : %22notif_icon%22%0A %7D%0A%7D%0A%09%0AdataAsJSON = json.dumps(data)%0A%0Arequest = Request(%0A %22https://gcm-http.googleapis.com/gcm/send%22,%0A dataAsJSON,%0A %7B %22Authorization%22 : %22key=%22+MY_API_KEY,%0A %22Content-type%22 : %22application/json%22%0A %7D%0A)%0A%0Aprint urlopen(request).read()%0A
|
|
8d5f3136fb737c8058d8b0bb4d866d1fe5bb3af8 | Add main function for specchio | specchio/main.py | specchio/main.py | Python | 0.000003 | @@ -0,0 +1,1060 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport sys%0Aimport time%0A%0Afrom watchdog.observers import Observer%0A%0Afrom specchio.handlers import SpecchioEventHandler%0Afrom specchio.utils import logger%0A%0A%0Adef main():%0A %22%22%22Main function for specchio%0A%0A Example: specchio test/ user@host:test/%0A%0A :return: None%0A %22%22%22%0A if len(sys.argv) == 2:%0A src_path = sys.argv%5B0%5D.strip()%0A dst_ssh, dst_path = sys.argv%5B1%5D.strip().split(%22:%22)%0A event_handler = SpecchioEventHandler(%0A src_path=src_path, dst_ssh=dst_path, dst_path=dst_path%0A )%0A logger.info(%22Initialize Specchio%22)%0A observer = Observer()%0A observer.schedule(event_handler, src_path, recursive=True)%0A observer.start()%0A try:%0A while True:%0A time.sleep(1)%0A except KeyboardInterrupt:%0A observer.stop()%0A observer.join()%0A else:%0A print %22%22%22Specchio is a tool that can help you rsync your file%0Ait use %60.gitignore%60 in git to mark which file is ignored.%0A%0AUsage: specchio src/ user@host:dst%22%22%22%0A
|
|
e487ca21da9e7b62a860b91aadfecdf36df005a2 | add public templates module | pymzn/templates.py | pymzn/templates.py | Python | 0.000001 | @@ -0,0 +1,101 @@
+%0Afrom .mzn import templates as _templates%0Afrom .mzn.templates import *%0A%0A__all__ = _templates.__all__%0A
|
|
1019f866fc0e9c16ccbe726b4b21265dbfc1ac68 | Add search_rotated_sorted_array.py | data_structures/sorting/search_rotated_sorted_array.py | data_structures/sorting/search_rotated_sorted_array.py | Python | 0.00001 | @@ -0,0 +1,2081 @@
+# Search in a Rotated Sorted Array%0A# You are given a sorted array which is rotated at some random pivot point.%0A#%0A# Example: %5B0,1,2,4,5,6,7%5D might become %5B4,5,6,7,0,1,2%5D%0A#%0A# You are given a target value to search. If found in the array return its index, otherwise return -1.%0A#%0A# You can assume there are no duplicates in the array and your algorithm's runtime complexity%0A# must be in the order of O(log n).%0A#%0A# Example:%0A#%0A# Input: nums = %5B4,5,6,7,0,1,2%5D, target = 0, Output: 4%0A#%0A# Here is some boilerplate code and test cases to start with:%0A%0Adef rotated_array_search(input_list, number):%0A %22%22%22%0A Find the index by searching in a rotated sorted array%0A%0A Args:%0A input_list(array), number(int): Input array to search and the target%0A Returns:%0A int: Index or -1%0A %22%22%22%0A left = 0%0A right = len(input_list) - 1%0A%0A while left %3C= right:%0A mid = (left + right) // 2%0A if number == input_list%5Bmid%5D:%0A return mid%0A%0A # left sorted portion%0A if input_list%5Bleft%5D %3C= input_list%5Bmid%5D:%0A if number %3E input_list%5Bmid%5D or number %3C input_list%5Bleft%5D:%0A left = mid + 1%0A else:%0A right = mid - 1%0A%0A # right sorted portion%0A else:%0A if number %3E input_list%5Bright%5D or number %3C input_list%5Bmid%5D:%0A right = mid - 1%0A else:%0A left = mid + 1%0A return -1%0A%0A%0Adef linear_search(input_list, number):%0A for index, element in enumerate(input_list):%0A if element == number:%0A return index%0A return -1%0A%0A%0Adef test_function(test_case):%0A input_list = test_case%5B0%5D%0A number = test_case%5B1%5D%0A if linear_search(input_list, number) == rotated_array_search(input_list, number):%0A print(%22Pass%22)%0A else:%0A print(%22Fail%22)%0A%0A%0Atest_function(%5B%5B6, 7, 8, 9, 10, 1, 2, 3, 4%5D, 6%5D)%0Atest_function(%5B%5B6, 7, 8, 9, 10, 1, 2, 3, 4%5D, 1%5D)%0Atest_function(%5B%5B6, 7, 8, 1, 2, 3, 4%5D, 8%5D)%0Atest_function(%5B%5B6, 7, 8, 1, 2, 3, 4%5D, 1%5D)%0Atest_function(%5B%5B6, 7, 8, 1, 2, 3, 4%5D, 10%5D)%0Atest_function(%5B%5B%5D, 0%5D)%0Atest_function(%5B%5B88%5D, 88%5D)%0Atest_function(%5B%5B%5D, None%5D)
|
|
ffc1b443f13672d0a4002a38f5273b5f72cdb627 | Solve Even Fibonacci numbers | python/euler002.py | python/euler002.py | Python | 0.999998 | @@ -0,0 +1,434 @@
+#!/bin/python3%0A# Project Euler #2: Even Fibonacci numbers%0A%0Adef fibonacci_sequence(n):%0A sequence = %5B1, 2%5D%0A while sequence%5B-1%5D + sequence%5B-2%5D %3C n:%0A sequence.append(sequence%5B-1%5D + sequence%5B-2%5D)%0A return sequence%0A%0Adef evens(array):%0A return list(filter(lambda x: x %25 2 == 0, array))%0A%0A%0Atest_cases = int(input().strip())%0Afor _ in range(test_cases):%0A n = int(input().strip())%0A print(sum(evens(fibonacci_sequence(n))))%0A
|
|
f5f2f87030e48dd751ed95eec08f29ab863a8ed9 | Compute the difference between two images | python/img_diff.py | python/img_diff.py | Python | 1 | @@ -0,0 +1,860 @@
+import requests%0Aimport json%0A%0A# Compute the difference between two images and output the reconstructed image and the diff output.%0A# Keep in mind that the two images must be of the same size or call 'resize' or 'crop' before to%0A# fit the images to the same dimension.%0A# Read more on imgdiff here: https://pixlab.io/#/cmd?id=imgdiff%0A%0Asrc = 'https://pixlab.io/images/jdr.jpg' # Source image which is the famous Michael Jordan's crying face.%0Atarget = 'https://pixlab.io/images/jdr_draw.jpg' # Target image which is the same Jordan's face but a MEME is drown on top of it.%0A%0Areq = requests.get('https://api.pixlab.io/imgdiff',params=%7B%0A%09'src': src,%0A%09'target': target,%0A%09'key':'My_Key'%0A%7D)%0Areply = req.json()%0Aif reply%5B'status'%5D != 200:%0A%09print (reply%5B'error'%5D)%0Aelse:%0A print (%22Diff Output: %22+str(reply%5B'diff'%5D))%0A print (%22Reconstructed image link: %22+ reply%5B'link'%5D)%0A%09%0A
|
|
dc5aad16e63ff210aa3770f6eae18f215f78f8ce | Create 03.py | 01/hw/03.py | 01/hw/03.py | Python | 0 | @@ -0,0 +1,191 @@
+# Given the variables s and t defined as:%0As = 'udacity'%0At = 'bodacious'%0A# write Python code that prints out udacious%0A# without using any quote characters in%0A# your code.%0A%0Aprint s%5B:3%5D + t%5B4:%5D%0A
|
|
616e656cb9390321cb36d8f1b067d0bddaff11c2 | Add cli argument parser | frigg/worker/cli.py | frigg/worker/cli.py | Python | 0.000001 | @@ -0,0 +1,570 @@
+# -*- coding: utf8 -*-%0Afrom fabric import colors%0Afrom frigg.worker.fetcher import fetcher%0A%0A%0Aclass Commands(object):%0A%0A @staticmethod%0A def start():%0A print(colors.green(%22Starting frigg worker%22))%0A fetcher()%0A%0A @staticmethod%0A def unknown_command():%0A print(colors.red(%22Unknown command%22))%0A%0Aif __name__ == '__main__':%0A import argparse%0A%0A parser = argparse.ArgumentParser(description='Do some work for frigg.')%0A parser.add_argument('command')%0A%0A args = parser.parse_args()%0A%0A getattr(Commands, args.command, Commands.unknown_command)()
|
|
80caf160aba107f539d18287a09fc30d6cf3d0a1 | add demo plotting the available 1D demo signals | demo/plot_demo_signals.py | demo/plot_demo_signals.py | Python | 0 | @@ -0,0 +1,1392 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22Plot the set of 1D demo signals available in %60pywt.data.demo_signal%60.%22%22%22%0A%0Aimport numpy as np%0A%0Aimport matplotlib.pyplot as plt%0A%0Aimport pywt%0A%0A# use 'list' to get a list of all available 1d demo signals%0Asignals = pywt.data.demo_signal('list')%0A%0Asubplots_per_fig = 5%0Asignal_length = 1024%0Ai_fig = 0%0An_figures = int(np.ceil(len(signals)/subplots_per_fig))%0Afor i_fig in range(n_figures):%0A # Select a subset of functions for the current plot%0A func_subset = signals%5B%0A i_fig * subplots_per_fig:(i_fig + 1) * subplots_per_fig%5D%0A%0A # create a figure to hold this subset of the functions%0A fig, axes = plt.subplots(subplots_per_fig, 1)%0A axes = axes.ravel()%0A for n, signal in enumerate(func_subset):%0A if signal in %5B'Gabor', 'sineoneoverx'%5D:%0A # user cannot specify a length for these two%0A x = pywt.data.demo_signal(signal)%0A else:%0A x = pywt.data.demo_signal(signal, signal_length)%0A ax = axes%5Bn%5D%0A ax.plot(x.real)%0A if signal == 'Gabor':%0A # The Gabor signal is complex-valued%0A ax.plot(x.imag)%0A ax.legend(%5B'Gabor (Re)', 'Gabor (Im)'%5D, loc='upper left')%0A else:%0A ax.legend(%5Bsignal, %5D, loc='upper left')%0A # omit axes for any unused subplots%0A for n in range(n + 1, len(axes)):%0A axes%5Bn%5D.set_axis_off()%0Aplt.show()%0A
|
|
944ab744ce4ba3fb30ce94ac2ec581e4b481610f | add img to the dirs that get created. | dj/scripts/mkdirs.py | dj/scripts/mkdirs.py | #!/usr/bin/python
# Makes the dir tree to put files into
import os,sys
from process import process
from main.models import Client, Show, Location, Episode
class mkdirs(process):
def mkdir(self,dir):
""" makes the dir if it doesn't exist """
ret = False
print(dir, end=' ')
if os.path.exists(dir):
print('(exists)')
else:
if self.options.test:
print('(testing, skipped)')
else:
os.makedirs(dir)
ret = True
print()
return ret
def work(self):
"""
find client and show, create the dirs
"""
client = Client.objects.get(slug=self.options.client)
show = Show.objects.get(client=client,slug=self.options.show)
self.set_dirs(show)
dirs = "dv assets tmp titles webm mp4 mlt custom/titles"
for d in dirs.split():
full_dir = os.path.join(self.show_dir,d)
ret = self.mkdir(full_dir)
# copy the footer image
# not sure where this should happen *shrug*
# It's really just for the default,
# If there is a non default, it will live under show_dir/assets/.
credits_img = client.credits
credits_src = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"bling",
credits_img)
# copy into show/assetts
credits_pathname = os.path.join(
self.show_dir, "assets", credits_img )
self.run_cmd( ["cp", credits_src, credits_pathname] )
if self.options.raw_slugs:
# get episodes for this show
eps = Episode.objects.filter(show=show)
for ep in eps:
loc = ep.location.slug
dt = ep.start.strftime("%Y-%m-%d")
slug = ep.slug
full_dir = os.path.join(self.show_dir,'dv',loc,dt,slug)
ret = self.mkdir(full_dir)
else:
# get locations of the episodes
for loc in Location.objects.filter(
show=show, active=True):
dir = os.path.join(self.show_dir,'dv',loc.slug)
ret = self.mkdir(dir)
return
def add_more_options(self, parser):
parser.add_option('--raw-slugs', action="store_true",
help="Make a dir for each talk's raw files")
if __name__=='__main__':
p=mkdirs()
p.main()
| Python | 0 | @@ -850,16 +850,20 @@
m/titles
+ img
%22%0A
|
4a30d30b82fbdccbb0f15ebb5c094b13ce791f7f | Add a utility class to normalize input | genderator/utils.py | genderator/utils.py | Python | 0.000002 | @@ -0,0 +1,544 @@
+from unidecode import unidecode%0A%0A%0Aclass Normalizer:%0A%0A def normalize(text):%0A text = Normalizer.remove_extra_whitespaces(text)%0A text = Normalizer.replace_hyphens(text)%0A # text = Normalizer.remove_accent_marks(text)%0A%0A return text.lower()%0A%0A @staticmethod%0A def replace_hyphens(text):%0A return text.replace('-', ' ')%0A%0A @staticmethod%0A def remove_extra_whitespaces(text):%0A return ' '.join(text.strip().split());%0A%0A @staticmethod%0A def remove_accent_marks(text):%0A return unidecode(text)
|
|
8b828e9c9daacd8bd6b5719e0ee50fc93f3c612d | add line-invoker, allows pipeline to be changed on the fly | line-invoker.py | line-invoker.py | Python | 0 | @@ -0,0 +1,1018 @@
+#!/usr/bin/python%0A%0Afrom __future__ import print_function%0A%0Aimport sys%0Aimport subprocess%0A%0A# A normal(ish) pipeline looks like the following:%0A# tailf input %7C grep -v foo %7C grep bar %7C cat %3E%3Eoutput%0A%0A# If we want to change the valu %22foo%22, %22bar%22 or otherwise change the%0A# pipeline, we have to kill the old pipeline and start a new one.%0A%0A# This script changes the above to%0A# tailf input %7C line-invoker.py mypipeline.sh %7C cat %3E%3Eoutput%0A%0A# where mypipeline.sh contains:%0A# grep -v foo %7C grep bar%0A%0A# This allows the pipeline to be edited at will, without breaking the%0A# tailf and potentially having missed lines, or duplicated them on%0A# restarting tailf%0A%0Adef main():%0A prog = sys.argv%5B1%5D%0A try:%0A line = sys.stdin.readline()%0A while line:%0A p = subprocess.Popen(prog, stdin=subprocess.PIPE)%0A p.stdin.write(line)%0A p.stdin.close()%0A sys.stdout.flush()%0A line = sys.stdin.readline()%0A%0A except KeyboardInterrupt:%0A pass%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
80cb11187894870ba9fe40e09834522d7ea2ee10 | Create middleware.py | middleware.py | middleware.py | Python | 0.000007 | @@ -0,0 +1 @@
+%0A
|
|
aadd5b5d60e1fa2939482790baa893d9624ad33b | Create mnist_lstm.py | mnist_lstm.py | mnist_lstm.py | Python | 0.000004 | @@ -0,0 +1,2658 @@
+from tensorflow.models.rnn import rnn_cell, rnn%0Aimport tensorflow as tf%0Aimport numpy as np%0Aimport input_data%0Asess = tf.Session() %0A%0A'''%0AClassify MNIST using LSTM running row by row. %0A%0AGood:%0A* No compilation time at all, which is cool.%0A%0ABad:%0A* Problem is that has all dimensions hard coded, which sucks.%0A%0AInspired by:%0Ahttps://github.com/nlintz/TensorFlow-Tutorials%0A'''%0A%0Adef init_weights(shape):%0A return tf.Variable(tf.random_normal(shape, stddev=0.01))%0A %0Adef get_lstm(num_steps, input_dim, hidden_dim, output_dim, batch_size):%0A # Define input%0A input = tf.placeholder(%22float%22, %5Bbatch_size, num_steps, input_dim%5D)%0A desired = tf.placeholder(%22float%22, %5Bbatch_size, 10%5D)%0A # Define parameters%0A i2h = init_weights(%5Binput_dim, hidden_dim%5D)%0A h2o = init_weights(%5Bhidden_dim, output_dim%5D)%0A bi = init_weights(%5Bhidden_dim%5D)%0A bo = init_weights(%5Boutput_dim%5D)%0A %0A # prepare input%0A # input shape: (batches, num_steps, input_dim)%0A X2 = tf.transpose(input, %5B1, 0, 2%5D) # (num_steps, batch_size, input_dim)%0A # tf.reshape does not accept X.get_shape elements as input :(%0A X3 = tf.reshape(X2, %5Bnum_steps*batch_size, dim%5D) # (num_steps*batch_size, input_dim)%0A # project to hidden state dimension%0A X4 = tf.matmul(X3, i2h) + bi # (num_steps*batch_size, hidden_dim)%0A%0A # LSTM for loop expects a list as input, here we slice X3 into pieces of (batch_size, hidden_dim)%0A # tf.split expects as input a axis to slice, number of slices and a tensor%0A Xh = tf.split(0, num_steps, X4)%0A %0A %0A initializer = tf.random_uniform_initializer(-.01, .01)%0A # INNER LOOP%0A # There are two ways of calculating the inner loop of an RNN%0A with tf.variable_scope(%22RNN%22, reuse=None, initializer=initializer): # this is necessary%0A lstm_cell = rnn_cell.BasicLSTMCell(hidden_dim, forget_bias=1.0)%0A initial_state = lstm_cell.zero_state(batch_size, tf.float32)%0A # Explicitly calling a for loop inside the scope%0A #for time_step, input_ in enumerate(inputs):%0A # if time_step %3E 0: tf.get_variable_scope().reuse_variables()%0A # (cell_output, state) = lstm_cell(input_, initial_state)%0A # outputs.append(cell_output)%0A # states.append(state)%0A %0A # or simply using rnn(cell, inputs, initial_state=init_state)%0A lstm_outputs, lstm_states = rnn.rnn(lstm_cell, Xh, initial_state=initial_state)%0A sess.run(tf.initialize_all_variables()) # it didn't work for me initializing outside the scope%0A %0A # calculate output%0A Y = lstm_outputs%5B-1%5D # outputs is a list, we get the last value%0A output = tf.matmul(Y, h2o) + bo%0A %0A return input, output, desired%0A
|
|
940c4f4238eac31f926e520dba473819abb44033 | Add a moksha.hub module with an initial OrbitedWidget | moksha/hub.py | moksha/hub.py | Python | 0 | @@ -0,0 +1,1935 @@
+# This file is part of Moksha.%0A#%0A# Moksha is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A# %0A# Moksha is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with Moksha. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A# Copyright 2008, Red Hat, Inc.%0A# Authors: Luke Macken %[email protected]%3E%0A%0A%22%22%22%0AThe Moksha Real-time Hub%0A%22%22%22%0A%0Afrom tw.api import Widget, JSLink, js_callback, js_function%0A%0A# @@ Make the orbited url globally configurable%0AORBITED_URL = 'http://localhost:9000'%0Aorbited_js = JSLink(link=ORBITED_URL + '/static/Orbited.js')%0A%0Aclass OrbitedWidget(Widget):%0A params = %7B%0A 'onopen': 'A javascript callback for when the connection opens',%0A 'onread': 'A javascript callback for when new data is read',%0A 'onclose': 'A javascript callback for when the connection closes',%0A %7D%0A javascript = %5Borbited_js%5D%0A onopen = onread = onclose = js_callback('function()%7B%7D')%0A template = %22%22%22%0A %3Cscript type=%22text/javascript%22%3E%0A Orbited.settings.port = 9000%0A Orbited.settings.hostname = 'localhost'%0A document.domain = document.domain%0A TCPSocket = Orbited.TCPSocket%0A connect = function() %7B%0A conn = new TCPSocket()%0A conn.onread = $%7Bonread%7D%0A conn.onopen = $%7Bonopen%7D%0A conn.onclose = $%7Bonclose%7D%0A conn.open('localhost', 9000)%0A %7D%0A $(document).ready(function() %7B%0A connect()%0A %7D);%0A %3C/script%3E%0A %22%22%22%0A
|
|
7780c235f0f357ab918f0c031e7dc51f6ca072a9 | Solve problem 20 | problem020.py | problem020.py | Python | 0.999999 | @@ -0,0 +1,273 @@
+#!/usr/bin/env python3%0A%0Afrom functools import *%0Aimport operator%0A%0Adef factorial(number):%0A assert number %3E= 1%0A return reduce(operator.mul, range(1, number+1))%0A%0Adef digits(number):%0A yield from (int(digit) for digit in str(number))%0A%0Aprint(sum(digits(factorial(100))))%0A
|
|
50b47ecc52b390a7fc6749491b1895389280a4ec | Add more tests of BotTestExpectations, including some which we fail. | Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py | Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.layout_package import bot_test_expectations
class BotTestExpectationsTest(unittest.TestCase):
# Expects newest result on left of string "PFF", means it just passed after 2 failures.
def _results_from_string(self, results_string):
results_list = []
last_char = None
for char in results_string:
if char != last_char:
results_list.insert(0, [1, char])
else:
results_list[0][0] += 1
return {'results': results_list}
def _assert_expectations(self, expectations, test_data, expectations_string):
output = expectations._generate_expectations_string(test_data)
self.assertMultiLineEqual(output, expectations_string)
def test_basic(self):
test_data = {
'tests': {
'foo': {
'veryflaky.html': self._results_from_string('FPFP'),
'maybeflaky.html': self._results_from_string('PPFP'),
'notflakypass.html': self._results_from_string('PPPP'),
'notflakyfail.html': self._results_from_string('FFFF'),
}
}
}
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
self._assert_expectations(expectations, test_data, """Bug(auto) foo/veryflaky.html [ Failure Pass ]""")
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=False)
self._assert_expectations(expectations, test_data, """Bug(auto) foo/veryflaky.html [ Failure Pass ]
Bug(auto) foo/maybeflaky.html [ Failure Pass ]""")
def test_all_failure_types(self):
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
test_data = {
'tests': {
'foo': {
'allfailures.html': self._results_from_string('FPFPCNCNTXTXIZIZOCOC'),
'imageplustextflake.html': self._results_from_string('ZPZPPPPPPPPPPPPPPPPP'),
}
}
}
self._assert_expectations(expectations, test_data, """Bug(auto) foo/imageplustextflake.html [ Failure Pass ]
Bug(auto) foo/allfailures.html [ Crash Missing ImageOnlyFailure Failure Timeout Pass ]""")
| Python | 0.000002 | @@ -1678,93 +1678,1308 @@
#
-Expects newest result on left of string %22PFF%22, means it just passed after 2 failures.
+All result_string's in this file expect newest result%0A # on left: %22PFF%22, means it just passed after 2 failures.%0A%0A def _assert_is_flaky(self, results_string, should_be_flaky):%0A expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)%0A length_encoded = self._results_from_string(results_string)%5B'results'%5D%0A num_actual_results = len(expectations._actual_results_for_test(length_encoded))%0A if should_be_flaky:%0A self.assertGreater(num_actual_results, 1)%0A else:%0A self.assertEqual(num_actual_results, 1)%0A%0A def test_basic_flaky(self):%0A self._assert_is_flaky('PFF', False) # Used to fail, but now passes.%0A self._assert_is_flaky('FFP', False) # Just started failing.%0A self._assert_is_flaky('PFPF', True) # Seen both failures and passes.%0A # self._assert_is_flaky('PPPF', True) # Should be counted as flaky but isn't yet.%0A self._assert_is_flaky('FPPP', False) # Just started failing, not flaky.%0A self._assert_is_flaky('PFFP', True) # Failed twice in a row, still flaky.%0A # Failing 3+ times in a row is unlikely to be flaky, but rather a transient failure on trunk.%0A # self._assert_is_flaky('PFFFP', False)%0A # self._assert_is_flaky('PFFFFP', False)%0A
%0A
|
6fcb3adbcf85aa8039274f59d2b26401b5927fc4 | Create PowerofFour_001.py | kargtom/twodim/PowerofFour/PowerofFour_001.py | kargtom/twodim/PowerofFour/PowerofFour_001.py | Python | 0 | @@ -0,0 +1,90 @@
+def isPowerOfFour(n):%0A return n %3E 0 and n & n - 1 is 0 and n & 0x5555555555555555 != 0%0A
|
|
8a7fda2acf57c135e7f401ebdd8f71c3609c0eca | Create tries.py | Python/tries.py | Python/tries.py | Python | 0.000001 | @@ -0,0 +1,1565 @@
+def make_trie(*args):%0A%09trie=%7B%7D%0A%09for word in args:%0A%09%09if type(word)!= str:%0A%09%09%09raise TypeError(%22Trie work only on strings%22)%0A%09%09# temp_trie and trie refer to the same dictionary object.%0A%09%09temp_trie=trie%0A%09%09for letter in word:%0A%09%09%09# here setdefault sets the letter to %7B%7D(%7B'y':%7B%7D%7D) and then returns %7B%7D to temp_trie.%0A%09%09%09# So now temp_trie contains %7B%7D but trie points to (%7B'y':%7B%7D%7D).%0A%0A%09%09%09# setdefault assigns the letter their value and returns %7B%7D%0A%09%09%09# That is why nesting takes place.%0A%09%09%09temp_trie=temp_trie.setdefault(letter,%7B%7D)%0A%09%09temp_trie=temp_trie.setdefault('__end__','__end__')%0A%0A%09return trie%0A%0Adef in_trie(trie,word):%0A%09if type(word)!= str:%0A%09%09raise TypeError(%22Trie work only on strings%22)%0A%0A%09temp_trie=trie%0A%09for letter in word:%0A%09%09if letter not in temp_trie:%0A%09%09%09return False%0A%09%09temp_trie=temp_trie%5Bletter%5D%0A%0A%09if %22__end__%22 in temp_trie:%0A%09%09return True%0A%09else:%0A%09%09return False%0A%0Adef remove(trie,word,depth):%0A%09if word and word%5Bdepth%5D not in trie:%0A%09%09return False%0A%0A%09if len(word) == depth + 1:%0A%09%09if '__end__' in trie%5Bword%5Bdepth%5D%5D:%09%09%0A%09%09%09del trie%5Bword%5Bdepth%5D%5D%5B'__end__'%5D %0A%09%09%09%0A%09%09if len(trie%5Bword%5Bdepth%5D%5D) %3E 0 and len(trie) %3E 1: %0A%09%09%09return False%0A%09%09elif len(trie) %3E 1 : %0A%09%09%09del trie%5Bword%5Bdepth%5D%5D%0A%09%09%09return False%0A%09%09elif len(trie%5Bword%5Bdepth%5D%5D) %3E 0: %0A%09%09%09return False%0A%09%09else:%0A%09%09%09return True%0A%09else:%0A%09%09temp_trie = trie %0A%09%09# Recursively climb up to delete.%0A%09%09if remove(temp_trie%5Bword%5Bdepth%5D%5D, word, depth + 1):%0A%09%09%09if temp_trie:%0A%09%09%09%09del temp_trie%5Bword%5Bdepth%5D%5D%0A%09%09%09return not temp_trie%0A%09%09else:%0A%09%09%09return False%0A%0A%0A%0Atrie=make_trie('hack','hackerrank')%0Aprint trie%0Aprint in_trie(trie,'hac')%0Aprint trie%0A
|
|
23cf747a3ff24f75d3300547f4bfdecf10c4a325 | Add next traversal util function | scrapple/utils/config.py | scrapple/utils/config.py | Python | 0.000935 | @@ -0,0 +1,813 @@
+%22%22%22%0Ascrapple.utils.config%0A~~~~~~~~~~~~~~~~~~~~~%0A%0AFunctions related to traversing the configuration file%0A%22%22%22%0A%0Afrom __future__ import print_function%0A%0A%0Adef traverse_next(page, next, results):%0A for link in page.extract_links(next%5B'follow_link'%5D):%0A print(%22Loading page%22, link.url)%0A r = results%0A for attribute in next%5B'scraping'%5D.get('data'):%0A if attribute%5B'field'%5D != %22%22:%0A print(%22%5CnExtracting%22, attribute%5B'field'%5D, %22attribute%22, sep=' ')%0A r%5Battribute%5B'field'%5D%5D = link.extract_content(attribute%5B'selector'%5D, attribute%5B'attr'%5D)%0A if not next%5B'scraping'%5D.get('next'):%0A yield r%0A else:%0A for next2 in next%5B'scraping'%5D.get('next'):%0A for result in traverse_next(link, next2, r):%0A yield result%0A
|
|
56b3cf07fff4d3794dcdbf99f6d7faa629fa243e | fix string manipulation in render_templatefile() | scrapy/utils/template.py | scrapy/utils/template.py | """Helper functions for working with templates"""
import os
import re
import string
def render_templatefile(path, **kwargs):
with open(path, 'rb') as file:
raw = file.read()
content = string.Template(raw).substitute(**kwargs)
with open(path.rstrip('.tmpl'), 'wb') as file:
file.write(content)
if path.endswith('.tmpl'):
os.remove(path)
CAMELCASE_INVALID_CHARS = re.compile('[^a-zA-Z\d]')
def string_camelcase(string):
""" Convert a word to its CamelCase version and remove invalid chars
>>> string_camelcase('lost-pound')
'LostPound'
>>> string_camelcase('missing_images')
'MissingImages'
"""
return CAMELCASE_INVALID_CHARS.sub('', string.title())
| Python | 0.000007 | @@ -247,38 +247,104 @@
-with open(path.rstrip('.tmpl')
+render_path = path%5B:-len('.tmpl')%5D if path.endswith('.tmpl') else path%0A with open(render_path
, 'w
|
ec484a404752c60a7c88ae84f79b4792c777dfd4 | Define ESCO ua and eu tender models | openprocurement/tender/esco/models.py | openprocurement/tender/esco/models.py | Python | 0 | @@ -0,0 +1,582 @@
+from zope.interface import implementer%0Afrom schematics.types import StringType%0A%0Afrom openprocurement.api.models import ITender%0A%0Afrom openprocurement.tender.openua.models import (%0A Tender as BaseTenderUA,%0A)%0A%0Afrom openprocurement.tender.openeu.models import (%0A Tender as BaseTenderEU,%0A)%0A%0A%0A@implementer(ITender)%0Aclass Tender(BaseTenderUA):%0A %22%22%22 %22%22%22%0A procurementMethodType = StringType(default=%22esco.UA%22)%0A%0ATenderESCOUA = Tender%0A%0A%0A@implementer(ITender)%0Aclass Tender(BaseTenderEU):%0A %22%22%22 %22%22%22%0A procurementMethodType = StringType(default=%22esco.EU%22)%0A%0A%0ATenderESCOEU = Tender%0A
|
|
82b9a66ea826b4463d82c69ba1703eab213efe83 | Add test for stack outputs | heat_integrationtests/functional/test_stack_outputs.py | heat_integrationtests/functional/test_stack_outputs.py | Python | 0.00006 | @@ -0,0 +1,2371 @@
+# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom heat_integrationtests.functional import functional_base%0A%0A%0Aclass StackOutputsTest(functional_base.FunctionalTestsBase):%0A%0A template = '''%0Aheat_template_version: 2015-10-15%0Aresources:%0A test_resource_a:%0A type: OS::Heat::TestResource%0A properties:%0A value: 'a'%0A test_resource_b:%0A type: OS::Heat::TestResource%0A properties:%0A value: 'b'%0Aoutputs:%0A resource_output_a:%0A description: 'Output of resource a'%0A value: %7B get_attr: %5Btest_resource_a, output%5D %7D%0A resource_output_b:%0A description: 'Output of resource b'%0A value: %7B get_attr: %5Btest_resource_b, output%5D %7D%0A'''%0A%0A def test_outputs(self):%0A stack_identifier = self.stack_create(%0A template=self.template%0A )%0A expected_list = %5B%7Bu'output_key': u'resource_output_a',%0A u'description': u'Output of resource a'%7D,%0A %7Bu'output_key': u'resource_output_b',%0A u'description': u'Output of resource b'%7D%5D%0A%0A actual_list = self.client.stacks.output_list(%0A stack_identifier)%5B'outputs'%5D%0A self.assertEqual(expected_list, actual_list)%0A%0A expected_output_a = %7B%0A u'output_value': u'a', u'output_key': u'resource_output_a',%0A u'description': u'Output of resource a'%7D%0A expected_output_b = %7B%0A u'output_value': u'b', u'output_key': u'resource_output_b',%0A u'description': u'Output of resource b'%7D%0A actual_output_a = self.client.stacks.output_show(%0A stack_identifier, 'resource_output_a')%5B'output'%5D%0A actual_output_b = self.client.stacks.output_show(%0A stack_identifier, 'resource_output_b')%5B'output'%5D%0A self.assertEqual(expected_output_a, actual_output_a)%0A self.assertEqual(expected_output_b, actual_output_b)%0A
|
|
78df4f45ea4b8c04ba8f34d8fc356345998c616b | Add TelnetServer.py under version control. | TelnetServer.py | TelnetServer.py | Python | 0 | @@ -0,0 +1,2181 @@
+#!/usr/bin/env python%0A# coding: utf-8%0A%0Aimport socket%0Aimport threading%0A%0Awelcome_slogan = '''Welcome novice!%5Cr%5Cn%5C%0AType something and hit enter to see what happens.%5Cr%5Cn%5C%0ABe bold!%5Cr%5Cn%5Cr%5Cn'''%0Ahelp_message = '''Command Description%5Cr%5Cn%5C%0A=============================================================%5Cr%5Cn%5C%0AHELP Print this help message%5Cr%5Cn%5C%0ATALK 'MESSAGE' Talk to other users in the same telnet system%5Cr%5Cn%5C%0AEXIT Quit the telnet service%5Cr%5Cn%5Cr%5Cn%5C%0AAt your service. 20140819%5Cr%5Cn%5Cr%5Cn'''%0Agoodbye_farewell = '''Have a lot of fun!%5Cr%5Cn'''%0A%0APS1 = 'TELNET# '%0A%0AHOST = ''%0APORT = 56789%0A%0As = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0As.bind((HOST, PORT))%0As.listen(5)%0Aclients = %5B%5D # list of clients connected%0Alock = threading.Lock()%0A%0A%0Aclass telnetServer(threading.Thread):%0A def __init__(self, bind):%0A threading.Thread.__init__(self)%0A (self.socket, self.address) = bind%0A%0A def run(self):%0A lock.acquire()%0A clients.append(self)%0A lock.release()%0A print ('+ %25s:%25s connected.' %25 self.address)%0A self.socket.send(welcome_slogan.encode())%0A while True:%0A self.socket.send(PS1.encode())%0A data = self.socket.recv(1024)%0A temp = data.decode().strip()%0A if not data:%0A break%0A elif temp.upper() in %5B'BY', 'BYE', 'QUIT', 'EXIT'%5D:%0A break%0A elif temp.lower() in %5B'?', 'help'%5D:%0A self.socket.send(help_message.encode())%0A elif temp.startswith('#') or temp == '':%0A pass%0A elif temp%5B:5%5D.upper() == 'TALK ':%0A print ('%25s %25s' %25 (self.address, temp%5B5:%5D))%0A for c in clients:%0A c.socket.send(('%25s %25s%5Cr%5Cn' %25 (self.address, temp%5B5:%5D)).encode())%0A else:%0A self.socket.send(data)%0A self.socket.send(goodbye_farewell.encode())%0A self.socket.close()%0A print ('- %25s:%25s disconnected.' %25 self.address)%0A lock.acquire()%0A clients.remove(self)%0A lock.release()%0A%0Awhile True: # wait for socket to connect%0A # send socket to telnetserver and start monitoring%0A telnetServer(s.accept()).start()%0A%0A
|
|
8b2eb3bece67a1eb81a6165238205b05361f2ec3 | fix key case | corehq/apps/ota/tasks.py | corehq/apps/ota/tasks.py | from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from casexml.apps.phone.restore import RestoreConfig
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(domain, usernames_or_ids, version=V1, cache_timeout_hours=None,
overwrite_cache=False, check_cache_only=False):
"""
Task to generate and cache a restore payload for each user passed in.
:param domain: The domain name for the users
:param usernames_or_ids: List of usernames or user IDs
:param version: Restore format version
:param cache_timeout_hours: Hours to cache the payload
:param overwrite_cache: If True overwrite any existing cache
:param check_cache_only: Don't generate the payload, just check if it is already cached
"""
total = len(usernames_or_ids)
DownloadBase.set_progress(prime_restore, 0, total)
ret = {'messages': []}
for i, username_or_id in enumerate(usernames_or_ids):
couch_user = get_user(username_or_id)
if not couch_user:
ret['messages'].append('WARNING: User not found: {}'.format(username_or_id))
continue
elif couch_user.domain != domain:
ret['messages'].append("WARNING: User '{}' not from domain '{}'".format(
username_or_id,
domain
))
continue
try:
project = couch_user.project
commtrack_settings = project.commtrack_settings
stock_settings = commtrack_settings.get_ota_restore_settings() if commtrack_settings else None
restore_config = RestoreConfig(
couch_user.to_casexml_user(), None, version, None,
items=True,
stock_settings=stock_settings,
domain=project,
force_cache=True,
cache_timeout=cache_timeout_hours * 60 * 60,
overwrite_cache=overwrite_cache
)
if check_cache_only:
cached_payload = _get_cached_payload(restore_config)
ret['MESSAGES'].append('Restore cache {} for user: {}'.format(
'EXISTS' if cached_payload else 'does not exist',
couch_user.human_friendly_name,
))
else:
restore_config.get_payload()
cached_payload = _get_cached_payload(restore_config)
if cached_payload:
ret['messages'].append('SUCCESS: Restore cached successfully for user: {}'.format(
couch_user.human_friendly_name,
))
else:
ret['messages'].append('ERROR: Restore completed by cache still empty for user: {}'.format(
couch_user.human_friendly_name,
))
except Exception as e:
ret['messages'].append('ERROR: Error processing user: {}'.format(str(e)))
DownloadBase.set_progress(prime_restore, i + 1, total)
return ret
def _get_cached_payload(restore_config):
original = restore_config.overwrite_cache
try:
# must set this to False before attempting to check the cache
restore_config.overwrite_cache = False
payload = restore_config.get_cached_payload()
finally:
restore_config.overwrite_cache = original
return payload
def get_user(username_or_id):
try:
couch_user = CommCareUser.get(username_or_id)
except ResourceNotFound:
try:
couch_user = CommCareUser.get_by_username(username_or_id)
except ResourceNotFound:
return None
return couch_user
| Python | 0.999689 | @@ -2193,16 +2193,16 @@
et%5B'
-MESSAGES
+messages
'%5D.a
|
eaeb02839913136909cccc9a99612a1eb7145b97 | support state hash in ota restore if specified | corehq/apps/ota/views.py | corehq/apps/ota/views.py | from corehq.apps.users.models import CouchUser
from django_digest.decorators import *
from casexml.apps.phone.restore import generate_restore_payload
@httpdigest
def restore(request, domain):
"""
We override restore because we have to supply our own
user model (and have the domain in the url)
"""
user = request.user
restore_id = request.GET.get('since')
api_version = request.GET.get('version', "1.0")
username = user.username
couch_user = CouchUser.from_django_user(user)
if not couch_user.is_commcare_user():
response = HttpResponse("No linked chw found for %s" % username)
response.status_code = 401 # Authentication Failure
return response
response = generate_restore_payload(couch_user.to_casexml_user(), restore_id,
api_version)
return HttpResponse(response, mimetype="text/xml") | Python | 0 | @@ -135,23 +135,24 @@
restore_
-payload
+response
%0A%0A%0A%0A@htt
@@ -435,16 +435,58 @@
%221.0%22)%0A
+ state_hash = request.GET.get('state')%0A
user
@@ -771,24 +771,20 @@
%0A re
-sponse =
+turn
generat
@@ -797,15 +797,16 @@
ore_
-payload
+response
(cou
@@ -881,24 +881,25 @@
+
api_version)
@@ -901,60 +901,22 @@
sion
-)%0A return HttpResponse(response, mimetype=%22text/xml%22)
+, state_hash)%0A
|
dca0404e6f14194be3a5926e522bbeea375e8456 | add net spider rokic's version | crawler/discount_info.py | crawler/discount_info.py | Python | 0 | @@ -0,0 +1,1207 @@
+import json%0Aimport requests%0Afrom bs4 import BeautifulSoup%0A%0ADOMAIN = %22%22%0AAPI = %22http://%25s/api/%22 %25 (DOMAIN)%0ASTEAMDB_SALE_URL = %22https://steamdb.info/sales/?merged=true&cc=cn%22%0A%0Aheaders = %7B%0A 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',%0A 'Accept-Charset': 'UTF-8,*;q=0.5',%0A 'Accept-Encoding': 'gzip,deflate,sdch',%0A 'Accept-Language': 'en-US,en;q=0.8',%0A 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'%0A%7D%0A%0Ar = requests.get(STEAMDB_SALE_URL, header=headers)%0A%0Acontent = r.content.decode().replace('%5Cn', '')%0Ajar = BeautifulSoup(content, 'lxml').tbody%0Asweets = %5B'name', 'discount', 'price', 'rating'%5D%0Abox = %5B%5D#%E7%A9%BA%E7%AE%B1%E5%AD%90%0Afor cookies in jar:#%E6%8B%BF%E5%87%BA%E7%BD%90%E5%AD%90%E9%87%8C%E7%9A%84%E6%9B%B2%E5%A5%87%E9%A5%BC%0A try:%0A bottle = %7B'id':cookies%5B'data-appid'%5D, 'type':'game'%7D#%E8%A3%85%E7%BA%A2%E9%85%92%0A except KeyError:%0A bottle = %7B'id':cookies%5B'data-subid'%5D, 'type':'package'%7D#%E6%88%96%E8%80%85%E8%A3%85%E7%99%BD%E9%85%92%0A cast = lambda magic: None if not magic else magic.string if magic.string else cast(magic.findChild())%0A flour = cookies.findChildren('td')#%E6%8F%89%E6%8F%89%E9%9D%A2%E7%B2%89%0A biscuits = %5Bcast(i) for i in flour%5B2:5%5D + %5Bflour%5B6%5D%5D%5D#%E5%81%9A%E7%82%B9%E5%B0%8F%E9%A5%BC%E5%B9%B2%0A bottle.update(zip(sweets, biscuits))#%E6%AF%8F%E7%93%B6%E9%85%92%E9%99%84%E8%B5%A0%E7%82%B9%E9%9B%B6%E9%A3%9F%0A box.append(bottle) #%E8%A3%85%E7%AE%B1%0A%0Arequest.post(API, json.dumps(box))
|
|
1602513f2ee508ed70ec08af90a94cf150d14189 | Add grep_token_logs.py | skolo/grep_token_logs.py | skolo/grep_token_logs.py | Python | 0.000297 | @@ -0,0 +1,2159 @@
+#!/usr/bin/env python%0A%0A# Copyright 2018 Google LLC.%0A#%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0A%0A%22%22%22Search the syslog on a jumphost to determine when auth tokens changed.%22%22%22%0A%0A%0Aimport sys%0A%0A%0ASYSLOG = '/var/log/syslog'%0A%0AWHITELIST_LINES = %5B%0A # (process-name, pattern)%0A ('metadata-server', 'Updated token: '),%0A ('metadata-server', 'Token requested by '),%0A ('get-oauth2-token', 'Wrote new auth token: '),%0A%5D%0A%0A%0Adef transform_line(line):%0A %22%22%22Trim the log line and return it iff it matches a whitelisted pattern.%22%22%22%0A for proc, pattern in WHITELIST_LINES:%0A if pattern in line:%0A # Log lines look like this:%0A # pylint: disable=line-too-long%0A # Mar 12 09:58:43 jumphost-win-02 metadata-server%5B5259%5D: I0312 09:58:43.756257 5259 server.go:87%5D Updated token: %5Bredacted%5D%0A timestamp = line.split('jumphost', 1)%5B0%5D%0A suffix = line.split(pattern, 1)%5B1%5D.rstrip()%0A return timestamp + proc + ': ' + pattern + suffix%0A return None%0A%0A%0Adef read_syslog():%0A %22%22%22Read the syslog, returning any relevant lines.%22%22%22%0A lines = %5B%5D%0A with open(SYSLOG, 'rb') as f:%0A for line in f:%0A tf = transform_line(line)%0A if tf:%0A lines.append(tf)%0A return lines%0A%0A%0Adef filter_logs(ip, log_lines):%0A %22%22%22Filter the log lines to only those relevant to a particular IP address.%22%22%22%0A # First, collect all tokens used by the IP address.%0A tokens = %5B%5D%0A for line in log_lines:%0A if ip and ip in line:%0A tok = line.split(', serving ', 1)%5B1%5D%0A tokens.append(tok)%0A%0A # Filter to only lines which contain the IP address or one of its tokens.%0A filtered = %5B%5D%0A for line in log_lines:%0A if ip in line:%0A filtered.append(line)%0A else:%0A for tok in tokens:%0A # We don't care about other bots which used the token.%0A if tok in line and not 'Token requested by' in line:%0A filtered.append(line)%0A return filtered%0A%0A%0Adef main():%0A %22%22%22Read the syslog, filter to relevant lines, then print them.%22%22%22%0A lines = read_syslog()%0A if len(sys.argv) %3E 1:%0A lines = filter_logs(sys.argv%5B1%5D, lines)%0A for line in lines:%0A print line%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
486263fff771a5f647d70d062e67022ae5031378 | Fix inverted sensors on the concord232 binary sensor component (#11261) | homeassistant/components/binary_sensor/concord232.py | homeassistant/components/binary_sensor/concord232.py | """
Support for exposing Concord232 elements as sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.concord232/
"""
import datetime
import logging
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA, DEVICE_CLASSES)
from homeassistant.const import (CONF_HOST, CONF_PORT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['concord232==0.14']
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE_ZONES = 'exclude_zones'
CONF_ZONE_TYPES = 'zone_types'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Alarm'
DEFAULT_PORT = '5007'
DEFAULT_SSL = False
SCAN_INTERVAL = datetime.timedelta(seconds=1)
ZONE_TYPES_SCHEMA = vol.Schema({
cv.positive_int: vol.In(DEVICE_CLASSES),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_EXCLUDE_ZONES, default=[]):
vol.All(cv.ensure_list, [cv.positive_int]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ZONE_TYPES, default={}): ZONE_TYPES_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Concord232 binary sensor platform."""
from concord232 import client as concord232_client
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
exclude = config.get(CONF_EXCLUDE_ZONES)
zone_types = config.get(CONF_ZONE_TYPES)
sensors = []
try:
_LOGGER.debug("Initializing Client")
client = concord232_client.Client('http://{}:{}'.format(host, port))
client.zones = client.list_zones()
client.last_zone_update = datetime.datetime.now()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to Concord232: %s", str(ex))
return False
for zone in client.zones:
_LOGGER.info("Loading Zone found: %s", zone['name'])
if zone['number'] not in exclude:
sensors.append(
Concord232ZoneSensor(
hass, client, zone, zone_types.get(
zone['number'], get_opening_type(zone))
)
)
add_devices(sensors, True)
def get_opening_type(zone):
"""Return the result of the type guessing from name."""
if 'MOTION' in zone['name']:
return 'motion'
if 'KEY' in zone['name']:
return 'safety'
if 'SMOKE' in zone['name']:
return 'smoke'
if 'WATER' in zone['name']:
return 'water'
return 'opening'
class Concord232ZoneSensor(BinarySensorDevice):
"""Representation of a Concord232 zone as a sensor."""
def __init__(self, hass, client, zone, zone_type):
"""Initialize the Concord232 binary sensor."""
self._hass = hass
self._client = client
self._zone = zone
self._number = zone['number']
self._zone_type = zone_type
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return self._zone_type
@property
def should_poll(self):
"""No polling needed."""
return True
@property
def name(self):
"""Return the name of the binary sensor."""
return self._zone['name']
@property
def is_on(self):
"""Return true if the binary sensor is on."""
# True means "faulted" or "open" or "abnormal state"
return bool(self._zone['state'] == 'Normal')
def update(self):
"""Get updated stats from API."""
last_update = datetime.datetime.now() - self._client.last_zone_update
_LOGGER.debug("Zone: %s ", self._zone)
if last_update > datetime.timedelta(seconds=1):
self._client.zones = self._client.list_zones()
self._client.last_zone_update = datetime.datetime.now()
_LOGGER.debug("Updated from zone: %s", self._zone['name'])
if hasattr(self._client, 'zones'):
self._zone = next((x for x in self._client.zones
if x['number'] == self._number), None)
| Python | 0 | @@ -3568,17 +3568,17 @@
state'%5D
-=
+!
= 'Norma
|
46c69a7eea1f9b5b7479e80cf3b1b8e224dbd7e5 | Fix : AD module crash when bad ldap entry. | shinken/modules/active_directory_ui.py | shinken/modules/active_directory_ui.py | #!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class is for linking the WebUI with active directory,
like check passwords, or get photos.
"""
import ldap
import os
from shinken.basemodule import BaseModule
print "Loaded AD module"
properties = {
'daemons' : ['webui'],
'type' : 'ad_webui'
}
#called by the plugin manager
def get_instance(plugin):
print "Get an Active Directory UI module for plugin %s" % plugin.get_name()
instance = AD_Webui(plugin)
return instance
class AD_Webui(BaseModule):
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
self.ldap_uri = getattr(modconf, 'ldap_uri', None)
self.username = getattr(modconf, 'username', '')
self.password = getattr(modconf, 'password', '')
self.basedn = getattr(modconf, 'basedn', '')
# If we got no uri, we bailout...
if not self.ldap_uri:
self.active = False
else:
self.active = True
self.con = None
# Try to connect if we got true parameter
def init(self):
if not self.active:
return
print "Trying to initalize the AD/Ldap connection"
self.con = ldap.initialize(self.ldap_uri)
self.con.set_option(ldap.OPT_REFERRALS,0)
print "Trying to connect to AD/Ldap", self.ldap_uri, self.username, self.password, self.basedn
# Any errors will throw an ldap.LDAPError exception
# or related exception so you can ignore the result
self.con.simple_bind_s(self.username, self.password)
print "AD/Ldap Connection done"
# To load the webui application
def load(self, app):
self.app = app
#Get a brok, parse it, and put in in database
#We call functions like manage_ TYPEOFBROK _brok that return us queries
def manage_brok(self, b):
type = b.type
manager = 'manage_'+type+'_brok'
f = getattr(self, manager, None)
if f:
f(b)
# Give the entry for a contact
def find_contact_entry(self, contact):
if self.con is None:
return None
print "AD/LDAP : search for contact", contact.get_name()
searchScope = ldap.SCOPE_SUBTREE
## retrieve all attributes
retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]
cname = contact.get_name()
email = contact.email
searchFilter = "(| (samaccountname=%s)(mail=%s))" % (cname, email)
print "Filter", searchFilter
try:
ldap_result_id = self.con.search(self.basedn, searchScope, searchFilter, retrieveAttributes)
result_set = []
while 1:
result_type, result_data = self.con.result(ldap_result_id, 0)
if (result_data == []):
print "No result for", cname
return None
if result_type == ldap.RES_SEARCH_ENTRY:
(_, elts) = result_data[0]
try :
account_name = elts['userPrincipalName'][0]
except KeyError:
account_name = str(result_data[0])
# Got a result, try to get photo to write file
print "Find account printicpalname", account_name
return elts
except ldap.LDAPError, e:
print "Ldap error", e, e.__dict__
return None
# One of our goal is to look for contacts and get all pictures
def manage_initial_broks_done_brok(self, b):
if self.con is None:
return
print "AD/LDAP : manage_initial_broks_done_brok, go for pictures"
searchScope = ldap.SCOPE_SUBTREE
## retrieve all attributes - again adjust to your needs - see documentation for more options
retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]
print "Contacts?", len(self.app.datamgr.get_contacts())
for c in self.app.datamgr.get_contacts():
print "Doing photo lookup for contact", c.get_name()
elts = self.find_contact_entry(c)
if elts is None:
print "No ldap entry for", c.get_name()
continue
# Ok, try to get photo from the entry
try:
photo = elts['thumbnailPhoto'][0]
try:
p = os.path.join(self.app.photo_dir, c.get_name()+'.jpg')
f = open(p, 'wb')
f.write(photo)
f.close()
print "Phto wrote for", c.get_name()
except Exception, exp:
print "Cannot write", p, ":", exp
except KeyError:
print "No photo for", account_name
# Try to auth a user in the ldap dir
def check_auth(self, user, password):
# If we do not have an ldap uri, no auth :)
if not self.ldap_uri:
return False
print "Trying to auth by ldap", user, password
c = self.app.datamgr.get_contact(user)
# first we need to find the principalname of this entry
# because it can be a user name like j.gabes, but we should auth by ldap
# with [email protected] for example
elts = self.find_contact_entry(c)
try :
account_name = elts['userPrincipalName'][0]
except KeyError:
print "Cannot find the userPrincipalName entry, so use the user entry"
account_name = user
local_con = ldap.initialize(self.ldap_uri)
local_con.set_option(ldap.OPT_REFERRALS,0)
# Any errors will throw an ldap.LDAPError exception
# or related exception so you can ignore the result
try:
local_con.simple_bind_s(account_name, password)
print "AD/Ldap Connection done with", user, password
return True
except ldap.LDAPError, exp:
print "LMdap auth error:", exp
# The local_con will automatically close this connection when
# the object will be deleted, so no close need
# No good? so no auth :)
return False
| Python | 0 | @@ -5542,28 +5542,28 @@
o for%22,
-accoun
+c.ge
t_name
+()
%0A%0A%0A%0A
|
417ff63118c967205ee630c5183b19a949a6c157 | Add migrations for indicadores. | indicadores/migrations/0002_auto_20170224_1535.py | indicadores/migrations/0002_auto_20170224_1535.py | Python | 0 | @@ -0,0 +1,431 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.2 on 2017-02-24 15:35%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('indicadores', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='ingreso',%0A name='fecha',%0A field=models.DateField(),%0A ),%0A %5D%0A
|
|
b7bf4586fea207453225a87fb85df59ccfc94e80 | Add missing migration related to django-simple-history update | jarbas/core/migrations/0032_auto_20170613_0641.py | jarbas/core/migrations/0032_auto_20170613_0641.py | Python | 0.000001 | @@ -0,0 +1,552 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-06-13 09:41%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0031_add_index_together_for_reimbursement'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='historicalreimbursement',%0A name='history_type',%0A field=models.CharField(choices=%5B('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')%5D, max_length=1),%0A ),%0A %5D%0A
|
|
7082f80d5be56073d9d2a66653188b2cee248a8e | add basic tests of search and matrix views | src/encoded/tests/test_search.py | src/encoded/tests/test_search.py | Python | 0 | @@ -0,0 +1,1779 @@
+# Use workbook fixture from BDD tests (including elasticsearch)%0Afrom .features.conftest import app_settings, app, workbook%0A%0A%0Adef test_search_view(workbook, testapp):%0A res = testapp.get('/search/').json%0A assert res%5B'@type'%5D == %5B'Search'%5D%0A assert res%5B'@id'%5D == '/search/'%0A assert res%5B'@context'%5D == '/terms/'%0A assert res%5B'notification'%5D == 'Success'%0A assert res%5B'title'%5D == 'Search'%0A assert res%5B'total'%5D %3E 0%0A assert 'facets' in res%0A assert 'filters' in res%0A assert 'columns' in res%0A assert '@graph' in res%0A%0A%0Adef test_matrix_view(workbook, testapp):%0A res = testapp.get('/experiments/matrix').json%0A assert res%5B'@type'%5D == %5B'Matrix'%5D%0A assert res%5B'@id'%5D == '/experiments/matrix'%0A assert res%5B'@context'%5D == '/terms/'%0A assert res%5B'notification'%5D == 'Success'%0A assert res%5B'title'%5D == 'Experiment Matrix'%0A assert res%5B'total'%5D %3E 0%0A assert 'facets' in res%0A assert 'filters' in res%0A assert 'matrix' in res%0A assert res%5B'matrix'%5D%5B'max_cell_doc_count'%5D %3E 0%0A assert res%5B'matrix'%5D%5B'search_base'%5D == '/search/?type=experiment'%0A assert res%5B'matrix'%5D%5B'x'%5D%5B'group_by'%5D == 'assay_term_name'%0A assert res%5B'matrix'%5D%5B'x'%5D%5B'label'%5D == 'Assay'%0A assert res%5B'matrix'%5D%5B'x'%5D%5B'limit'%5D == 20%0A assert len(res%5B'matrix'%5D%5B'x'%5D%5B'buckets'%5D) %3E 0%0A assert len(res%5B'matrix'%5D%5B'x'%5D%5B'facets'%5D) %3E 0%0A assert res%5B'matrix'%5D%5B'y'%5D%5B'group_by'%5D == %5B'replicates.library.biosample.biosample_type', 'biosample_term_name'%5D%0A assert res%5B'matrix'%5D%5B'y'%5D%5B'label'%5D == 'Biosample'%0A assert res%5B'matrix'%5D%5B'y'%5D%5B'limit'%5D == 5%0A assert len(res%5B'matrix'%5D%5B'y'%5D%5B'replicates.library.biosample.biosample_type'%5D%5B'buckets'%5D) %3E 0%0A assert len(res%5B'matrix'%5D%5B'y'%5D%5B'replicates.library.biosample.biosample_type'%5D%5B'buckets'%5D%5B0%5D%5B'biosample_term_name'%5D%5B'buckets'%5D) %3E 0%0A
|
|
2cd1da31b099cbf37552b2a049c3df6619e0e64f | Add helper enums for type encodings | rma/redis_types.py | rma/redis_types.py | Python | 0 | @@ -0,0 +1,1245 @@
+REDIS_ENCODING_ID_RAW = 0%0AREDIS_ENCODING_ID_INT = 1%0AREDIS_ENCODING_ID_EMBSTR = 2%0AREDIS_ENCODING_ID_HASHTABLE = 3%0AREDIS_ENCODING_ID_ZIPLIST = 4%0AREDIS_ENCODING_ID_LINKEDLIST = 5%0AREDIS_ENCODING_ID_QUICKLIST =6%0AREDIS_ENCODING_ID_INTSET = 7%0AREDIS_ENCODING_ID_SKIPLIST = 8%0A%0AREDIS_ENCODING_STR_TO_ID_LIB = %7B%0A b'raw': REDIS_ENCODING_ID_RAW,%0A b'int': REDIS_ENCODING_ID_INT,%0A b'embstr': REDIS_ENCODING_ID_EMBSTR,%0A b'hashtable': REDIS_ENCODING_ID_HASHTABLE,%0A b'ziplist': REDIS_ENCODING_ID_ZIPLIST,%0A b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,%0A b'quicklist': REDIS_ENCODING_ID_QUICKLIST,%0A b'intset': REDIS_ENCODING_ID_INTSET,%0A b'skiplist': REDIS_ENCODING_ID_SKIPLIST,%0A%7D%0A%0AREDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())%0A%0A%0Adef redis_encoding_str_to_id(key_encoding):%0A if key_encoding in REDIS_ENCODING_STR_TO_ID_LIB:%0A return REDIS_ENCODING_STR_TO_ID_LIB%5Bkey_encoding%5D%0A%0A raise ValueError(%22Invalid encoding %60%25s%60 given%22 %25 key_encoding)%0A%0A%0Adef redis_encoding_id_to_str(key_encoding):%0A if key_encoding in REDIS_ENCODING_ID_TO_STR_LIB:%0A return REDIS_ENCODING_ID_TO_STR_LIB%5Bkey_encoding%5D.decode('utf8')%0A%0A raise ValueError(%22Invalid encoding %60%25s%60 given%22 %25 key_encoding)%0A
|
|
901046879338b1bc19de59675c7eb513bbc2c517 | add problem 19 | euler019.py | euler019.py | Python | 0.001255 | @@ -0,0 +1,388 @@
+#!/usr/bin/env python%0A%0A%0Afirsts = %5B1%5D%0Ajan = 31%0Amar_dec = %5B31, 30, 31, 30, 31, 31, 30, 31, 30, 31%5D%0Afor year in range(1901,2001):%0A firsts.append(firsts%5B-1%5D + jan)%0A if year %25 4 == 0 and year %25 100 != 0 or year %25 400 == 0:%0A feb = 29%0A else:%0A feb = 28%0A firsts.append(firsts%5B-1%5D + feb)%0A for mon in mar_dec:%0A firsts.append(firsts%5B-1%5D + mon)%0A%0Aprint sum(%5B1 for i in firsts if i%257==6%5D)%0A
|
|
dfa5bee0720f8d4b5f3ac2309915090239780045 | Test Flask file | flaskweb.py | flaskweb.py | Python | 0.000001 | @@ -0,0 +1,425 @@
+from flask import Flask, request, jsonify%0Aapp = Flask(__name__)%0A%[email protected](%22/hello/%3Cname%3E%22)%0Adef hello(name):%0A return %22Hello World! %25s%22 %25 name%0A%[email protected](%22/data/%22)%0Adef temptime():%0A arr = %7B%22temp%22: %5B20, 21, 21%5D,%22time%22:%5B10,20,30%5D,%22unit%22:%22s%22%7D%0A return jsonify(arr)%0A%[email protected](%22/add%22, methods = %5B'POST'%5D)%0Adef sum():%0A r = request.get_json()%0A a = r%5B'a'%5D%0A b = r%5B'b'%5D%0A sum = a + b%0A return '%7B:d%7D'.format(sum)%0A%0A
|
|
02f84b8cf3c3dd77b6d84d9ccea979c8de23eaa5 | Add Awesome renderers | src/common/renderers.py | src/common/renderers.py | Python | 0 | @@ -0,0 +1,2953 @@
+import time%0Afrom rest_framework.renderers import JSONRenderer%0Afrom django.shortcuts import resolve_url%0Afrom django.template.loader import render_to_string%0Afrom django.utils.encoding import force_str%0Afrom django.utils.functional import Promise%0Afrom rest_framework.renderers import BaseRenderer, JSONRenderer, TemplateHTMLRenderer%0Afrom rest_framework.utils import encoders, json%0A%0A# from drf_yasg.app_settings import redoc_settings, swagger_settings%0A# from drf_yasg.codecs import VALIDATORS, OpenAPICodecJson, OpenAPICodecYaml%0A# from drf_yasg.openapi import Swagger%0A# from drf_yasg.utils import filter_none%0A%0Aclass AwesomeJSONRenderer(JSONRenderer):%0A def render(self, data, accepted_media_type=None, renderer_context=None):%0A status_code = renderer_context%5B'response'%5D.status_code%0A%0A # %7B'detail': ErrorDetail(string='address value is not Bitcoin Address or Web Address', code='00002')%7D%0A if 'detail' in data:%0A # %EC%97%90%EB%9F%AC exception %EC%9D%B8%EA%B2%BD%EC%9A%B0%EC%9E%84%0A message = str(data%5B'detail'%5D)%0A message_code = int(data%5B'detail'%5D.code)%0A response = %7B%0A # 'timnestamp': int(time.time()),%0A # 'success': True,%0A # 'status_code': status_code,%0A 'message_code': message_code,%0A 'message': message,%0A 'data': None,%0A # 'status_code': 200, # 200 %EA%B3%A0%EC%A0%95%EC%9E%84%0A # 'result': %7B%0A # 'msg': '',%0A # 'msg_code': '200',%0A # 'data': data,%0A # %7D,%0A # 'error': message,%0A # 'error_code': message_code,%0A %7D%0A%0A elif ('detail' not in data) and (status_code in %5B200, 201, 202%5D):%0A response = %7B%0A # 'timnestamp': int(time.time()),%0A # 'success': True,%0A # 'status_code': status_code,%0A 'message_code': 100,%0A 'message': 'success',%0A 'data': data,%0A # 'status_code': 200, # 200 %EA%B3%A0%EC%A0%95%EC%9E%84%0A # 'result': %7B%0A # 'msg': '',%0A # 'msg_code': '200',%0A # 'data': data,%0A # %7D,%0A # 'error': '',%0A # 'error_code': '',%0A %7D%0A else:%0A # %EA%B8%B0%EB%B3%B8 400 %EC%97%90%EB%9F%AC%EC%9D%B8%EA%B2%BD%EC%9A%B0%0A response = %7B%0A # 'timnestamp': int(time.time()),%0A # 'success': True,%0A # 'status_code': status_code,%0A 'message_code': status_code,%0A 'message': data,%0A 'data': None,%0A # 'status_code': 200, # 200 %EA%B3%A0%EC%A0%95%EC%9E%84%0A # 'result': %7B%0A # 'msg': '',%0A # 'msg_code': '200',%0A # 'data': data,%0A # %7D,%0A # 'error': '',%0A # 'error_code': '',%0A %7D%0A%0A return super(AwesomeJSONRenderer, self).render(response, accepted_media_type, renderer_context)%0A%0A%0A
|
|
80a435e3e382791b5615755d05c5353114650ecc | test only | hello.py | hello.py | Python | 0 | @@ -0,0 +1,263 @@
+#!/usr/bin/python%0A%0Aprint %22Content-type:text/html%5Cr%5Cn%5Cr%5Cn%22%0Aprint '%3Chtml%3E'%0Aprint '%3Chead%3E'%0Aprint '%3Ctitle%3EHello Word - First CGI Program%3C/title%3E'%0Aprint '%3C/head%3E'%0Aprint '%3Cbody%3E'%0Aprint '%3Ch2%3EHello Word! This is my first CGI program%3C/h2%3E'%0Aprint '%3C/body%3E'%0Aprint '%3C/html%3E'%0A
|
|
101f378fb536cdaf8f2c681f5b1fba669bf70631 | Add hex xor | hexor.py | hexor.py | Python | 0.000083 | @@ -0,0 +1,790 @@
+#!/usr/bin/python3%0A# -*- coding: utf-8 -*-%0A# xor 2 hex strings%0A%0Aimport string%0A%0Adef isHex(s):%0A '''Check if it is a hex string'''%0A if (len(s) == 0 or len(s) %25 2 != 0%0A or not all(c in string.hexdigits for c in s)):%0A return False%0A return True%0A%0Adef hexor(s1, s2):%0A '''xor 2 hex strings, returning a hex string'''%0A s3 = (int(c1,16) %5E int(c2,16) for (c1,c2) in zip(s1,s2))%0A res = %22%22%0A for c in s3:%0A res += %22%7B:x%7D%22.format(c)%0A return res%0A%0Aif __name__ == %22__main__%22:%0A while True:%0A s1 = input(%22First string: %22)%0A s2 = input(%22Second string: %22)%0A if not isHex(s1) or not isHex(s2):%0A print(%22Your hex string(s) are invalid!%22)%0A continue%0A else:%0A print(%22Result: %22, hexor(s1,s2))%0A
|
|
6a9d60a6e48b3231675e465c1a837c909a9e652a | Add forward2 | forward2.py | forward2.py | Python | 0.999904 | @@ -0,0 +1,1143 @@
+from convert import print_prob, load_image, checkpoint_fn, meta_fn%0Aimport tensorflow as tf%0A%0Aimport resnet%0A%0Aimport os%0A%0Alayers = 50%0A%0Aimg = load_image(%22data/cat.jpg%22)%0A%0Asess = tf.Session()%0A%0Afilename = checkpoint_fn(layers)%0Afilename = os.path.realpath(filename)%0A%0Aif layers == 50:%0A num_blocks = %5B3, 4, 6, 3%5D%0Aelif layers == 101:%0A num_blocks = %5B3, 4, 23, 3%5D%0Aelif layers == 152:%0A num_blocks = %5B3, 8, 36, 3%5D%0A%0Awith tf.device('/cpu:0'):%0A images = tf.placeholder(%22float32%22, %5BNone, 224, 224, 3%5D, name=%22images%22)%0A logits = resnet.inference(images,%0A is_training=False,%0A num_blocks=num_blocks,%0A preprocess=True,%0A bottleneck=True)%0A prob = tf.nn.softmax(logits, name='prob')%0A%0A%0Asaver = tf.train.Saver()%0Asaver.restore(sess, filename)%0A%0Agraph = tf.get_default_graph()%0Aprob_tensor = graph.get_tensor_by_name(%22prob:0%22)%0Afor op in graph.get_operations():%0A print op.name%0A%0Aprint %22graph restored%22%0A%0Abatch = img.reshape((1, 224, 224, 3))%0A%0Afeed_dict = %7Bimages: batch%7D%0A%0Aprob = sess.run(prob_tensor, feed_dict=feed_dict)%0A%0Aprint_prob(prob%5B0%5D)%0A
|
|
aabeda2b604bc44996d0afc4cc149deb71157123 | Update wunderground.py | homeassistant/components/sensor/wunderground.py | homeassistant/components/sensor/wunderground.py | """Support for Wunderground weather service."""
from datetime import timedelta
import logging
import requests
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.const import CONF_API_KEY
from homeassistant.const import TEMP_FAHRENHEIT
from homeassistant.const import TEMP_CELSIUS
CONF_PWS_ID = 'pws_id'
_URLCONST = '/conditions/q/pws:'
_RESOURCE = 'http://api.wunderground.com/api/'
_LOGGER = logging.getLogger(__name__)
# Return cached results if last scan was less then this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
# Sensor types are defined like: Name, units
SENSOR_TYPES = {
'weather': ['Weather Summary', None],
'station_id': ['Station ID', None],
'feelslike_c': ['Feels Like (°C)', TEMP_CELSIUS],
'feelslike_f': ['Feels Like (°F)', TEMP_FAHRENHEIT],
'feelslike_string': ['Feels Like', None],
'heat_index_c': ['Dewpoint (°C)', TEMP_CELSIUS],
'heat_index_f': ['Dewpoint (°F)', TEMP_FAHRENHEIT],
'heat_index_string': ['Heat Index Summary', None],
'dewpoint_c': ['Dewpoint (°C)', TEMP_CELSIUS],
'dewpoint_f': ['Dewpoint (°F)', TEMP_FAHRENHEIT],
'dewpoint_string': ['Dewpoint Summary', None],
'wind_kph': ['Wind Speed', 'kpH'],
'wind_mph': ['Wind Speed', 'mpH'],
'UV': ['UV', None],
'pressure_in': ['Pressure', 'in'],
'pressure_mb': ['Pressure', 'mbar'],
'wind_dir': ['Wind Direction', None],
'wind_string': ['Wind Summary', None],
'temp_c': ['Temperature (°C)', TEMP_CELSIUS],
'temp_f': ['Temperature (°F)', TEMP_FAHRENHEIT],
'relative_humidity': ['Relative Humidity', '%'],
'visibility_mi': ['Visibility (miles)', 'mi'],
'visibility_km': ['Visibility (km)', 'km'],
'precip_today_in': ['Precipation Today', 'in'],
'precip_today_metric': ['Precipation Today', 'mm'],
'precip_today_string': ['Precipation today', None],
'solarradiation': ['Solar Radiation', None]
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Wundeground sensor."""
payload = config.get('payload', None)
rest = WUndergroundData(_RESOURCE,
config.get(CONF_PWS_ID),
config.get(CONF_API_KEY),
payload)
sensors = []
for variable in config['monitored_conditions']:
if variable in SENSOR_TYPES:
sensors.append(WUndergroundSensor(rest, variable))
else:
_LOGGER.error('Wunderground sensor: "%s" does not exist', variable)
response = requests.get(_RESOURCE + config.get(CONF_API_KEY) +
_URLCONST + config.get(CONF_PWS_ID) +
'.json', timeout=10)
if "error" in response.json()["response"]:
_LOGGER.error("Check your Wunderground API")
return False
else:
add_devices(sensors)
rest.update()
class WUndergroundSensor(Entity):
"""Implementing the Wunderground sensor."""
def __init__(self, rest, condition):
"""Initialize the sensor."""
self.rest = rest
self._condition = condition
self._unit_of_measurement = None
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return "PWS_" + str(self._condition)
@property
def state(self):
"""Return the state of the sensor."""
self.weather = self.rest.data
return self.weather[str(self._condition)]
@property
def entity_picture(self):
"""Return the entity picture."""
self.weather = self.rest.data
if self._condition == 'weather':
return self.weather['icon_url']
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES[self._condition][1]
def update(self):
"""Update current conditions."""
self.rest.update()
self._state = self.rest.data
# pylint: disable=too-few-public-methods
class WUndergroundData(object):
"""Get data from Wundeground."""
def __init__(self, resource, pws_id, api_key, data):
"""Initialize the data object."""
self._resource = resource
self._api_key = api_key
self._pws_id = pws_id
self.data = None
self.unit_system = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from wunderground"""
try:
result = requests.get(self._resource + self._api_key +
'/conditions/q/pws:' + self._pws_id + '.json',
timeout=10)
if "error" in result.json():
raise ValueError(result.json()["response"]["error"]
["description"])
else:
self.data = result.json()["current_observation"]
except ValueError as err:
_LOGGER.error("Check Wunderground API %s", err.args)
self.data = None
| Python | 0 | @@ -4070,24 +4070,25 @@
a from Wunde
+r
ground.%22%22%22%0A%0A
@@ -4606,24 +4606,16 @@
ws_id +
-'.json',
%0A
@@ -4640,16 +4640,25 @@
+ '.json',
timeout
|
c794fbf00c5ba5b661f01fcbd0652105ed4c3904 | Add missing migration. | mc2/controllers/base/migrations/0005_field_defaults.py | mc2/controllers/base/migrations/0005_field_defaults.py | Python | 0.000002 | @@ -0,0 +1,644 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('base', '0004_marathonlabel'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='envvariable',%0A name='key',%0A field=models.TextField(default='', blank=True),%0A preserve_default=False,%0A ),%0A migrations.AlterField(%0A model_name='marathonlabel',%0A name='name',%0A field=models.TextField(default='', blank=True),%0A preserve_default=False,%0A ),%0A %5D%0A
|
|
d478517c51c8428c5606bfa9dfd6f734aff5df13 | Fix races on recorder test (#11857) | tests/components/recorder/test_purge.py | tests/components/recorder/test_purge.py | """Test data purging."""
import json
from datetime import datetime, timedelta
from time import sleep
import unittest
from homeassistant.components import recorder
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.purge import purge_old_data
from homeassistant.components.recorder.models import States, Events
from homeassistant.components.recorder.util import session_scope
from tests.common import get_test_home_assistant, init_recorder_component
class TestRecorderPurge(unittest.TestCase):
"""Base class for common recorder tests."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
config = {'purge_keep_days': 4, 'purge_interval': 2}
self.hass = get_test_home_assistant()
init_recorder_component(self.hass, config)
self.hass.start()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def _add_test_states(self):
"""Add multiple states to the db for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
attributes = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.block_till_done()
self.hass.data[DATA_INSTANCE].block_till_done()
with recorder.session_scope(hass=self.hass) as session:
for event_id in range(5):
if event_id < 3:
timestamp = five_days_ago
state = 'purgeme'
else:
timestamp = now
state = 'dontpurgeme'
session.add(States(
entity_id='test.recorder2',
domain='sensor',
state=state,
attributes=json.dumps(attributes),
last_changed=timestamp,
last_updated=timestamp,
created=timestamp,
event_id=event_id + 1000
))
# if self._add_test_events was called, we added a special event
# that should be protected from deletion, too
protected_event_id = getattr(self, "_protected_event_id", 2000)
# add a state that is old but the only state of its entity and
# should be protected
session.add(States(
entity_id='test.rarely_updated_entity',
domain='sensor',
state='iamprotected',
attributes=json.dumps(attributes),
last_changed=five_days_ago,
last_updated=five_days_ago,
created=five_days_ago,
event_id=protected_event_id
))
def _add_test_events(self):
"""Add a few events for testing."""
now = datetime.now()
five_days_ago = now - timedelta(days=5)
event_data = {'test_attr': 5, 'test_attr_10': 'nice'}
self.hass.block_till_done()
self.hass.data[DATA_INSTANCE].block_till_done()
with recorder.session_scope(hass=self.hass) as session:
for event_id in range(5):
if event_id < 2:
timestamp = five_days_ago
event_type = 'EVENT_TEST_PURGE'
else:
timestamp = now
event_type = 'EVENT_TEST'
session.add(Events(
event_type=event_type,
event_data=json.dumps(event_data),
origin='LOCAL',
created=timestamp,
time_fired=timestamp,
))
# Add an event for the protected state
protected_event = Events(
event_type='EVENT_TEST_FOR_PROTECTED',
event_data=json.dumps(event_data),
origin='LOCAL',
created=five_days_ago,
time_fired=five_days_ago,
)
session.add(protected_event)
session.flush()
self._protected_event_id = protected_event.event_id
def test_purge_old_states(self):
"""Test deleting old states."""
self._add_test_states()
# make sure we start with 6 states
with session_scope(hass=self.hass) as session:
states = session.query(States)
self.assertEqual(states.count(), 6)
# run purge_old_data()
purge_old_data(self.hass.data[DATA_INSTANCE], 4)
# we should only have 3 states left after purging
self.assertEqual(states.count(), 3)
def test_purge_old_events(self):
"""Test deleting old events."""
self._add_test_events()
with session_scope(hass=self.hass) as session:
events = session.query(Events).filter(
Events.event_type.like("EVENT_TEST%"))
self.assertEqual(events.count(), 6)
# run purge_old_data()
purge_old_data(self.hass.data[DATA_INSTANCE], 4)
# now we should only have 3 events left
self.assertEqual(events.count(), 3)
def test_purge_method(self):
"""Test purge method."""
service_data = {'keep_days': 4}
self._add_test_events()
self._add_test_states()
# make sure we start with 6 states
with session_scope(hass=self.hass) as session:
states = session.query(States)
self.assertEqual(states.count(), 6)
events = session.query(Events).filter(
Events.event_type.like("EVENT_TEST%"))
self.assertEqual(events.count(), 6)
self.hass.data[DATA_INSTANCE].block_till_done()
# run purge method - no service data, should not work
self.hass.services.call('recorder', 'purge')
self.hass.async_block_till_done()
# Small wait for recorder thread
sleep(0.1)
# we should still have everything from before
self.assertEqual(states.count(), 6)
self.assertEqual(events.count(), 6)
# run purge method - correct service data
self.hass.services.call('recorder', 'purge',
service_data=service_data)
self.hass.async_block_till_done()
# Small wait for recorder thread
sleep(0.1)
# we should only have 3 states left after purging
self.assertEqual(states.count(), 3)
# the protected state is among them
self.assertTrue('iamprotected' in (
state.state for state in states))
# now we should only have 4 events left
self.assertEqual(events.count(), 4)
# and the protected event is among them
self.assertTrue('EVENT_TEST_FOR_PROTECTED' in (
event.event_type for event in events.all()))
self.assertFalse('EVENT_TEST_PURGE' in (
event.event_type for event in events.all()))
| Python | 0.000001 | @@ -75,31 +75,8 @@
lta%0A
-from time import sleep%0A
impo
@@ -5959,32 +5959,69 @@
s
-leep(0.1
+elf.hass.data%5BDATA_INSTANCE%5D.block_till_done(
)%0A%0A
@@ -6449,16 +6449,53 @@
s
-leep(0.1
+elf.hass.data%5BDATA_INSTANCE%5D.block_till_done(
)%0A%0A
|
46a9c3789b86631258d881dacf6ae529ec277d70 | Add stats262.py | ielex/lexicon/management/commands/stats262.py | ielex/lexicon/management/commands/stats262.py | Python | 0.000001 | @@ -0,0 +1,1850 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.core.management import BaseCommand%0A%0Afrom ielex.lexicon.models import Language, %5C%0A Meaning, %5C%0A Lexeme, %5C%0A CognateJudgementCitation%0A%0A%0Aclass Command(BaseCommand):%0A%0A help = %22Computes statistics for https://github.com/lingdb/CoBL/issues/262%22%5C%0A %22%5CnPossible parameters are: %7B1, 2, 3%7D for task number.%22%0A%0A def add_arguments(self, parser):%0A parser.add_argument('task', type=int)%0A%0A missing_args_message = %22Please provide a task number of %7B1,2,3%7D.%22%0A%0A def handle(self, *args, **options):%0A # Data to work with:%0A languageIds = Language.objects.filter(%0A languagelist__name='Current').values_list('id', flat=True)%0A meaningIds = Meaning.objects.filter(%0A meaninglist__name='Jena200').values_list('id', flat=True)%0A lexemeIds = Lexeme.objects.filter(%0A language_id__in=languageIds,%0A meaning_id__in=meaningIds,%0A not_swadesh_term=False).values_list('id', flat=True)%0A self.stdout.write(%22Task %25s:%22 %25 options%5B'task'%5D)%0A taskFilter = %7B1: 'C', # Doubtful%0A 2: 'L', # Loanword%0A 3: 'X'%7D # Exclude%0A cjcs = CognateJudgementCitation.objects.filter(%0A cognate_judgement__lexeme_id__in=lexemeIds,%0A reliability=taskFilter%5Boptions%5B'task'%5D%5D).all()%0A for cjc in cjcs:%0A cj = cjc.cognate_judgement%0A self.stdout.write(%22CognateJudgementCitation %25s %22%0A %22of CognateClass %25s %22%0A %22and Lexeme %25s.%22 %25 (cjc.id,%0A cj.cognate_class.id,%0A cj.lexeme.id))%0A
|
|