column        dtype          min   max
commit        stringlengths  40    40
subject       stringlengths  1     3.25k
old_file      stringlengths  4     311
new_file      stringlengths  4     311
old_contents  stringlengths  0     26.3k
lang          stringclasses  3 values
proba         float64        0     1
diff          stringlengths  0     7.82k
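The diff column stores its text percent-encoded (newlines as %0A, quotes as %22, brackets as %5B/%5D, a literal percent sign as %25, and so on). A minimal sketch of how one might decode a diff value for reading, assuming standard URL percent-encoding; the helper name is ours, not part of the dataset:

from urllib.parse import unquote

def decode_diff(raw: str) -> str:
    # The dataset encodes newlines as %0A, quotes as %22, a literal
    # percent sign as %25, etc. -- standard percent-encoding, which
    # unquote() reverses in one pass.
    return unquote(raw)

# Example against a fragment of the first record's diff field:
print(decode_diff("@@ -0,0 +1,1030 @@ +from solutions import min_three%0A%0Aimport unittest%0A"))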
90a30ae1b3165c03f6de5458c92f8ecb9d3f948a
Add homework min_three
domaci-zadaci/05/test_min_three.py
domaci-zadaci/05/test_min_three.py
Python
0.00046
@@ -0,0 +1,1030 @@ +from solutions import min_three%0A%0Aimport unittest%0Aimport random%0A%0Aclass TestMinThree(unittest.TestCase):%0A def test_1000_cases(self):%0A for _ in range(1000):%0A first = (random.random() - 0.5) * 2000%0A second = (random.random() - 0.5) * 2000%0A third = (random.random() - 0.5) * 2000%0A%0A expected = min(first, second, third)%0A %0A actual = min_three(first, second, third)%0A self.assertEqual(expected, actual)%0A%0A actual = min_three(first, third, second)%0A self.assertEqual(expected, actual)%0A%0A actual = min_three(second, first, third)%0A self.assertEqual(expected, actual)%0A%0A actual = min_three(second, third, first)%0A self.assertEqual(expected, actual)%0A%0A actual = min_three(third, first, second)%0A self.assertEqual(expected, actual)%0A%0A actual = min_three(third, second, first)%0A self.assertEqual(expected, actual)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
2d4c59cef7c2db0c3e8c07ef9c749b8b3a5d3998
Remove stray todo.
pakit_tests.py
pakit_tests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Automated bot for testing recipes.
"""
from __future__ import absolute_import, print_function

import argparse
from argparse import RawDescriptionHelpFormatter as RawDescriptionHelp
import glob
import mmap
import os
import re
import sys

__version__ = '0.1.0'

TEMPLATE = """\"\"\"
Do not edit this file. Just rerun the program to regenerate tests.
\"\"\"
from __future__ import absolute_import

import os
import shutil
import subprocess
import tempfile

CONFIG_TEMPLATE = \"\"\"pakit:
  command:
    timeout: 120
  defaults:
    repo: stable
  log:
    enabled: true
    file: {0}
    level: debug
  paths:
    link: {1}
    prefix: {2}
    recipes: {3}
    source: {4}
  recipe:
    update_interval: 86400
    uris:
      - uri: test_recipes
{5}
\"\"\"


def write_config(tail):
    \"\"\"
    Write config for a test execution to path.

    Args:
        tail: Vaild yaml to affix to the end of CONFIG_TEMPLATE.

    Returns:
        Path to temp directory.
    \"\"\"
    root = tempfile.mkdtemp()
    recipe_d = os.path.join(root, 'recipe')
    os.mkdir(recipe_d)
    # TODO: Pass in folder via command.
    os.symlink('ROOT_RECS', os.path.join(recipe_d, 'test_recipes'))
    with open(os.path.join(root, 'pakit.yml'), 'w') as fout:
        fout.write(CONFIG_TEMPLATE.format(
            os.path.join(root, 'main.log'),
            os.path.join(root, 'link'),
            os.path.join(root, 'prefix'),
            recipe_d,
            os.path.join(root, 'source'),
            tail
        ))

    return root


def delete_it(path):
    \"\"\"
    File or folder, it is deleted.

    Args:
        path: path to a file or dir
    \"\"\"
    try:
        shutil.rmtree(path)
    except OSError:
        try:
            os.remove(path)
        except OSError:
            pass


class RecipeTest(object):
    def setup_method(self, method):
        recipe = type(self).__name__.replace('Test_', '').split('::')[0]
        repo = method.__name__.replace('test_', '')
        self.temp_d = write_config(recipe + ':\\n  repo: ' + repo)
        self.args = ['pakit', '--conf',
                     os.path.join(self.temp_d, 'pakit.yml'),
                     'install', recipe]
        self.new_env = os.environ.copy()
        new_path = os.environ['PATH'] + ':' + os.path.join(self.temp_d,
                                                           'link', 'bin')
        self.new_env.update({'PATH': new_path})

    def teardown_method(self, _):
        delete_it(self.temp_d)
"""


def create_args_parser():
    """
    Create the program argument parser.

    Returns:
        An argparse parser object.
    """
    prog_name = os.path.basename(os.path.dirname(sys.argv[0]))
    mesg = """
    This script will (re)generate tests for recipes.
    It will OVERWRITE existing tests.
    """
    mesg = mesg[0:-5]
    parser = argparse.ArgumentParser(prog=prog_name, description=mesg,
                                     formatter_class=RawDescriptionHelp)
    parser.add_argument('-v', '--version', action='version',
                        version='pakit_tests {0}'.format(__version__))
    parser.add_argument('recipes_root', help='the folder containing recipes')
    parser.add_argument('output', nargs='?', default='tests/test_recipes.py',
                        help="""relative path from recipes root to test file,
                        default: tests/test_recipes.py""")

    return parser


def extract_repo_names(text):
    """
    Given a string, extract all keys from the string.

    Returns:
        List of keys in the string.
    """
    matcher = re.compile(r'\'(\w+)\':')
    results = [matched.group(1) for matched in matcher.finditer(text)]
    return results


def extract_repo_block(text):
    """
    Given a string, extract ONLY the repos dictionary block.

    Returns:
        A string containing only required block.
    """
    return re.search(r'(self.repos\s*=\s*{.*?})', text, re.DOTALL).group(1)


def format_lines(recipes):
    """
    Transform the dictionary to lines to write.

    Returns:
        Lines to write to test file.
    """
    lines = []
    class_line = '\nclass Test_{0}(RecipeTest):'
    repo_line = """    def test_{0}(self):
        assert subprocess.call(self.args, cwd=self.temp_d,
                               env=self.new_env) == 0
"""
    for recipe in sorted(recipes):
        repo_name = recipes[recipe]
        lines.append(class_line.format(recipe))
        for repo_name in recipes[recipe]:
            lines.extend(repo_line.format(repo_name).split('\n'))

    return lines[0:-1]


def scan_recipes(recipe_d):
    """
    Scan the recipe directory and return relevant data.
    """
    data = {}
    matcher = re.compile(r'class\s+\S+\(Recipe\)')
    candidates = [fname for fname in
                  glob.glob(os.path.join(recipe_d, '*.py'))]

    for fname in candidates:
        short_name = os.path.basename(fname)[0:-3]
        with open(fname, 'r+') as fin:
            text = mmap.mmap(fin.fileno(), 0)
            if matcher.search(text) is not None:
                data[short_name] = extract_repo_names(extract_repo_block(text))

    return data


def write_file(root, test_file):
    """
    Write the test file as requested.
    """
    try:
        os.makedirs(os.path.dirname(test_file))
    except OSError:
        pass

    text = TEMPLATE.replace('ROOT_RECS', root) + \
        '\n'.join(format_lines(scan_recipes(root)))
    with open(test_file, 'w') as fout:
        fout.write(text)


def main(argv=None):
    """
    The main entry point for this program.

    Args:
        argv: A list of program options, if None use sys.argv.
    """
    if argv is None:
        argv = sys.argv
    parser = create_args_parser()
    args = parser.parse_args(argv[1:])
    root = os.path.abspath(args.recipes_root)
    if os.path.isabs(args.output):
        test_file = os.path.join(root, args.output)
    else:
        test_file = os.path.join(root, args.output)

    print('Scanning recipes under: ' + root)
    print('Writing tests to: ' + test_file)
    write_file(root, test_file)


if __name__ == "__main__":
    main()  # pragma: no cover
Python
0
@@ -1112,48 +1112,8 @@ _d)%0A - # TODO: Pass in folder via command.%0A
aad51679cc2e4e719ed12e3983b54dcf15a2c06f
Update slack.py
graphite_beacon/handlers/slack.py
graphite_beacon/handlers/slack.py
import json

from tornado import gen, httpclient as hc

from graphite_beacon.handlers import AbstractHandler, LOGGER
from graphite_beacon.template import TEMPLATES


class SlackHandler(AbstractHandler):

    name = 'slack'

    # Default options
    defaults = {
        'webhook': None,
        'channel': None,
        'username': 'graphite-beacon',
    }

    emoji = {
        'critical': ':exclamation:',
        'warning': ':warning:',
        'normal': ':white_check_mark:',
    }

    def init_handler(self):
        self.webhook = self.options.get('webhook')
        assert self.webhook, 'Slack webhook is not defined.'

        self.channel = self.options.get('channel')
        if self.channel and not self.channel.startswith('#'):
            self.channel = '#' + self.channel
        self.username = self.options.get('username')
        self.client = hc.AsyncHTTPClient()

    def get_message(self, level, alert, value, target=None, ntype=None,
                    rule=None):
        msg_type = 'slack' if ntype == 'graphite' else 'short'
        tmpl = TEMPLATES[ntype][msg_type]
        return tmpl.generate(
            level=level, reactor=self.reactor, alert=alert,
            value=value, target=target).strip()

    @gen.coroutine
    def notify(self, level, *args, **kwargs):
        LOGGER.debug("Handler (%s) %s", self.name, level)

        message = self.get_message(level, *args, **kwargs)
        data = dict()
        data['username'] = self.username
        data['text'] = message
        data['icon_emoji'] = self.emoji.get(level, ':warning:')
        if self.channel:
            data['channel'] = self.channel

        body = json.dumps(data)
        yield self.client.fetch(self.webhook, method='POST', body=body)
Python
0
@@ -730,19 +730,26 @@ rtswith( +( '#' +, '@') ):%0A
34908071bd11470806a84d9f76c630fd3fcc2d4b
test file :-)
tests/gsim/abrahamson_silva_2008_test.py
tests/gsim/abrahamson_silva_2008_test.py
Python
0
@@ -0,0 +1,1665 @@ +# nhlib: A New Hazard Library%0A# Copyright (C) 2012 GEM Foundation%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0Afrom nhlib.gsim.abrahamson_silva_2008 import AbrahamsonSilva2008%0A%0Afrom tests.gsim.utils import BaseGSIMTestCase%0A%0A# Test data have been generated from Fortran implementation%0A# of Dave Boore available at:%0A# http://www.daveboore.com/software_online.html%0A# Note that the Fortran implementation has been modified not%0A# to compute the 'Constant Displacement Model' term%0A%0Aclass AbrahamsonSilva2008TestCase(BaseGSIMTestCase):%0A GSIM_CLASS = AbrahamsonSilva2008%0A%0A def test_mean(self):%0A self.check('AS08/AS08_MEAN.csv',%0A max_discrep_percentage=0.1)%0A%0A def test_std_inter(self):%0A self.check('AS08/AS08_STD_INTER.csv',%0A max_discrep_percentage=0.1)%0A%0A def test_std_intra(self):%0A self.check('AS08/AS08_STD_INTRA.csv',%0A max_discrep_percentage=0.1)%0A%0A def test_std_total(self):%0A self.check('AS08/AS08_STD_TOTAL.csv',%0A max_discrep_percentage=0.1)%0A%0A
9fc373bbfa606aeb23c237df9c8d9143e14b60a1
structure of preprocessing module for lea to fill in
code/python/seizures/preprocessing/preprocessing.py
code/python/seizures/preprocessing/preprocessing.py
Python
0
@@ -0,0 +1,537 @@ +import scipy.signal%0A%0Adef preprocess_multichannel_data(matrix):%0A n_channel,m= matrix.shape%0A for i in range(n_channel):%0A preprocess_single_channel(matrix%5Bi,:%5D)%0A%0Adef preprocess_single_channel(x):%0A x = remove_elec_noise(x)%0A x = hp_filter(x)%0A x = remove_dc(x)%0A return x%0A%0Adef remove_dc():%0A %22%22%22%0A Remove mean of signal%0A :return:%0A %22%22%22%0A pass%0A%0Adef remove_elec_noise():%0A %22%22%22%0A Bandpass remove:49-51Hz%0A :return:%0A %22%22%22%0A pass%0A%0Adef hp_filter():%0A %22%22%22%0A Anti_aliasing%0A :return:%0A %22%22%22%0A pass
5de3f1294961621b4167778096d04fbee581ad78
Fix error: iterate over custom_translations.values().
app/recaptcha/client/captcha.py
app/recaptcha/client/captcha.py
# This file is originally from recaptcha-client 1.0.5 (obtained from pypi),
# now modified to support custom translations.

import urllib
import urllib2
import simplejson

API_SSL_SERVER = 'https://api-secure.recaptcha.net'
API_SERVER = 'http://api.recaptcha.net'
VERIFY_SERVER = 'api-verify.recaptcha.net'


class RecaptchaResponse(object):
    def __init__(self, is_valid, error_code=None):
        self.is_valid = is_valid
        self.error_code = error_code


def get_display_html(public_key, use_ssl=False, error=None,
                     lang='en', custom_translations={}):
    """Gets the HTML to display for reCAPTCHA

    public_key -- The public api key
    use_ssl -- Should the request be sent over ssl?
    error -- An error message to display (from RecaptchaResponse.error_code)"""

    error_param = ''
    if error:
        error_param = '&error=%s' % error

    server = API_SERVER
    if use_ssl:
        server = API_SSL_SERVER

    # _('...') used to return objects that are unpalatable to simplejson.
    # For better compatibility, we keep this conversion code, but execute it
    # only when values are non-unicode to prevent UnicodeEncodeError.
    if any(not isinstance(v, unicode) for k, v in custom_translations):
        custom_translations = dict((k, unicode(str(v), 'utf-8'))
                                   for (k, v) in custom_translations.items())

    options = {
        'theme': 'white',
        'lang': lang,
        'custom_translations': custom_translations
    }
    return '''
<script>
  var RecaptchaOptions = %(options)s;
</script>
<script src="%(server)s/challenge?k=%(public_key)s%(error_param)s"></script>
<noscript>
  <iframe src="%(server)s/noscript?k=%(public_key)s%(error_param)s"
      height="300" width="500" frameborder="0"></iframe><br>
  <textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
  <input type="hidden" name="recaptcha_response_field"
      value="manual_challenge">
</noscript>
''' % {
        'options': simplejson.dumps(options),
        'server': server,
        'public_key': public_key,
        'error_param': error_param,
    }


def submit(recaptcha_challenge_field,
           recaptcha_response_field,
           private_key,
           remoteip):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request

    recaptcha_challenge_field -- The value of recaptcha_challenge_field
                                 from the form
    recaptcha_response_field -- The value of recaptcha_response_field
                                from the form
    private_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    """

    if not (recaptcha_response_field and recaptcha_challenge_field and
            len(recaptcha_response_field) and len(recaptcha_challenge_field)):
        return RecaptchaResponse(is_valid=False,
                                 error_code='incorrect-captcha-sol')

    def encode_if_necessary(s):
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    params = urllib.urlencode({
        'privatekey': encode_if_necessary(private_key),
        'remoteip': encode_if_necessary(remoteip),
        'challenge': encode_if_necessary(recaptcha_challenge_field),
        'response': encode_if_necessary(recaptcha_response_field),
    })

    request = urllib2.Request(
        url="http://%s/verify" % VERIFY_SERVER,
        data=params,
        headers={
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": "reCAPTCHA Python"
        }
    )

    httpresp = urllib2.urlopen(request)

    return_values = httpresp.read().splitlines()
    httpresp.close()

    return_code = return_values[0]

    if return_code == "true":
        return RecaptchaResponse(is_valid=True)
    else:
        return RecaptchaResponse(is_valid=False,
                                 error_code=return_values[1])
Python
0.00003
@@ -1207,11 +1207,8 @@ for - k, v i @@ -1228,16 +1228,25 @@ slations +.values() ):%0A
42c82bc865d69b904ec688aa152caf3a247df1c6
Create frontdoor.py
home/pi/PirFrontDoor/frontdoor.py
home/pi/PirFrontDoor/frontdoor.py
Python
0.000024
@@ -0,0 +1,649 @@ +#!/usr/bin/python%0A%0Aimport RPi.GPIO as GPIO%0Aimport time%0Aimport requests%0A%0AGPIO.setmode(GPIO.BCM)%0A%0APIR_PIN = 22%0A%0AGPIO.setup(PIR_PIN, GPIO.IN)%0A%0Adef MOTION(PIR_PIN):%0A print %22Motion Detected!%22%0A payload = %7B 'value1' : 'Someone at Front Door'%7D%0A r = requests.post(%22https://maker.ifttt.com/trigger/%7BEvent%7D/with/key/%7Bsecret key%7D%22, data=payload)%0A print r.text%0Aprint %22PIR Module Test (CTRL+C to exit)%22%0Atime.sleep(2)%0Aprint %22Ready%22%0A%0Atry:%0A GPIO.add_event_detect(PIR_PIN, GPIO.RISING, callback=MOTION)%0A while 1:%0A time.sleep(120)%0A%0Aexcept KeyboardInterrupt:%0A print %22Quit%22%0A GPIO.cleanup()%0A %0A
1d7451fd6eca8a68832b676ef0a696e8de801533
Update services_and_index_sync.py
tendrl/node_agent/node_sync/services_and_index_sync.py
tendrl/node_agent/node_sync/services_and_index_sync.py
import json

import etcd

from tendrl.commons.event import Event
from tendrl.commons.message import ExceptionMessage
from tendrl.commons.message import Message
from tendrl.commons.utils import etcd_utils

# TODO(darshan) this has to be moved to Definition file
TENDRL_SERVICES = [
    "tendrl-node-agent",
    "etcd",
    "tendrl-api",
    "tendrl-gluster-integration",
    "tendrl-ceph-integration",
    "glusterd",
    "ceph-mon@*",
    "ceph-osd@*",
    "ceph-installer"
]


def sync(sync_ttl=None):
    try:
        tags = []
        # update node agent service details
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "node_sync, Updating Service data"}
            )
        )
        for service in TENDRL_SERVICES:
            s = NS.tendrl.objects.Service(service=service)
            if s.running:
                service_tag = NS.compiled_definitions.get_parsed_defs()[
                    'namespace.tendrl'
                ]['tags'][service.strip("@*")]
                tags.append(service_tag)
                if service_tag == "tendrl/server":
                    tags.append("tendrl/monitor")
            s.save()

        # updating node context with latest tags
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "node_sync, updating node context "
                                    "data with tags"
                         }
            )
        )
        NS.node_context = NS.tendrl.objects.NodeContext().load()
        current_tags = list(NS.node_context.tags)
        tags += current_tags
        NS.node_context.tags = list(set(tags))
        NS.node_context.tags.sort()
        current_tags.sort()
        if NS.node_context.tags != current_tags:
            NS.node_context.save()

        # Update /indexes/tags/:tag = [node_ids]
        for tag in NS.node_context.tags:
            index_key = "/indexes/tags/%s" % tag
            _node_ids = []
            try:
                _node_ids = NS._int.client.read(index_key).value
                _node_ids = json.loads(_node_ids)
            except etcd.EtcdKeyNotFound:
                pass
            if _node_ids:
                if NS.node_context.node_id in _node_ids:
                    continue
                else:
                    _node_ids += [NS.node_context.node_id]
            else:
                _node_ids = [NS.node_context.node_id]
            _node_ids = list(set(_node_ids))
            etcd_utils.write(index_key, json.dumps(_node_ids))
            if sync_ttl:
                etcd_utils.refresh(index_key, sync_ttl)

        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "node_sync, Updating detected "
                                    "platform"
                         }
            )
        )
    except Exception as ex:
        Event(
            ExceptionMessage(
                priority="error",
                publisher=NS.publisher_id,
                payload={"message": "node_sync service and indexes "
                                    "sync failed: " + ex.message,
                         "exception": ex}
            )
        )
Python
0.000001
@@ -1218,32 +1218,512 @@ s.save()%0A + %0A # Try to claim orphan %22provisioner_%25integration_id%22 tag%0A _cluster = NS.tendrl.objects.Cluster(integration_id=NS.tendrl_context.integration_id).load()%0A try:%0A if _cluster.is_managed == %22yes%22:%0A _tag = %22provisioner/%25s%22 %25 _cluster.integration_id%0A _index_key = %22/indexes/tags/%25s%22 %25 _tag%0A etcd_utils.read(_index_key)%0A except etcd.EtcdKeyNotFound:%0A tags.append(_tag)%0A %0A # updat
bcb89187a398000d80c7c0b0ac5152e76edd2666
Remove TRT 4.0 restrictions on int32 test.
tensorflow/python/compiler/tensorrt/test/int32_test.py
tensorflow/python/compiler/tensorrt/test/int32_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test


class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):

  def _ConstOp(self, shape, dtype):
    return constant_op.constant(np.random.randn(*shape), dtype=dtype)

  def GetParams(self):
    """Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
    input_name = 'input'
    output_name = 'output'
    input_dims = [100, 4]
    dtype = dtypes.int32
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      b = self._ConstOp((4, 10), dtype)
      x = math_ops.matmul(x, b)
      b = self._ConstOp((10,), dtype)
      x = nn.bias_add(x, b)
      x = array_ops.identity(x, name=output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[[input_dims]],
        output_names=[output_name],
        expected_output_dims=[[[100, 10]]])

  def GetConversionParams(self, run_params):
    """Return a ConversionParams for test."""
    conversion_params = super(ExcludeUnsupportedInt32Test,
                              self).GetConversionParams(run_params)
    return conversion_params._replace(
        max_batch_size=100,
        maximum_cached_engines=1,
        # Disable layout optimizer, since it will convert BiasAdd with NHWC
        # format to NCHW format under four dimentional input.
        rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return []

  def ShouldRunTest(self, run_params):
    """Whether to run the test."""
    # TODO(aaroey): Trt 4.0 forbids conversion for tensors with rank <3 in int8
    # mode, which is a bug. Re-enable this when trt library is fixed.
    return not trt_test.IsQuantizationMode(run_params.precision_mode)


if __name__ == '__main__':
  test.main()
Python
0
@@ -2868,303 +2868,8 @@ %5B%5D%0A%0A - def ShouldRunTest(self, run_params):%0A %22%22%22Whether to run the test.%22%22%22%0A # TODO(aaroey): Trt 4.0 forbids conversion for tensors with rank %3C3 in int8%0A # mode, which is a bug. Re-enable this when trt library is fixed.%0A return not trt_test.IsQuantizationMode(run_params.precision_mode)%0A%0A %0Aif
f94f1f698c8e9473b7c96ec7b1244e84fc4ebe5d
update unittest for MonoMixer
test/src/unittest/standard/test_monomixer_streaming.py
test/src/unittest/standard/test_monomixer_streaming.py
#!/usr/bin/env python

# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/

from essentia_test import *
from essentia.streaming import MonoMixer, AudioLoader


class TestMonoMixer_Streaming(TestCase):
    left = []
    right = []

    def clickTrack(self):
        size = 100
        offset = 10
        self.left = [0]*size
        self.right = [0]*size
        for i in range(offset/2, size, offset):
            self.left[i] = 1.0
        for i in range(offset, size, offset):
            self.right[i] = 1
        output = []
        for i in range(size):
            output.append((self.left[i], self.right[i]))
        return array(output)

    def testLeft(self):
        gen = VectorInput(self.clickTrack())
        chGen = VectorInput([2])
        mixer = MonoMixer(type='left')
        pool = Pool()

        gen.data >> mixer.audio
        mixer.audio >> (pool, "mix")
        chGen.data >> mixer.numberChannels
        chGen.push('data', 2)
        run(gen)

        self.assertEqualVector(pool['mix'], self.left)

    def testRight(self):
        gen = VectorInput(self.clickTrack())
        chGen = VectorInput([2])
        mixer = MonoMixer(type='right')
        pool = Pool()

        gen.data >> mixer.audio
        mixer.audio >> (pool, "mix")
        chGen.data >> mixer.numberChannels
        chGen.push('data', 2)
        run(gen)

        self.assertEqualVector(pool['mix'], self.right)

    def testMix(self):
        gen = VectorInput(self.clickTrack())
        chGen = VectorInput([2])
        mixer = MonoMixer(type='mix')
        pool = Pool()

        gen.data >> mixer.audio
        mixer.audio >> (pool, "mix")
        chGen.data >> mixer.numberChannels
        chGen.push('data', 2)
        run(gen)

        self.assertEqual(sum(pool['mix']), 19*0.5)

    def testSingle(self):
        gen = VectorInput(array([(0.9, 0.5)]))
        chGen = VectorInput([2])
        mixer = MonoMixer(type='mix')
        pool = Pool()

        gen.data >> mixer.audio
        mixer.audio >> (pool, "mix")
        chGen.data >> mixer.numberChannels
        chGen.push('data', 2)
        run(gen)

        self.assertAlmostEqual(sum(pool['mix']), (0.9+0.5)*0.5)

    def testEmpty(self):
        inputFilename = join(testdata.audio_dir, 'generated', 'empty',
                             'empty.wav')
        loader = AudioLoader(filename=inputFilename)
        mixer = MonoMixer(type='left')
        pool = Pool()

        loader.audio >> mixer.audio
        mixer.audio >> (pool, "mix")
        loader.numberChannels >> mixer.numberChannels
        loader.sampleRate >> None
        run(loader)

        self.assertEqualVector(pool.descriptorNames(), [])

    def testInvalidParam(self):
        self.assertConfigureFails(MonoMixer(), {'type':'unknown'})


suite = allTests(TestMonoMixer_Streaming)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
Python
0
@@ -2992,13 +2992,167 @@ pty. -wav') +ogg')%0A # NOTE: AudioLoader will through exception on %22empty.wav%22 complaining that %0A # it cannot read stream info, using %22empty.ogg%22 therefore... %0A @@ -3424,16 +3424,43 @@ %3E%3E None%0A + loader.md5 %3E%3E None%0A
1307070cfe27ca605bfcc279644b735ee941f627
Add work for ex21.py.
lpthw/ex31.py
lpthw/ex31.py
Python
0
@@ -0,0 +1,1048 @@ +print %22You enter a dark room with two doors. Do you go through door #1 or #2?%22%0A%0Adoor = raw_input(%22%3E %22)%0A%0Aif door == %221%22:%0A print %22Ther's a giant bear here eating a cheese cake. What do you do?%22%0A print %221. Take the cake.%22%0A print %222. Scream at the bear.%22%0A%0A bear = raw_input(%22%3E %22)%0A%0A if bear == %221%22:%0A print %22The bear eats your face off. Good job!%22%0A elif bear == %222%22:%0A print %22The bear ears your legs off. Good job!%22%0A else:%0A print %22Well, doing %25s is probably better. Bear runs away.%22 %25 bear%0A%0Aelif door == %222%22:%0A print %22You stare into the endless abyss at Cthulhu's retina.%22%0A print %221. Blueberries.%22%0A print %222. Yellow jacket clothespins.%22%0A print %223. Understanding revolvers yelling melodies.%22%0A%0A insanity = raw_input(%22%3E %22)%0A%0A if insanity == %221%22 or insanity == %222%22:%0A print %22Your body survives powered by a mind of jello. Good job!%22%0A else:%0A print %22The insanity rots your eyes into a pool of muck. Good job!%22%0A%0Aelse:%0A print %22You stumble around and fall on a knife and die. Good job!%22%0A
d3c6c91bc4b6214053b9a1d1d2291a402c164b86
add file
GridPixelPlot.py
GridPixelPlot.py
Python
0.000001
@@ -0,0 +1,2300 @@ +import kplr%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0A%0Aqua = 5%0A%0Aclient = kplr.API()%0A%0A# Find the target KOI.%0Akoi = client.koi(282.02)%0A%0AoriginStar = koi.star%0A%0A# Find potential targets by Kepler magnitude%0AkoisOver = client.kois(where=%22koi_kepmag between %25f and %25f%22%25(originStar.kic_kepmag, originStar.kic_kepmag+0.1), sort=(%22koi_kepmag%22,1))%0AkoisUnder = client.kois(where=%22koi_kepmag between %25f and %25f%22%25(originStar.kic_kepmag-0.1, originStar.kic_kepmag), sort=(%22koi_kepmag%22,1))%0AkoisUnder.reverse()%0A%0Astars = %5B%5D%0Astars.append(originStar.kepid)%0A%0A#Find 16 stars that are closest to the origin star in terms of Kepler magnitude%0Ai=0%0Aj=0%0Awhile len(stars) %3C17:%0A while koisOver%5Bi%5D.kepid in stars:%0A i+=1%0A tmpOver = koisOver%5Bi%5D.star%0A while koisUnder%5Bj%5D.kepid in stars:%0A j+=1%0A tmpUnder =koisUnder%5Bj%5D.star%0A if tmpOver.kic_kepmag-originStar.kic_kepmag %3E originStar.kic_kepmag-tmpUnder.kic_kepmag:%0A stars.append(tmpUnder.kepid)%0A j+=1%0A elif tmpOver.kic_kepmag-originStar.kic_kepmag %3C originStar.kic_kepmag-tmpUnder.kic_kepmag:%0A stars.append(tmpOver.kepid)%0A j+=1%0A else:%0A stars.append(tmpUnder.kepid)%0A stars.append(tmpOver.kepid)%0A i+=1%0A j+=1%0A%0A%0Afor tmp in stars:%0A star = client.star(tmp)%0A# Get a list of light curve datasets.%0A tpfs = star.get_target_pixel_files(short_cadence=False)%0A%0A time, flux = %5B%5D, %5B%5D%0A%0A for tpf in tpfs:%0A with tpf.open() as f:%0A hdu_data = f%5B1%5D.data%0A time.append(hdu_data%5B%22time%22%5D)%0A flux.append(hdu_data%5B%22flux%22%5D)%0A%0A t = time%5Bqua%5D%0A%0A data = flux%5Bqua%5D%0A data = np.nan_to_num(data)%0A data = np.ma.masked_equal(data,0)%0A%0A shape = data.shape%0A td = shape%5B0%5D%0A x = shape%5B1%5D%0A y = shape%5B2%5D%0A%0A# Plot the data%0A f, axes = plt.subplots(x, y)%0A%0A for i in range(0,x):%0A for j in range(0,y):%0A axes%5Bi,j%5D.plot(t,data%5B0:td:1,i,j%5D)%0A plt.setp( axes%5Bi,j%5D.get_xticklabels(), visible=False)%0A plt.setp( axes%5Bi,j%5D.get_yticklabels(), visible=False)%0A%0A plt.subplots_adjust(left=None, bottom=None, right=None, top=None,%0A wspace=0, hspace=0)%0A plt.suptitle('Kepler %25d Quarter %25d%5Cn Kepler magnitude %25f'%25(star.kepid, qua, star.kic_kepmag))%0A plt.savefig('%25d-%25d.png'%25(star.kepid, qua))%0A plt.clf()%0A%0A%0A
8515155d9d0df940eea758121124995320fce6bb
add experimental C/clang plugin
languages/c.py
languages/c.py
Python
0
@@ -0,0 +1,713 @@ +import os%0A%0Afrom lint.linter import Linter%0Afrom lint.util import find%0A%0Aclass C(Linter):%0A language = 'c'%0A cmd = ('clang', '-xc', '-fsyntax-only', '-std=c99', '-Werror',%0A '-pedantic')%0A regex = (%0A r'%5E%3Cstdin%3E:(?P%3Cline%3E%5Cd+):(?P%3Ccol%3E%5Cd+):'%0A r'(?:(?P%3Cranges%3E%5B%7B%7D0-9:%5C-%5D+):)?%5Cs+'%0A r'(?P%3Cerror%3E.+)'%0A )%0A%0A def communicate(self, cmd, code):%0A includes = %5B%5D%0A if self.filename:%0A parent = os.path.dirname(self.filename)%0A includes.append('-I' + parent)%0A inc = find(parent, 'include')%0A if inc:%0A includes.append('-I' + inc)%0A%0A cmd += ('-',) + tuple(includes)%0A return super(C, self).communicate(cmd, code)%0A
0026beea95ec26b8763feae270e79872f86de8a5
Add run_sample_tests for executing sample tests in Travis
stress_test/sample_test_confs/run_sample_tests.py
stress_test/sample_test_confs/run_sample_tests.py
Python
0
@@ -0,0 +1,492 @@ +#! /usr/bin/env python3.4%0A%0A# Copyright (c) 2015 Intracom S.A. Telecom Solutions. All rights reserved.%0A#%0A# This program and the accompanying materials are made available under the%0A# terms of the Eclipse Public License v1.0 which accompanies this distribution,%0A# and is available at http://www.eclipse.org/legal/epl-v10.html%0A%0A%22%22%22%0ARunner for sample tests%0A%22%22%22%0A%0Aimport os%0A%0A%0Adef run_tests():%0A %22%22%22%0A Method for running sample tests%0A %22%22%22%0A%0A pass%0A%0Aif __name__ == '__main__':%0A run_tests()%0A
352b17d8139fb0d269e4c17c01fe8ee488961c3a
Create HR_miniMaxSum.py
HR_miniMaxSum.py
HR_miniMaxSum.py
Python
0.000003
@@ -0,0 +1,458 @@ +#!/bin/python3%0A%0Aimport math%0Aimport os%0Aimport random%0Aimport re%0Aimport sys%0A%0A%0A# Complete the miniMaxSum function below.%0Adef miniMaxSum(arr):%0A maxx = max(arr)%0A minn = min(arr)%0A %0A mini = arr.copy()%0A mini.remove(maxx)%0A maxi = arr.copy()%0A maxi.remove(minn)%0A %0A sum_min = sum(mini)%0A sum_max = sum(maxi)%0A %0A print(sum_min, sum_max)%0A%0A%0Aif __name__ == '__main__':%0A arr = list(map(int, input().rstrip().split()))%0A%0A miniMaxSum(arr)%0A
b8fe92674773c7470c3b47899a8832bbb94771b4
Add path module
lib/oelite/path.py
lib/oelite/path.py
Python
0
@@ -0,0 +1,647 @@ +import os%0A%0ATOPDIR = os.getcwd()%0A%0A%0Adef init(topdir):%0A global TOPDIR%0A TOPDIR = topdir%0A%0A%0Adef relpath(path):%0A %22%22%22Return a relative version of paths compared to TOPDIR.%22%22%22%0A global TOPDIR%0A if path.startswith(TOPDIR):%0A return path%5Blen(TOPDIR):%5D.lstrip(%22/%22)%0A return path%0A%0A%0Adef which(path, filename, pathsep=os.pathsep):%0A %22%22%22Given a search path, find file.%22%22%22%0A if isinstance(path, basestring):%0A path = path.split(pathsep)%0A for p in path:%0A f = os.path.join(p, filename)%0A if os.path.exists(f):%0A return os.path.abspath(f)%0A return '' # TODO: change to None, and fixup the breakage it causes%0A
8ce2da2ed2e445480ee2e10483a5fae1c7c677a0
Include self contained method for output to a view
lib/output_view.py
lib/output_view.py
Python
0
@@ -0,0 +1,2185 @@ +import sublime%0Aimport sublime_plugin%0A%0A###-----------------------------------------------------------------------------%0A%0Adef output_to_view(window,%0A title,%0A content,%0A reuse=True,%0A syntax=None,%0A clear=True,%0A settings=None):%0A%0A if not isinstance(content, str):%0A content = %22%5Cn%22.join (content)%0A%0A view = None%0A%0A if reuse:%0A for _view in window.views ():%0A if _view.name () == title:%0A view = _view%0A break%0A%0A if view is None:%0A view = window.new_file ()%0A view.set_scratch (True)%0A view.set_name (title)%0A if syntax is not None:%0A view.assign_syntax (syntax)%0A%0A else:%0A view.set_read_only (False)%0A%0A if clear is True:%0A view.sel ().clear ()%0A view.sel ().add (sublime.Region (0, view.size ()))%0A view.run_command (%22left_delete%22)%0A%0A if window.active_view () != view:%0A window.focus_view (view)%0A%0A if settings is not None:%0A for setting in settings:%0A view.settings ().set (setting, settings%5Bsetting%5D)%0A%0A # Sace current buffer size, selection information and view position%0A saved_size = view.size ()%0A saved_sel = list(view.sel ())%0A saved_position = view.viewport_position ()%0A%0A # Single select, position cursor at end of file, insert the data%0A view.sel ().clear ()%0A view.sel ().add (sublime.Region (saved_size, saved_size))%0A view.run_command (%22insert%22, %7B%22characters%22: content%7D)%0A%0A # If the last selection was at the end of the buffer, replace that selection%0A # with the new end of the buffer so the relative position remains the same.%0A if sublime.Region (saved_size, saved_size) == saved_sel%5B-1%5D:%0A saved_sel%5B-1%5D = sublime.Region (view.size (), view.size ())%0A%0A # Clear current selection and add original selection back%0A view.sel ().clear ()%0A for region in saved_sel:%0A view.sel ().add (region)%0A%0A view.set_viewport_position (saved_position, False)%0A view.set_read_only (True)%0A%0A###-----------------------------------------------------------------------------%0A
0d35b502515a9775166e775c3462ca9300fe4517
add examples
examples/helpers.py
examples/helpers.py
Python
0
@@ -0,0 +1,701 @@ +# -*- coding: utf-8 -*-%0A#%0Afrom dolfin import as_backend_type%0Aimport matplotlib.pyplot as plt%0Aimport scipy.linalg%0A%0A%0Adef show_matrix(A):%0A A = as_backend_type(A)%0A A_matrix = A.sparray()%0A%0A # colormap%0A cmap = plt.cm.gray_r%0A A_dense = A_matrix.todense()%0A # A_r = A_dense%5B0::2%5D%5B0::2%5D%0A # A_i = A_dense%5B1::2%5D%5B0::2%5D%0A cmap.set_bad('r')%0A # im = plt.imshow(%0A # abs(A_dense), cmap=cmap, interpolation='nearest', norm=LogNorm()%0A # )%0A plt.imshow(abs(A_dense), cmap=cmap, interpolation='nearest')%0A plt.colorbar()%0A plt.show()%0A return%0A%0A%0Adef get_eigenvalues(A):%0A A = as_backend_type(A)%0A A_matrix = A.sparray()%0A return scipy.linalg.eigvals(A_matrix.todense())%0A
e333bc7b23a69a39392899a1d1c8e0bdf3523c3f
remove unused import [ci skip]
corehq/apps/app_manager/management/commands/build_apps.py
corehq/apps/app_manager/management/commands/build_apps.py
import contextlib
from functools import wraps
import json

from django.core.management.base import BaseCommand
from lxml import etree
import os

from corehq.apps.app_manager.models import Application, RemoteApp

_parser = etree.XMLParser(remove_blank_text=True)


def normalize_xml(xml):
    xml = etree.fromstring(xml, parser=_parser)
    return etree.tostring(xml, pretty_print=True)


@contextlib.contextmanager
def record_performance_stats(filepath, slug):
    from guppy import hpy
    import time
    hp = hpy()
    before = hp.heap()
    start = time.clock()
    try:
        yield
    finally:
        end = time.clock()
        after = hp.heap()
        leftover = after - before
        with open(filepath, 'a') as f:
            f.write('{},{},{}\n'.format(slug, leftover.size, end - start))


class Command(BaseCommand):
    args = '<path_to_dir> <build-slug>'
    help = """
        Pass in a path to a directory (dir, below) with the following layout:
        dir/
            src/
                [app-slug].json
                [app-slug].json
                ...
    """

    def handle(self, *args, **options):
        path, build_slug = args
        app_slugs = []
        perfpath = os.path.join(path, '{}-performance.txt'.format(build_slug))
        if os.path.exists(perfpath):
            os.remove(perfpath)

        for name in os.listdir(os.path.join(path, 'src')):
            _JSON = '.json'
            if name.endswith(_JSON):
                app_slugs.append(name[:-len(_JSON)])

        for slug in app_slugs:
            print 'Fetching %s...' % slug
            source_path = os.path.join(path, 'src', '%s.json' % slug)
            with open(source_path) as f:
                j = json.load(f)
                if j['doc_type'] == 'Application':
                    app = Application.wrap(j)
                elif j['doc_type'] == 'RemoteApp':
                    app = RemoteApp.wrap(j)
            app.version = 1

            build_path = os.path.join(path, build_slug, slug)
            print ' Creating files...'
            with record_performance_stats(perfpath, slug):
                files = app.create_all_files()
            self.write_files(files, build_path)

    def write_files(self, files, path):
        for filename, payload in files.items():
            filepath = os.path.join(path, filename)
            dirpath, filename = os.path.split(filepath)
            try:
                os.makedirs(dirpath)
            except OSError:
                # file exists
                pass
            with open(filepath, 'w') as f:
                if filepath.endswith('.xml'):
                    payload = normalize_xml(payload)
                f.write(payload)
Python
0
@@ -15,36 +15,8 @@ lib%0A -from functools import wraps%0A impo
f4bf1c83f55013051037b4380f1b579375bad3d7
Add test for ContextAwareForm
backend/tests/api/test_forms.py
backend/tests/api/test_forms.py
Python
0
@@ -0,0 +1,435 @@ +import pytest%0A%0Afrom api.forms import ContextAwareForm%0A%0Afrom users.models import User%0A%0A%0Adef test_cannot_use_form_context_if_its_not_passed():%0A class TestModelForm(ContextAwareForm):%0A class Meta:%0A model = User%0A fields = ('id',)%0A%0A form = TestModelForm()%0A%0A with pytest.raises(ValueError) as e:%0A form.context%0A%0A assert str(e.value) == 'Make sure you pass the context when instancing the Form'%0A
3d40378e0e42f62615199daf97a48f24d5b9eb12
add basic test for LIS
test_lis.py
test_lis.py
Python
0.000001
@@ -0,0 +1,360 @@ +import unittest%0Aimport lis%0A%0Aclass TestLis(unittest.TestCase):%0A def test_basic(self):%0A l = lis.Lis()%0A answer = %5B%5B0, 4, 6, 9, 13, 15%5D, %5B0, 2, 6, 9, 13, 15%5D, %5B0, 4, 6, 9, 11, 15%5D, %5B0, 2, 6, 9, 11, 15%5D%5D%0A self.assertEquals(answer, l.lis(%5B0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15%5D))%0A%0Aif __name__ == '__main__':%0A unittest.main()
71675f81214ea510c377abf23fe2a11dfb113717
create module
pyAhocorasick/pyAhocorasick.py
pyAhocorasick/pyAhocorasick.py
Python
0.000001
@@ -0,0 +1,77 @@ +#-*- encoding=utf-8 -*- %0A'''%0ACreated on Mar 15, 2014%0A%0A@author: tonyzhang%0A'''%0A
963866e795df42121f972ee2170ddeb890f7e5b7
Create pytest test file
python-practice/test_arrays.py
python-practice/test_arrays.py
Python
0.000001
@@ -0,0 +1,1319 @@ +import arrays%0A%0A# Reverse an array in place%0Adef test_reverse_array():%0A input = %5B1, 2, 3%5D%0A%0A assert arrays.reverse_array(input) == %5B3, 2, 1%5D%0A%0A%0A# Search a sorted list%0Adef test_binary_search_no_list():%0A input_array = %5B%5D%0A target = 1%0A%0A assert arrays.binary_search(input_array, target) == -1%0A%0A%0Adef test_binary_search_short_list_found():%0A input_array = %5B1%5D%0A target = 1%0A%0A assert arrays.binary_search(input_array, target) == 0%0A%0A%0Adef test_binary_search_short_list_not_found():%0A input_array = %5B1%5D%0A target = 10%0A%0A assert arrays.binary_search(input_array, target) == -1%0A%0A%0Adef test_binary_search_even_list():%0A input_array = %5B1, 4, 8, 10%5D%0A target = 4%0A%0A assert arrays.binary_search(input_array, target) == 1%0A%0A%0Adef test_binary_search_odd_list():%0A input_array = %5B1, 5, 10%5D%0A target = 1%0A%0A assert arrays.binary_search(input_array, target) == 0%0A%0A%0Adef test_binary_search_last_in_list():%0A input_array = %5B1, 5, 10%5D%0A target = 10%0A%0A assert arrays.binary_search(input_array, target) == 2%0A%0A%0Adef test_binary_search_not_in_list_big():%0A input_array = %5B1, 5, 10%5D%0A target = 100%0A%0A assert arrays.binary_search(input_array, target) == -1%0A%0A%0Adef test_binary_search_not_in_list_small():%0A input_array = %5B1, 5, 10%5D%0A target = -100%0A%0A assert arrays.binary_search(input_array, target) == -1%0A
4932483b10876eddab39477063a9b8546e5e0f33
Create a.py
a.py
a.py
Python
0.000489
@@ -0,0 +1,2 @@ +a%0A
50e24b0445f259d975e5dd78dd34a8e760e4ed88
Create SQLite database and table and insert data from CSV file
DB.py
DB.py
Python
0
@@ -0,0 +1,1113 @@ +# Create a database%0A%0Aimport sqlite3%0Aimport csv%0Afrom datetime import datetime%0Aimport sys %0Areload(sys) %0Asys.setdefaultencoding('utf8')%0A%0Aclass createDB():%0A%0A def readCSV(self, filename):%0A conn = sqlite3.connect('CIUK.db')%0A print 'DB Creation Successful!'%0A cur = conn.cursor()%0A # cur.execute('''DROP TABLE PRODUCTS;''')%0A cur.execute('''CREATE TABLE PRODUCTS%0A%09%09%09%09 (ID INTEGER PRIMARY KEY AUTOINCREMENT,%0A%09%09%09%09 TITLE TEXT NOT NULL,%0A%09%09%09%09 DESCRIPTION TEXT NOT NULL,%0A%09%09%09%09 PRICE INTEGER NOT NULL,%0A%09%09%09%09 CREATED_AT TIMESTAMP,%0A%09%09%09%09 UPDATED_AT TIMESTAMP);''')%0A print 'Table Creation Successful!'%0A with open(filename) as f:%0A reader = csv.reader(f)%0A for row in reader:%0A cur.execute(%22INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);%22, (unicode(row%5B0%5D), unicode(row%5B1%5D), unicode(row%5B2%5D), datetime.now(), datetime.now()))%0A print 'Successfully read data from CSV file!'%0A conn.commit()%0A conn.close()%0A%0Ac = createDB().readCSV('products.csv')
5a926913d3da29e8911fbea03fe57be020525e03
Update to v0.6 of liffylights
homeassistant/components/light/lifx.py
homeassistant/components/light/lifx.py
""" homeassistant.components.light.lifx ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LIFX platform that implements lights Configuration: light: # platform name platform: lifx # optional server address # only needed if using more than one network interface # (omit if you are unsure) server: 192.168.1.3 # optional broadcast address, set to reach all LIFX bulbs # (omit if you are unsure) broadcast: 192.168.1.255 """ # pylint: disable=missing-docstring import logging import colorsys from homeassistant.helpers.event import track_time_change from homeassistant.components.light import \ (Light, ATTR_BRIGHTNESS, ATTR_RGB_COLOR, ATTR_COLOR_TEMP, ATTR_TRANSITION) _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['liffylights==0.5'] DEPENDENCIES = [] CONF_SERVER = "server" # server address configuration item CONF_BROADCAST = "broadcast" # broadcast address configuration item SHORT_MAX = 65535 # short int maximum BYTE_MAX = 255 # byte maximum TEMP_MIN = 2500 # lifx minimum temperature TEMP_MAX = 9000 # lifx maximum temperature TEMP_MIN_HASS = 154 # home assistant minimum temperature TEMP_MAX_HASS = 500 # home assistant maximum temperature class LIFX(): def __init__(self, add_devices_callback, server_addr=None, broadcast_addr=None): from liffylights import liffylights self._devices = [] self._add_devices_callback = add_devices_callback self._liffylights = liffylights(self.on_device, self.on_power, self.on_color, server_addr, broadcast_addr) def find_bulb(self, ipaddr): bulb = None for device in self._devices: if device.ipaddr == ipaddr: bulb = device break return bulb # pylint: disable=too-many-arguments def on_device(self, ipaddr, name, power, hue, sat, bri, kel): bulb = self.find_bulb(ipaddr) if bulb is None: bulb = LIFXLight(self._liffylights, ipaddr, name, power, hue, sat, bri, kel) self._devices.append(bulb) self._add_devices_callback([bulb]) # pylint: disable=too-many-arguments def on_color(self, ipaddr, hue, sat, bri, kel): bulb = self.find_bulb(ipaddr) if bulb is not None: bulb.set_color(hue, sat, bri, kel) bulb.update_ha_state() def on_power(self, ipaddr, power): bulb = self.find_bulb(ipaddr) if bulb is not None: bulb.set_power(power) bulb.update_ha_state() # pylint: disable=unused-argument def poll(self, now): self.probe() def probe(self, address=None): self._liffylights.probe(address) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices_callback, discovery_info=None): """ Set up platform. """ server_addr = config.get(CONF_SERVER, None) broadcast_addr = config.get(CONF_BROADCAST, None) lifx_library = LIFX(add_devices_callback, server_addr, broadcast_addr) # register our poll service track_time_change(hass, lifx_library.poll, second=10) lifx_library.probe() def convert_rgb_to_hsv(rgb): """ Convert HASS RGB values to HSV values. """ red, green, blue = [_ / BYTE_MAX for _ in rgb] hue, saturation, brightness = colorsys.rgb_to_hsv(red, green, blue) return [int(hue * SHORT_MAX), int(saturation * SHORT_MAX), int(brightness * SHORT_MAX)] # pylint: disable=too-many-instance-attributes class LIFXLight(Light): """ Provides LIFX light. """ # pylint: disable=too-many-arguments def __init__(self, liffy, ipaddr, name, power, hue, saturation, brightness, kelvin): self._liffylights = liffy self._ip = ipaddr self.set_name(name) self.set_power(power) self.set_color(hue, saturation, brightness, kelvin) @property def should_poll(self): """ No polling needed for LIFX light. """ return False @property def name(self): """ Returns the name of the device. 
""" return self._name @property def ipaddr(self): """ Returns the ip of the device. """ return self._ip @property def rgb_color(self): """ Returns RGB value. """ return self._rgb @property def brightness(self): """ Returns brightness of this light between 0..255. """ return int(self._bri / (BYTE_MAX + 1)) @property def color_temp(self): """ Returns color temperature. """ return int(TEMP_MIN_HASS + (TEMP_MAX_HASS - TEMP_MIN_HASS) * (self._kel - TEMP_MIN) / (TEMP_MAX - TEMP_MIN)) @property def is_on(self): """ True if device is on. """ return self._power != 0 def turn_on(self, **kwargs): """ Turn the device on. """ if ATTR_TRANSITION in kwargs: fade = kwargs[ATTR_TRANSITION] * 1000 else: fade = 0 if ATTR_BRIGHTNESS in kwargs: brightness = kwargs[ATTR_BRIGHTNESS] * (BYTE_MAX + 1) else: brightness = self._bri if ATTR_RGB_COLOR in kwargs: hue, saturation, brightness = \ convert_rgb_to_hsv(kwargs[ATTR_RGB_COLOR]) else: hue = self._hue saturation = self._sat brightness = self._bri if ATTR_COLOR_TEMP in kwargs: kelvin = int(((TEMP_MAX - TEMP_MIN) * (kwargs[ATTR_COLOR_TEMP] - TEMP_MIN_HASS) / (TEMP_MAX_HASS - TEMP_MIN_HASS)) + TEMP_MIN) else: kelvin = self._kel _LOGGER.debug("%s %d %d %d %d %d", self._ip, hue, saturation, brightness, kelvin, fade) if self._power == 0: self._liffylights.set_power(self._ip, 65535, 0) self._liffylights.set_color(self._ip, hue, saturation, brightness, kelvin, fade) def turn_off(self, **kwargs): """ Turn the device off. """ if ATTR_TRANSITION in kwargs: fade = kwargs[ATTR_TRANSITION] * 1000 else: fade = 0 self._liffylights.set_power(self._ip, 0, fade) def set_name(self, name): """ Set name. """ self._name = name def set_power(self, power): """ Set power state value. """ self._power = (power != 0) def set_color(self, hue, sat, bri, kel): """ Set color state values. """ self._hue = hue self._sat = sat self._bri = bri self._kel = kel red, green, blue = colorsys.hsv_to_rgb(hue / SHORT_MAX, sat / SHORT_MAX, bri / SHORT_MAX) self._rgb = [int(red * BYTE_MAX), int(green * BYTE_MAX), int(blue * BYTE_MAX)]
Python
0
@@ -745,17 +745,17 @@ ghts==0. -5 +6 '%5D%0ADEPEN
874c01374397014e7c99afd67f5680ed32f1c5c6
Build and revision number script
bn.py
bn.py
Python
0
@@ -0,0 +1,242 @@ +import sys%0Afrom time import gmtime%0Ayear, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()%0Abld = ((year - 2000) * 12 + mon - 1) * 100 + mday%0Arev = hour * 100 + min%0Aprint 'Your build and revision number for today is %25d.%25d.' %25 (bld, rev)%0A
480b0bd80f65646da52824403ade92880af1af2e
Add circle ci settings
project/circleci_settings.py
project/circleci_settings.py
Python
0.000001
@@ -0,0 +1,366 @@ +# -*- coding: utf-8 -*-%0A%0ADEBUG = True%0A%0ALOCAL_DATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.postgresql',%0A 'NAME': 'circle_test',%0A 'USER': 'circleci',%0A 'PASSWORD': '',%0A 'HOST': 'localhost',%0A 'PORT': '5432',%0A %7D%0A%7D%0A%0ALOCALLY_INSTALLED_APPS = %5B%0A%5D%0A%0AENABLE_EMAILS = False%0A%0ALOCALLY_ALLOWED_HOSTS = %5B%0A%5D%0A%0AADMINS = %5B%5D%0A
339798bbed673253358866bf083e7d974f79956c
Make sure proper_count is populated by metainfo_series
flexget/plugins/metainfo/series.py
flexget/plugins/metainfo/series.py
import logging
from string import capwords

from flexget.plugin import priority, register_plugin
from flexget.utils.titles import SeriesParser
from flexget.utils.titles.parser import ParseWarning

import re

log = logging.getLogger('metanfo_series')


class MetainfoSeries(object):
    """
    Check if entry appears to be a series, and populate series info if so.
    """

    def validator(self):
        from flexget import validator
        return validator.factory('boolean')

    # Run after series plugin so we don't try to re-parse it's entries
    @priority(120)
    def on_feed_metainfo(self, feed):
        # Don't run if we are disabled
        if not feed.config.get('metainfo_series', True):
            return
        for entry in feed.entries:
            # If series plugin already parsed this, don't touch it.
            if entry.get('series_name'):
                continue
            self.guess_entry(entry)

    def guess_entry(self, entry, allow_seasonless=False):
        """Populates series_* fields for entries that are successfully parsed."""
        if entry.get('series_parser') and entry['series_parser'].valid:
            # Return true if we already parsed this, false if series plugin parsed it
            return entry.get('series_guessed')
        parser = self.guess_series(entry['title'],
                                   allow_seasonless=allow_seasonless)
        if parser:
            entry['series_name'] = parser.name
            entry['series_season'] = parser.season
            entry['series_episode'] = parser.episode
            entry['series_id'] = parser.identifier
            entry['series_guessed'] = True
            entry['series_parser'] = parser
            entry['proper'] = parser.proper
            return True
        return False

    def guess_series(self, title, allow_seasonless=False):
        """Returns a valid series parser if this :title: appears to be a series"""
        parser = SeriesParser(identified_by='ep',
                              allow_seasonless=allow_seasonless)
        # We need to replace certain characters with spaces to make sure episode parsing works right
        # We don't remove anything, as the match positions should line up with the original title
        clean_title = re.sub('[_.,\[\]\(\):]', ' ', title)
        match = parser.parse_episode(clean_title)
        if match:
            if parser.parse_unwanted(clean_title):
                return
            elif match['match'].start() > 1:
                # We start using the original title here, so we can properly ignore unwanted prefixes.
                # Look for unwanted prefixes to find out where the series title starts
                start = 0
                prefix = re.match('|'.join(parser.ignore_prefixes), title)
                if prefix:
                    start = prefix.end()
                # If an episode id is found, assume everything before it is series name
                name = title[start:match['match'].start()]
                # Remove possible episode title from series name (anything after a ' - ')
                name = name.split(' - ')[0]
                # Replace some special characters with spaces
                name = re.sub('[\._\(\) ]+', ' ', name).strip(' -')
                # Normalize capitalization to title case
                name = capwords(name)
                # If we didn't get a series name, return
                if not name:
                    return
                parser.name = name
                parser.data = title
                try:
                    parser.parse(data=title)
                except ParseWarning, pw:
                    log.debug('ParseWarning: %s' % pw.value)
                if parser.valid:
                    return parser


register_plugin(MetainfoSeries, 'metainfo_series')
Python
0.000017
@@ -1705,16 +1705,72 @@ .proper%0A + entry%5B'proper_count'%5D = parser.proper_count%0A
780e4eb03420d75c18d0b21b5e616f2952aeda41
Test sending headers with end stream.
test/test_basic_logic.py
test/test_basic_logic.py
# -*- coding: utf-8 -*-
"""
test_basic_logic
~~~~~~~~~~~~~~~~

Test the basic logic of the h2 state machines.
"""
import h2.connection

from hyperframe import frame


class TestBasicConnection(object):
    """
    Basic connection tests.
    """
    example_request_headers = [
        (':authority', 'example.com'),
        (':path', '/'),
        (':scheme', 'https'),
        (':method', 'GET'),
    ]

    def test_begin_connection(self):
        c = h2.connection.H2Connection()
        frames = c.send_headers_on_stream(1, self.example_request_headers)

        assert len(frames) == 1

    def test_sending_some_data(self):
        c = h2.connection.H2Connection()
        frames = c.send_headers_on_stream(1, self.example_request_headers)
        frames.append(c.send_data_on_stream(1, b'test', end_stream=True))

        assert len(frames) == 2

    def test_receive_headers_frame(self):
        f = frame.HeadersFrame(1)
        f.data = b'fake headers'
        f.flags = set(['END_STREAM', 'END_HEADERS'])

        c = h2.connection.H2Connection()
        assert c.receive_frame(f) is None
Python
0
@@ -1092,8 +1092,310 @@ is None%0A +%0A def test_send_headers_end_stream(self):%0A c = h2.connection.H2Connection()%0A frames = c.send_headers_on_stream(%0A 1, self.example_request_headers, end_stream=True%0A )%0A assert len(frames) == 1%0A assert frames%5B-1%5D.flags == set(%5B'END_STREAM', 'END_HEADERS'%5D)%0A
8adac46cd59c562ec494508ad735843253adc1f2
add frequencies benchmark
bench/test_frequencies.py
bench/test_frequencies.py
Python
0.000001
@@ -0,0 +1,112 @@ +from toolz import frequencies, identity%0A%0Adata = range(1000)*1000%0A%0Adef test_frequencies():%0A frequencies(data)%0A
892740ce17c2906de996089f07f005c7812270ef
add init back
src/__init__.py
src/__init__.py
Python
0.000001
@@ -0,0 +1,56 @@ +%22%22%22 Source Files, and a location for Global Imports %22%22%22%0A
94acf181f063808c2b6444dbc15ea40ee17bdee3
print structure
bin/print_h5_structure.py
bin/print_h5_structure.py
Python
0.000004
@@ -0,0 +1,726 @@ +import sys%0Afile_name = sys.argv%5B1%5D%0A# python3 print_data_structure.py filename%0Aimport glob%0Aimport os%0Aimport numpy as n%0A%0Aimport h5py # HDF5 support%0A%0Af0 = h5py.File(file_name, %22r%22)%0A%0Adef print_attr(h5item):%0A for attr in h5item:%0A print(attr, h5item%5Battr%5D)%0A %0Adef print_all_key(h5item):%0A for key in h5item.keys():%0A print('========================================')%0A print(key, h5item%5Bkey%5D)%0A print('- - - - - - - - - - - - - - - - - - - - ')%0A print_attr(h5item%5Bkey%5D)%0A %0Adef print_data_structure(h5item):%0A print('+ + + + + + + HEADER + + + + + + + + +')%0A print_attr(h5item.attrs)%0A print('%5Cn')%0A print('+ + + + + + + DATA + + + + + + + + + +')%0A print_all_key(h5item)%0A%0Aprint_data_structure(f0)%0A%0A
fea9e1e80d03b87c05eacd02b5440fc783eb456d
Fix buildfier
package_managers/apt_get/repos.bzl
package_managers/apt_get/repos.bzl
# Copyright 2017 Google Inc. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#    http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules that create additional apt-get repo files."""

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")

def _impl(ctx):
    ctx.actions.write(ctx.outputs.out, content="%s\n" % ctx.attr.repo)

_generate_additional_repo = rule(
    attrs = {
        "repo": attr.string(doc = "Additional repo to add, in sources.list format"),
    },
    executable = False,
    outputs = {
        "out": "%{name}.list",
    },
    implementation = _impl,
)

def generate_additional_repos(name, repos):
    all_repo_files=[]
    for i, repo in enumerate(repos):
        repo_name = "%s_%s" % (name, i)
        all_repo_files.append(repo_name)
        _generate_additional_repo(
            name=repo_name,
            repo=repo
        )

    pkg_tar(
        name=name,
        srcs=all_repo_files,
        package_dir="/etc/apt/sources.list.d/"
    )
"""Generates /etc/apt/sources.list.d/ files with the specified repos.

Args:
  repos: List of repos to add in sources.list format.
"""
Python
0.000001
@@ -1437,16 +1437,17 @@ %22%0A )%0A +%0A %22%22%22Gener
05c103238d977fe8c5d6b614f21f581069373524
Increase tidy column limit to 100
src/etc/tidy.py
src/etc/tidy.py
#!/usr/bin/env python
# xfail-license

import sys, fileinput, subprocess, re
from licenseck import *

err=0
cols=78

# Be careful to support Python 2.4, 2.6, and 3.x here!
config_proc=subprocess.Popen([ "git", "config", "core.autocrlf" ],
                             stdout=subprocess.PIPE)
result=config_proc.communicate()[0]

true="true".encode('utf8')
autocrlf=result.strip() == true if result is not None else False

def report_error_name_no(name, no, s):
    global err
    print("%s:%d: %s" % (name, no, s))
    err=1

def report_err(s):
    report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)

def report_warn(s):
    print("%s:%d: %s" % (fileinput.filename(),
                         fileinput.filelineno(),
                         s))

def do_license_check(name, contents):
    if not check_license(name, contents):
        report_error_name_no(name, 1, "incorrect license")


file_names = [s for s in sys.argv[1:]
              if (not s.endswith("_gen.rs"))
              and (not ".#" in s)]

current_name = ""
current_contents = ""

try:
    for line in fileinput.input(file_names,
                                openhook=fileinput.hook_encoded("utf-8")):

        if fileinput.filename().find("tidy.py") == -1:
            if line.find("FIXME") != -1:
                if re.search("FIXME.*#\d+", line) == None:
                    report_err("FIXME without issue number")
            if line.find("TODO") != -1:
                report_err("TODO is deprecated; use FIXME")
            idx = line.find("// NOTE")
            if idx != -1:
                report_warn("NOTE" + line[idx + len("// NOTE"):])
        if (line.find('\t') != -1 and
            fileinput.filename().find("Makefile") == -1):
            report_err("tab character")
        if not autocrlf and line.find('\r') != -1:
            report_err("CR character")
        if line.endswith(" \n") or line.endswith("\t\n"):
            report_err("trailing whitespace")
        line_len = len(line)-2 if autocrlf else len(line)-1

        if line_len > cols:
            report_err("line longer than %d chars" % cols)

        if fileinput.isfirstline() and current_name != "":
            do_license_check(current_name, current_contents)

        if fileinput.isfirstline():
            current_name = fileinput.filename()
            current_contents = ""

        current_contents += line

    if current_name != "":
        do_license_check(current_name, current_contents)

except UnicodeDecodeError, e:
    report_err("UTF-8 decoding error " + str(e))

sys.exit(err)
Python
0.000193
@@ -110,10 +110,11 @@ ols= -78 +100 %0A%0A#
f5a561494ece69c32d4bbd3e23c435a0fe74788a
Add local enum capability (needed for contentwrapper)
processrunner/enum.py
processrunner/enum.py
Python
0
@@ -0,0 +1,524 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0A# For use with deployment statuses%0A# https://stackoverflow.com/a/1695250%0Adef enum(*sequential, **named):%0A %22%22%22An implementation of the Enum data type%0A%0A Usage%0A myEnum= enum(%0A 'Apple'%0A , 'Banana')%0A %22%22%22%0A enums = dict(zip(sequential, range(len(sequential))), **named)%0A reverse = dict((value, key) for key, value in list(enums.items()))%0A enums%5B'reverse_mapping'%5D = reverse%0A return type(str('Enum'), (), enums)
bb649f299538c76d555e30ac0d31e2560e0acd3e
Add test
tests/test_calculator.py
tests/test_calculator.py
Python
0.000005
@@ -0,0 +1,501 @@ +import unittest%0A%0Afrom app.calculator import Calculator%0A%0A%0Aclass TestCalculator(unittest.TestCase):%0A%0A def setUp(self):%0A self.calc = Calculator()%0A%0A def test_calculator_addition_method_returns_correct_result(self):%0A calc = Calculator()%0A result = calc.addition(2,2)%0A self.assertEqual(4, result)%0A%0A def test_calculator_subtraction_method_returns_correct_result(self):%0A calc = Calculator()%0A result = calc.subtraction(4,2)%0A self.assertEqual(2, result)%0A
630309837989e79ba972358a3098df40892982f5
Create rrd_ts_sync.py
rrd_ts_sync.py
rrd_ts_sync.py
Python
0.000005
@@ -0,0 +1,3172 @@ +#-------------------------------------------------------------------------------%0A#%0A# Controls shed weather station%0A#%0A# The MIT License (MIT)%0A#%0A# Copyright (c) 2015 William De Freitas%0A# %0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A# %0A# The above copyright notice and this permission notice shall be included in all%0A# copies or substantial portions of the Software.%0A# %0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0A# SOFTWARE.%0A#%0A#-------------------------------------------------------------------------------%0A%0A#!usr/bin/env python%0A%0A#===============================================================================%0A# Import modules%0A#===============================================================================%0Aimport settings as s%0Aimport rrdtool%0Aimport thingspeak%0A%0A%0A%0A#===============================================================================%0A# MAIN%0A#===============================================================================%0Adef main():%0A%0A # --- Set up thingspeak account ---%0A #Set up inital values for variables%0A thingspeak_write_api_key = ''%0A %0A #Set up thingspeak account%0A thingspeak_acc = thingspeak.ThingspeakAcc(s.THINGSPEAK_HOST_ADDR,%0A s.THINGSPEAK_API_KEY_FILENAME,%0A s.THINGSPEAK_CHANNEL_ID)%0A%0A #Create RRD files if none exist%0A if not os.path.exists(s.RRDTOOL_RRD_FILE):%0A return%0A%0A # ========== Timed Loop ==========%0A try:%0A while True:%0A %0A #Fetch values from rrd%0A data_values = rrdtool.fetch(s.RRDTOOL_RRD_FILE, 'LAST', %0A '-s', str(s.UPDATE_RATE * -2))%0A %0A # --- Send data to thingspeak ---%0A #Create dictionary with field as key and value%0A sensor_data = %7B%7D%0A for key, value in sorted(sensors.items(), key=lambda e: e%5B1%5D%5B0%5D):%0A sensor_data%5Bvalue%5Bs.TS_FIELD%5D%5D = value%5Bs.VALUE%5D%0A response = thingspeak_acc.update_channel(sensor_data)%0A%0A%0A # ========== User exit command ==========%0A except KeyboardInterrupt:%0A sys.exit(0)%0A %0A%0A#===============================================================================%0A# Boiler plate%0A#===============================================================================%0Aif __name__=='__main__':%0A main()%0A
b20a6ccc211060644ff3e6f89428420fa59f5a5d
add a couple of tests for the build_scripts command
tests/test_build_scripts.py
tests/test_build_scripts.py
Python
0
@@ -0,0 +1,2325 @@ +%22%22%22Tests for distutils.command.build_scripts.%22%22%22%0A%0Aimport os%0Aimport unittest%0A%0Afrom distutils.command.build_scripts import build_scripts%0Afrom distutils.core import Distribution%0A%0Afrom distutils.tests import support%0A%0A%0Aclass BuildScriptsTestCase(support.TempdirManager, unittest.TestCase):%0A%0A def test_default_settings(self):%0A cmd = self.get_build_scripts_cmd(%22/foo/bar%22, %5B%5D)%0A self.assert_(not cmd.force)%0A self.assert_(cmd.build_dir is None)%0A%0A cmd.finalize_options()%0A%0A self.assert_(cmd.force)%0A self.assertEqual(cmd.build_dir, %22/foo/bar%22)%0A%0A def test_build(self):%0A source = self.mkdtemp()%0A target = self.mkdtemp()%0A expected = self.write_sample_scripts(source)%0A%0A cmd = self.get_build_scripts_cmd(target,%0A %5Bos.path.join(source, fn)%0A for fn in expected%5D)%0A cmd.finalize_options()%0A cmd.run()%0A%0A built = os.listdir(target)%0A for name in expected:%0A self.assert_(name in built)%0A%0A def get_build_scripts_cmd(self, target, scripts):%0A dist = Distribution()%0A dist.scripts = scripts%0A dist.command_obj%5B%22build%22%5D = support.DummyCommand(%0A build_scripts=target,%0A force=1%0A )%0A return build_scripts(dist)%0A%0A def write_sample_scripts(self, dir):%0A expected = %5B%5D%0A expected.append(%22script1.py%22)%0A self.write_script(dir, %22script1.py%22,%0A (%22#! /usr/bin/env python2.3%5Cn%22%0A %22# bogus script w/ Python sh-bang%5Cn%22%0A %22pass%5Cn%22))%0A expected.append(%22script2.py%22)%0A self.write_script(dir, %22script2.py%22,%0A (%22#!/usr/bin/python%5Cn%22%0A %22# bogus script w/ Python sh-bang%5Cn%22%0A %22pass%5Cn%22))%0A expected.append(%22shell.sh%22)%0A self.write_script(dir, %22shell.sh%22,%0A (%22#!/bin/sh%5Cn%22%0A %22# bogus shell script w/ sh-bang%5Cn%22%0A %22exit 0%5Cn%22))%0A return expected%0A%0A def write_script(self, dir, name, text):%0A f = open(os.path.join(dir, name), %22w%22)%0A f.write(text)%0A f.close()%0A%0A%0Adef test_suite():%0A return unittest.makeSuite(BuildScriptsTestCase)%0A
3cf30bac4d20dbebf6185351ba0c10426a489de9
Add sanity linter to catch future use
tools/run_tests/sanity/check_channel_arg_usage.py
tools/run_tests/sanity/check_channel_arg_usage.py
Python
0
@@ -0,0 +1,1689 @@ +#!/usr/bin/env python%0A%0A# Copyright 2018 gRPC authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom __future__ import print_function%0A%0Aimport os%0Aimport sys%0A%0Aos.chdir(os.path.join(os.path.dirname(sys.argv%5B0%5D), '../../..'))%0A%0A# set of files that are allowed to use the raw GRPC_ARG_* types%0A_EXCEPTIONS = set(%5B%0A 'src/core/lib/channel/channel_args.cc',%0A 'src/core/lib/channel/channel_args.h',%0A%5D)%0A%0A_BANNED = set(%5B%0A %22GRPC_ARG_POINTER%22,%0A%5D)%0A%0Aerrors = 0%0Anum_files = 0%0Afor root, dirs, files in os.walk('src/core'):%0A for filename in files:%0A num_files += 1%0A path = os.path.join(root, filename)%0A if path in _EXCEPTIONS: continue%0A with open(path) as f:%0A text = f.read()%0A for banned in _BANNED:%0A if banned in text:%0A print('Illegal use of %22%25s%22 in %25s' %25 (banned, path))%0A errors += 1%0A%0Aassert errors == 0%0A# This check comes about from this issue:%0A# https://github.com/grpc/grpc/issues/15381%0A# Basically, a change rendered this script useless and we did not realize it.%0A# This dumb check ensures that this type of issue doesn't occur again.%0Aassert num_files %3E 300 # we definitely have more than 300 files%0A
51d0623da276aa60a0da4d48343f215f0c517a29
Add module for ids2vecs
thinc/neural/ids2vecs.py
thinc/neural/ids2vecs.py
Python
0
@@ -0,0 +1,55 @@ +from ._classes.window_encode import MaxoutWindowEncode%0A
ada91bd1ed76d59b7ec41d765af188aed2f8fd62
add a module for collecting Warnings
src/pymor/core/warnings.py
src/pymor/core/warnings.py
Python
0
@@ -0,0 +1,256 @@ +'''%0ACreated on Nov 19, 2012%0A%0A@author: r_milk01%0A'''%0A%0Aclass CallOrderWarning(UserWarning):%0A '''I am raised when there's a preferred call order, but the user didn't follow it.%0A For an Example see pymor.discretizer.stationary.elliptic.cg%0A '''%0A pass
03c1f7040cc971c6e05f79f537fc501c550edaa8
Add back manage.py (doh).
manage.py
manage.py
Python
0
@@ -0,0 +1,353 @@ +#!/usr/bin/env python%0Aimport os%0Aimport sys%0A%0Aif __name__ == %22__main__%22:%0A conf = os.path.dirname(__file__)%0A wafer = os.path.join(conf, '..', 'wafer')%0A sys.path.append(wafer)%0A%0A os.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22, %22settings%22)%0A%0A from django.core.management import execute_from_command_line%0A%0A execute_from_command_line(sys.argv)%0A
0b88d652ddb23a385e79bfccb1db89c954d7d27f
Set up Restaurant class
get_a_lunch_spot.py
get_a_lunch_spot.py
Python
0.000003
@@ -0,0 +1,625 @@ +import json%0A%0Arestaurants_string = %22%22%22%5B%7B%0A %22name%22 : %22sweetgreen%22%0A%7D%5D%22%22%22%0Aprint restaurants_string%0A%0Arestaurants_json = json.loads(restaurants_string)%0Aprint restaurants_json%0A%0Aclass Restaurant:%0A name = %22%22%0A%0A def __init__(self, data):%0A self.name = self.getName(data)%0A%0A def getName(self, data):%0A for i in data:%0A return i%5B'name'%5D%0A%0Arestaurant_response = Restaurant(restaurants_json)%0Aprint restaurant_response.name%0A%0A%0Aclass GetALunchSpotResponse:%0A restaurant = None%0A%0A def _init_(self, intent):%0A self.restaurant = self.restaurants%5B0%5D%0A%0A def generateStringResponse():%0A return %22Why don't you go to: %22 + restaurant.name%0A
23e778c78c2d77a9eeb0904856429546e379f8b5
change the version of openerp
bin/release.py
bin/release.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2004-2008 Tiny SPRL (http://tiny.be) All Rights Reserved.
#
# $Id$
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################################################

name = 'openerp-server'
version = '4.3.0'
description = 'OpenERP Server'
long_desc = '''\
OpenERP is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features
include a distributed server, flexible workflows, an object database, a
dynamic GUI, customizable reports, and SOAP and XML-RPC interfaces.
'''
classifiers = """\
Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU General Public License Version 2 (GPL-2)
Programming Language :: Python
"""
url = 'http://www.openerp.com'
author = 'Tiny.be'
author_email = '[email protected]'
license = 'GPL-2'

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -1405,17 +1405,18 @@ = '4.3. -0 +99 '%0Adescri
23257c56b58c26694773fb12d3ba167de43bd43b
Add validate.py tool
validate.py
validate.py
Python
0.000001
@@ -0,0 +1,681 @@ +import json%0Aimport sys%0Aimport urllib2%0A%0Adata=json.loads(open(%22bootstrap/%7B%7D.json%22.format(sys.argv%5B1%5D)).read())%0Afor f in data%5B'storage'%5D%5B'files'%5D:%0A if 'source' not in f%5B'contents'%5D or 'http' not in f%5B'contents'%5D%5B'source'%5D:%0A continue%0A url = f%5B'contents'%5D%5B'source'%5D%0A digest = f%5B'contents'%5D%5B'verification'%5D%5B'hash'%5D.lstrip('sha512-')%0A print('%7B%7D %7B%7D'.format(url, digest))%0A print('Fetching %7B%7D..'.format(url))%0A response = urllib2.urlopen(url)%0A html = response.read()%0A with open('/tmp/%7B%7D'.format(digest), 'w+') as tmpfile:%0A tmpfile.write(html)%0A print('Wrote /tmp/%7B%7D'.format(digest))%0A%0A%0A# if 'source', fetch and compare with 'verification'%5B'hash'%5D%0A%0A%0A
dd1c49eb12bf69580a8727353aa19741059df6d5
add 102
vol3/102.py
vol3/102.py
Python
0.999996
@@ -0,0 +1,355 @@ +import urllib2%0A%0Aif __name__ == %22__main__%22:%0A ans = 0%0A for line in urllib2.urlopen('https://projecteuler.net/project/resources/p102_triangles.txt'):%0A ax, ay, bx, by, cx, cy = map(int, line.split(','))%0A a = ax * by - ay * bx %3E 0%0A b = bx * cy - by * cx %3E 0%0A c = cx * ay - cy * ax %3E 0%0A ans += a == b == c%0A print ans%0A
feac5a01059a95910c76a0de5f83ad2473cf09c8
Create app.py
app.py
app.py
Python
0.000003
@@ -0,0 +1,2763 @@ +import os%0Aimport sys%0Aimport tweepy%0Aimport requests%0Aimport numpy as np%0Aimport json%0Aimport os%0A%0Afrom __future__ import print_function%0Afrom future.standard_library import install_aliases%0Ainstall_aliases()%0A%0Afrom urllib.parse import urlparse, urlencode%0Afrom urllib.request import urlopen, Request%0Afrom urllib.error import HTTPError%0A%0A%0A%0Afrom flask import Flask%0Afrom flask import request%0Afrom flask import make_response%0A%0A# Flask app should start in global layout%0Aapp = Flask(__name__)%0A%0A%0A%0Afrom keras.models import Sequential%0Afrom keras.layers import Dense%0Afrom textblob import TextBlob%0A%0A%0A%0A%0A# Where the csv file will live%0AFILE_NAME = 'historical.csv'%0A%0A%[email protected]('/webhook', methods=%5B'POST'%5D)%0Adef webhook():%0A req = request.get_json(silent=True, force=True)%0A%0A print(%22Request:%22)%0A print(json.dumps(req, indent=4))%0A%0A res = processRequest(req)%0A%0A res = json.dumps(res, indent=4)%0A # print(res)%0A r = make_response(res)%0A r.headers%5B'Content-Type'%5D = 'application/json'%0A return r%0A%0Adef processRequest(req):%0A quote=req.get(%22result%22).get(%22parameters%22).get(%22STOCK%22)%0A get_historical(quote)%0A res = stock_prediction() %0A return res%0A%0Adef get_historical(quote):%0A # Download our file from google finance%0A url = 'http://www.google.com/finance/historical?q=NASDAQ%253A'+quote+'&output=csv'%0A r = requests.get(url, stream=True)%0A%0A if r.status_code != 400:%0A with open(FILE_NAME, 'wb') as f:%0A for chunk in r:%0A f.write(chunk)%0A%0A return True%0A%0A%0Adef stock_prediction():%0A%0A # Collect data points from csv%0A dataset = %5B%5D%0A%0A with open(FILE_NAME) as f:%0A for n, line in enumerate(f):%0A if n != 0:%0A dataset.append(float(line.split(',')%5B1%5D))%0A%0A dataset = np.array(dataset)%0A%0A # Create dataset matrix (X=t and Y=t+1)%0A def create_dataset(dataset):%0A dataX = %5Bdataset%5Bn+1%5D for n in range(len(dataset)-2)%5D%0A return np.array(dataX), dataset%5B2:%5D%0A %0A trainX, trainY = create_dataset(dataset)%0A%0A # Create and fit Multilinear Perceptron model%0A model = Sequential()%0A model.add(Dense(8, input_dim=1, activation='relu'))%0A model.add(Dense(1))%0A model.compile(loss='mean_squared_error', optimizer='adam')%0A model.fit(trainX, trainY, nb_epoch=200, batch_size=2, verbose=2)%0A%0A # Our prediction for tomorrow%0A prediction = model.predict(np.array(%5Bdataset%5B0%5D%5D))%0A result = 'The price will move from %25s to %25s' %25 (dataset%5B0%5D, prediction%5B0%5D%5B0%5D)%0A%0A return result%0A %0A return %7B%0A %22speech%22: result,%0A %22displayText%22: ,%0A # %22data%22: data,%0A # %22contextOut%22: %5B%5D,%0A %7D%0A%0A %0A%0A%0A# We have our file so we create the neural net and get the prediction%0Aprint stock_prediction()%0A%0A# We are done so we delete the csv file%0Aos.remove(FILE_NAME)%0A
1490f438693a5727c722d933a712c889d3c09556
test where SSL proxying works
test/others/ProxyTest.py
test/others/ProxyTest.py
Python
0.000002
@@ -0,0 +1,1528 @@ +import urllib2%0D%0A%0D%0AhttpTarget = %22http://www.collab.net%22%0D%0AhttpsTargetTrusted = %22https://ctf.open.collab.net/sf/sfmain/do/home%22%0D%0AhttpsTargetUntrusted = %22https://www.collab.net%22%0D%0A%0D%0AproxyHost = %22cu182.cloud.sp.collab.net%22%0D%0AproxyPort = %2280%22%0D%0AproxyUser = %22proxyuser%22%0D%0AproxyPwd = %22proxypass%22%0D%0A%0D%0Adef main():%0D%0A print %22Testing proxy: %25s%5Cn%22 %25 (getProxyUrl(),)%0D%0A testProxy(httpTarget)%0D%0A testProxy(httpsTargetTrusted)%0D%0A testProxy(httpsTargetUntrusted)%0D%0A%0D%0Adef getProxyUrl():%0D%0A proxyUrl = %22http://%25s:%25s@%25s:%25s%22 %25 (proxyUser, proxyPwd, proxyHost, proxyPort)%0D%0A return proxyUrl%0D%0A %0D%0Adef testProxy(url):%0D%0A req = urllib2.Request(url)%0D%0A scheme = %22https%22 if url.startswith(%22https%22) else %22http%22 %0D%0A %0D%0A # build a new opener that uses a proxy requiring authorization%0D%0A proxy_support = urllib2.ProxyHandler(%7Bscheme : getProxyUrl()%7D)%0D%0A opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)%0D%0A # install it%0D%0A urllib2.install_opener(opener) %0D%0A%0D%0A try:%0D%0A print %22Testing proxy to target: %25s ...%22 %25 (url, )%0D%0A response = urllib2.urlopen(req)%0D%0A if response.read():%0D%0A print %22Proxy connection was successful%5Cn%22 %0D%0A %0D%0A except IOError, e:%0D%0A if hasattr(e, 'reason'):%0D%0A print 'Failed to reach a server.'%0D%0A print 'Reason: %5Cn', e.reason%0D%0A elif hasattr(e, 'code'):%0D%0A print 'The server couldn%5C't fulfill the request.'%0D%0A print 'Error code: %5Cn', e.code%0D%0A %0D%0A %0D%0Aif __name__ == %22__main__%22:%0D%0A main() %0D%0A%0D%0A%0D%0A%0D%0A
a021928ff9e1625b5e95a8962e12a0d2cc25399f
Remove debug prints
nose2/main.py
nose2/main.py
import logging
import os
import sys

from nose2.compat import unittest
from nose2 import events, loader, runner, session, util

log = logging.getLogger(__name__)
__unittest = True


class PluggableTestProgram(unittest.TestProgram):
    sessionClass = session.Session
    loaderClass = loader.PluggableTestLoader
    runnerClass = runner.PluggableTestRunner
    defaultPlugins = ['nose2.plugins.loader.discovery',
                      'nose2.plugins.loader.testcases',
                      'nose2.plugins.loader.functions',
                      'nose2.plugins.loader.generators',
                      'nose2.plugins.result',
                      'nose2.plugins.collect',
                      'nose2.plugins.logcapture',
                      # etc
                      ]

    # XXX override __init__ to warn that testLoader and testRunner are ignored?
    def parseArgs(self, argv):
        log.debug("parse argv %s", argv)
        self.session = self.sessionClass()
        self.argparse = self.session.argparse  # for convenience

        # XXX force these? or can it be avoided?
        self.testLoader = self.loaderClass(self.session)

        # Parse initial arguments like config file paths, verbosity
        self.setInitialArguments()
        # FIXME -h here makes processing stop.
        cfg_args, argv = self.argparse.parse_known_args(argv[1:])
        print cfg_args, argv
        self.handleCfgArgs(cfg_args)

        # Parse arguments for plugins (if any) and test names
        self.argparse.add_argument('testNames', nargs='*')
        args, argv = self.argparse.parse_known_args(argv)
        print args, argv
        if argv:
            self.argparse.error("Unrecognized arguments: %s" % ' '.join(argv))
        self.handleArgs(args)
        self.createTests()

    def setInitialArguments(self):
        self.argparse.add_argument(
            '-s', '--start-dir', default='.',
            help="Directory to start discovery ('.' default)")
        self.argparse.add_argument(
            '-t', '--top-level-directory', '--project-directory',
            help='Top level directory of project (defaults to start dir)')
        self.argparse.add_argument('--config', '-c', nargs='?', action='append',
                                   default=['unittest.cfg', 'nose2.cfg'])
        self.argparse.add_argument('--no-user-config', action='store_const',
                                   dest='user_config', const=False, default=True)
        self.argparse.add_argument('--no-plugins', action='store_const',
                                   dest='load_plugins', const=False, default=True)
        self.argparse.add_argument('--verbose', '-v', action='count', default=0)
        self.argparse.add_argument('--quiet', action='store_const',
                                   dest='verbose', const=0)

    def handleCfgArgs(self, cfg_args):
        if cfg_args.verbose:
            self.session.verbosity += cfg_args.verbose
        self.session.startDir = cfg_args.start_dir
        if cfg_args.top_level_directory:
            self.session.topLevelDir = cfg_args.top_level_directory
        self.session.loadConfigFiles(*self.findConfigFiles(cfg_args))
        self.session.prepareSysPath()
        if cfg_args.load_plugins:
            self.loadPlugins()

    def findConfigFiles(self, cfg_args):
        filenames = cfg_args.config[:]
        proj_opts = ('unittest.cfg', 'nose2.cfg')
        for fn in proj_opts:
            if cfg_args.top_level_directory:
                fn = os.path.abspath(
                    os.path.join(cfg_args.top_level_directory, fn))
            filenames.append(fn)
        if cfg_args.user_config:
            user_opts = ('~/.unittest.cfg', '~/.nose2.cfg')
            for fn in user_opts:
                filenames.append(os.path.expanduser(fn))
        return filenames

    def handleArgs(self, args):
        # FIXME pass arguments to session & plugins
        self.testNames = args.testNames

    def loadPlugins(self):
        # FIXME also pass in plugins set via __init__ args
        self.session.loadPlugins(self.defaultPlugins)

    def createTests(self):
        # XXX belongs in init?
        log.debug("Create tests from %s/%s", self.testNames, self.module)
        if self.module and '__unittest' in dir(self.module):
            self.module = None
        self.test = self.testLoader.loadTestsFromNames(
            self.testNames, self.module)

    def runTests(self):
        # fire plugin hook
        runner = self._makeRunner()
        self.result = runner.run(self.test)
        if self.exit:
            sys.exit(not self.result.wasSuccessful())

    def _makeRunner(self):
        runner = self.runnerClass(self.session)
        event = events.RunnerCreatedEvent(runner)
        self.session.hooks.runnerCreated(event)
        return event.runner


main_ = PluggableTestProgram
Python
0.000001
@@ -1359,37 +1359,8 @@ :%5D)%0A - print cfg_args, argv%0A @@ -1576,33 +1576,8 @@ gv)%0A - print args, argv%0A
aa1808c9a13894751953c8a1c816c89861e514d1
Create new package. (#6061)
var/spack/repos/builtin/packages/r-iso/package.py
var/spack/repos/builtin/packages/r-iso/package.py
Python
0
@@ -0,0 +1,1679 @@ +##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass RIso(RPackage):%0A %22%22%22Linear order and unimodal order (univariate) isotonic regression;%0A bivariate isotonic regression with linear order on both variables.%22%22%22%0A%0A homepage = %22https://cran.r-project.org/package=Iso%22%0A url = %22https://cran.rstudio.com/src/contrib/Iso_0.0-17.tar.gz%22%0A list_url = %22https://cran.rstudio.com/src/contrib/Archive/Iso%22%0A%0A version('0.0-17', 'bf99821efb6a44fa75fdbf5e5c4c91e4')%0A
384033b6b5d7a3b207c1360b896f70bfbc064caf
Update __init__.py
tendrl/integrations/gluster/sds_sync/__init__.py
tendrl/integrations/gluster/sds_sync/__init__.py
import etcd
import time

from tendrl.commons.objects.job import Job
from tendrl.commons import sds_sync
from tendrl.commons.utils import log_utils as logger
import uuid


class GlusterIntegrtaionsSyncThread(sds_sync.StateSyncThread):
    def run(self):
        logger.log(
            "debug",
            NS.get("publisher_id", None),
            {"message": "%s running" % self.__class__.__name__}
        )
        while not self._complete.is_set():
            time.sleep(int(NS.config.data.get("sync_interval", 10)))
            try:
                nodes = NS._int.client.read("/nodes")
            except etcd.EtcdKeyNotFound:
                return

            for node in nodes.leaves:
                node_id = node.key.split('/')[-1]
                try:
                    node_context = NS.tendrl.objects.NodeContext(
                        node_id=node_id
                    ).load()
                    tendrl_context = NS.tendrl.objects.TendrlContext(
                        node_id=node_id
                    ).load()
                    if node_context.status != "DOWN" or\
                            tendrl_context.sds_name != "gluster":
                        continue

                    # check if the node belongs to a cluster that is managed
                    cluster = NS.tendrl.objects.Cluster(
                        integration_id=tendrl_context.integration_id
                    ).load()
                    if cluster.is_managed != "yes":
                        continue

                    # check if the bricks of this node are already
                    # marked as down
                    bricks = NS._int.client.read(
                        "clusters/{0}/Bricks/all/{1}".format(
                            tendrl_context.integration_id,
                            node_context.fqdn
                        )
                    )
                    bricks_marked_already = True
                    for brick in bricks.leaves:
                        brick_status = NS._int.client.read(
                            "{0}/status".format(brick.key)
                        ).value
                        if brick_status != "Stopped":
                            bricks_marked_already = False
                            break
                    if bricks_marked_already:
                        continue

                    self.update_brick_status(
                        node_context.fqdn,
                        tendrl_context.integration_id,
                        "Stopped"
                    )
                except etcd.EtcdKeyNotFound:
                    pass

    def update_brick_status(self, fqdn, integration_id, status):
        _job_id = str(uuid.uuid4())
        _params = {
            "TendrlContext.integration_id": integration_id,
            "Node.fqdn": fqdn,
            "Brick.status": status
        }
        _job_payload = {
            "tags": [
                "tendrl/integration/{0}".format(
                    integration_id
                )
            ],
            "run": "gluster.flows.UpdateBrickStatus",
            "status": "new",
            "parameters": _params,
            "type": "sds"
        }
        Job(
            job_id=_job_id,
            status="new",
            payload=_job_payload
        ).save()
Python
0.000072
@@ -451,77 +451,8 @@ ():%0A - time.sleep(int(NS.config.data.get(%22sync_interval%22, 10)))%0A @@ -579,14 +579,89 @@ -return +time.sleep(int(NS.config.data.get(%22sync_interval%22, 10)))%0A continue %0A%0A @@ -2628,16 +2628,106 @@ pass +%0A %0A time.sleep(int(NS.config.data.get(%22sync_interval%22, 10))) %0A%0A de
930274bb8ab10379f4c76618cccc604c9fe27996
Update the test to match removed XLA:CPU device
tensorflow/python/eager/remote_cloud_tpu_test.py
tensorflow/python/eager/remote_cloud_tpu_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import flags
from absl.testing import absltest

from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import remote
from tensorflow.python.framework import config
from tensorflow.python.tpu import tpu_strategy_util

FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')
flags.DEFINE_integer('num_tpu_devices', 8, 'The expected number of TPUs.')
DEVICES_PER_TASK = 8

EXPECTED_DEVICES_PRE_CONNECT = [
    '/device:CPU:0',
    '/device:XLA_CPU:0',
]
EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES = [
    '/job:worker/replica:0/task:{task}/device:CPU:0',
    '/job:worker/replica:0/task:{task}/device:XLA_CPU:0',
    '/job:worker/replica:0/task:{task}/device:TPU_SYSTEM:0',
    '/job:worker/replica:0/task:{task}/device:TPU:0',
    '/job:worker/replica:0/task:{task}/device:TPU:1',
    '/job:worker/replica:0/task:{task}/device:TPU:2',
    '/job:worker/replica:0/task:{task}/device:TPU:3',
    '/job:worker/replica:0/task:{task}/device:TPU:4',
    '/job:worker/replica:0/task:{task}/device:TPU:5',
    '/job:worker/replica:0/task:{task}/device:TPU:6',
    '/job:worker/replica:0/task:{task}/device:TPU:7',
]


class RemoteCloudTPUTest(absltest.TestCase):
  """Test that we can connect to a real Cloud TPU."""

  def test_connect(self):
    # Log full diff on failure.
    self.maxDiff = None  # pylint:disable=invalid-name

    self.assertCountEqual(
        EXPECTED_DEVICES_PRE_CONNECT,
        [device.name for device in config.list_logical_devices()])

    resolver = tpu_cluster_resolver.TPUClusterResolver(
        tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
    )
    remote.connect_to_cluster(resolver)

    expected_devices = EXPECTED_DEVICES_PRE_CONNECT
    for task in range(FLAGS.num_tpu_devices // DEVICES_PER_TASK):
      expected_devices.extend([
          template.format(task=task)
          for template in EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES
      ])
    self.assertCountEqual(
        expected_devices,
        [device.name for device in config.list_logical_devices()])

    tpu_strategy_util.initialize_tpu_system(resolver)

if __name__ == '__main__':
  absltest.main()
Python
0
@@ -1492,33 +1492,8 @@ 0',%0A - '/device:XLA_CPU:0',%0A %5D%0AEX
4dd36d68225311e328cc4a909b3c56bf9b6e8e53
Create picture.py
picture.py
picture.py
Python
0.000002
@@ -0,0 +1,50 @@ +from ggame import App%0A%0Amyapp = App()%0A%0Amyapp.run()%0A
b3a3376e90d1eede9b2d33d0a4965c1f4920f20a
Add a memoizer
memoize.py
memoize.py
Python
0.000022
@@ -0,0 +1,465 @@ +%0A# from http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/%0A%0A__all__ = %5B%22memoize%22%5D%0A%0Adef memoize(f):%0A %22%22%22 Memoization decorator for a function taking one or more arguments. %22%22%22%0A class memodict(dict):%0A def __getitem__(self, *key):%0A return dict.__getitem__(self, key)%0A%0A def __missing__(self, key):%0A ret = self%5Bkey%5D = f(*key)%0A return ret%0A%0A return memodict().__getitem__%0A
9a902049212ceea29b7b0e440acd33e3c63c7beb
Add timer tests script.
scripts/examples/02-Board-Control/timer_tests.py
scripts/examples/02-Board-Control/timer_tests.py
Python
0
@@ -0,0 +1,617 @@ +# Timer Test Example%0A#%0A# This example tests all the timers.%0A%0Aimport time%0Afrom pyb import Pin, Timer, LED%0A%0Ablue_led = LED(3)%0A%0A# Note: functions that allocate memory are Not allowed in callbacks%0Adef tick(timer):%0A blue_led.toggle()%0A%0Aprint(%22%22)%0Afor i in range(1, 18):%0A try:%0A print(%22Testing TIM%25d... %22%25(i), end=%22%22)%0A tim = Timer(i, freq=10) # create a timer object using timer 4 - trigger at 1Hz%0A tim.callback(tick) # set the callback to our tick function%0A time.sleep(1000)%0A tim.deinit()%0A except ValueError as e:%0A print(e)%0A continue%0A print(%22done!%22)%0A
11447d409756f2bcd459a6db9d51967358272780
move flags to constant module
hotline/constant.py
hotline/constant.py
Python
0.000002
@@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*-%0A%0A%0Aclass flags(object):%0A DontHide = object()%0A Hide = object()%0A _list = %5BDontHide, Hide%5D%0A
0295a1cbad1dfa2443e6b8e8d639b7d845adaebf
Add lc0225_implement_stack_using_queues.py
lc0225_implement_stack_using_queues.py
lc0225_implement_stack_using_queues.py
Python
0.000002
@@ -0,0 +1,1827 @@ +%22%22%22Leetcode 225. Implement Stack using Queues%0AEasy%0A%0AURL: https://leetcode.com/problems/implement-stack-using-queues/%0A%0AImplement the following operations of a stack using queues.%0A- push(x) -- Push element x onto stack.%0A- pop() -- Removes the element on top of the stack.%0A- top() -- Get the top element.%0A- empty() -- Return whether the stack is empty.%0A%0AExample:%0AMyStack stack = new MyStack();%0Astack.push(1);%0Astack.push(2); %0Astack.top(); // returns 2%0Astack.pop(); // returns 2%0Astack.empty(); // returns false%0A%0ANotes:%0A- You must use only standard operations of a queue -- which means only %0A push to back, peek/pop from front, size, and is empty operations are valid.%0A- Depending on your language, queue may not be supported natively. You may simulate%0A a queue by using a list or deque (double-ended queue), as long as you use only%0A standard operations of a queue.%0A- You may assume that all operations are valid (for example, no pop or top%0A operations will be called on an empty stack).%0A%22%22%22%0A%0Aclass MyStack(object):%0A def __init__(self):%0A %22%22%22%0A Initialize your data structure here.%0A %22%22%22%0A pass%0A%0A def push(self, x):%0A %22%22%22%0A Push element x onto stack.%0A :type x: int%0A :rtype: None%0A %22%22%22%0A pass%0A%0A def pop(self):%0A %22%22%22%0A Removes the element on top of the stack and returns that element.%0A :rtype: int%0A %22%22%22%0A pass%0A%0A def top(self):%0A %22%22%22%0A Get the top element.%0A :rtype: int%0A %22%22%22%0A pass%0A%0A def empty(self):%0A %22%22%22%0A Returns whether the stack is empty.%0A :rtype: bool%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A # obj = MyStack()%0A # obj.push(x)%0A # param_2 = obj.pop()%0A # param_3 = obj.top()%0A # param_4 = obj.empty()%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
0a48d3dc2286db9cff5ec757d8b5f0b45f35ab7d
update : some minor fixes
ptop/interfaces/__init__.py
ptop/interfaces/__init__.py
Python
0
@@ -0,0 +1,24 @@ +from .GUI import PtopGUI
261b2477cf6f086028a1028c7d8a02f1b1631018
add solution for Jump Game
src/jumpGame.py
src/jumpGame.py
Python
0
@@ -0,0 +1,322 @@ +class Solution:%0A # @param A, a list of integers%0A # @return a boolean%0A%0A def canJump(self, A):%0A if not A:%0A return False%0A max_dist = 0%0A for i in xrange(len(A)):%0A if i %3E max_dist:%0A return False%0A max_dist = max(max_dist, i+A%5Bi%5D)%0A return True%0A
5c0805edd7d54a070b7ce1942eadfc0b3ff2874b
Update memory.py
src/collectors/memory/memory.py
src/collectors/memory/memory.py
# coding=utf-8

"""
This class collects data on memory utilization

Note that MemFree may report no memory free. This may not actually be the
case, as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.

#### Dependencies

* /proc/meminfo or psutil

"""

import diamond.collector
import diamond.convertor
import os

try:
    import psutil
    psutil  # workaround for pyflakes issue #13
except ImportError:
    psutil = None

_KEY_MAPPING = [
    'MemTotal',
    'MemFree',
    'Buffers',
    'Cached',
    'Active',
    'Dirty',
    'Inactive',
    'Shmem',
    'SwapTotal',
    'SwapFree',
    'SwapCached',
    'VmallocTotal',
    'VmallocUsed',
    'VmallocChunk'
]


class MemoryCollector(diamond.collector.Collector):

    PROC = '/proc/meminfo'

    def get_default_config_help(self):
        config_help = super(MemoryCollector, self).get_default_config_help()
        config_help.update({
            'detailed': 'Set to True to Collect all the nodes',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MemoryCollector, self).get_default_config()
        config.update({
            'enabled':  'True',
            'path':     'memory',
            'method':   'Threaded',
            # Collect all the nodes or just a few standard ones?
            # Uncomment to enable
            #'detailed': 'True'
        })
        return config

    def collect(self):
        """
        Collect memory stats
        """
        if os.access(self.PROC, os.R_OK):
            file = open(self.PROC)
            data = file.read()
            file.close()

            for line in data.splitlines():
                try:
                    name, value, units = line.split()
                    name = name.rstrip(':')
                    value = int(value)

                    if (name not in _KEY_MAPPING
                            and 'detailed' not in self.config):
                        continue

                    for unit in self.config['byte_unit']:
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit=units,
                                                                 newUnit=unit)
                        self.publish(name, value, metric_type='GAUGE')

                        # TODO: We only support one unit node here. Fix it!
                        break

                except ValueError:
                    continue
            return True
        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No memory metrics retrieved')
                return None

            phymem_usage = psutil.phymem_usage()
            virtmem_usage = psutil.virtmem_usage()
            units = 'B'

            for unit in self.config['byte_unit']:
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('MemTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=phymem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('MemFree', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('SwapTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('SwapFree', value, metric_type='GAUGE')

                # TODO: We only support one unit node here. Fix it!
                break

            return True

        return None
Python
0
@@ -723,16 +723,37 @@ ocChunk' +,%0A 'Committed_AS', %0A%5D%0A%0A%0Acla
99f9f14039749f8e3e4340d6bf0e0394e3483ca2
add basic propfind test
protocol/test_protocol_propfind.py
protocol/test_protocol_propfind.py
Python
0
@@ -0,0 +1,600 @@ +from smashbox.utilities import *%0Afrom smashbox.utilities.hash_files import *%0Afrom smashbox.protocol import *%0A%0A@add_worker%0Adef main(step):%0A%0A d = make_workdir()%0A reset_owncloud_account()%0A%0A URL = oc_webdav_url()%0A%0A ls_prop_desktop20(URL,depth=0)%0A logger.info(%22Passed 1%22)%0A%0A ls_prop_desktop20(URL,depth=1)%0A logger.info(%22Passed 2%22)%0A%0A ls_prop_desktop17(URL,depth=0)%0A logger.info(%22Passed 3%22)%0A%0A ls_prop_desktop17(URL,depth=1)%0A logger.info(%22Passed 4%22)%0A%0A all_prop_android(URL,depth=0)%0A logger.info(%22Passed 5%22)%0A%0A all_prop_android(URL,depth=1)%0A logger.info(%22Passed 6%22)%0A
6bbdd1d9d60b03429dc2bc1ff3ba5d06353fad9a
Add a Bug class.
libzilla/bug.py
libzilla/bug.py
Python
0
@@ -0,0 +1,515 @@ +class Bug:%0A def __init__(self,%0A bug_number,%0A comment=None,%0A resolution=None,%0A status=None):%0A%0A self.bug_number = bug_number%0A self.resolution = resolution%0A self.status = status%0A self.comment = comment%0A%0A def __str__(self):%0A return %22%22%22Bug #: %5B%25s%5D%0ARESOLUTION: %5B%25s%5D%0ASTATUS: %5B%25s%5D%0AComment: %25s%22%22%22 %25 (%0A self.bug_number,%0A self.resolution,%0A self.status,%0A self.comment%0A )%0A
17bd35df32dd68d1cbc0fe73fcda186d13a66db0
Add run.py
run.py
run.py
Python
0.000009
@@ -0,0 +1,1328 @@ +#!/usr/bin/env python3%0A#%0A# Copyright (c) 2014 Mark Samman %3Chttps://github.com/marksamman/pylinkshortener%3E%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A%0Aimport threading%0Afrom app import app%0Afrom websocket import websocketThread%0A%0Aif __name__ == %22__main__%22:%0A%09threading.Thread(target=websocketThread).start()%0A%09app.run()%0A
7613285ba24990d62cf2273387a143aa74ce8bb0
add shortcut to send sms message
nexmo/utils.py
nexmo/utils.py
Python
0.000001
@@ -0,0 +1,605 @@ +from .libpynexmo.nexmomessage import NexmoMessage%0Afrom django.conf import settings%0A%0A%0Adef send_message(to, message):%0A %22%22%22Shortcut to send a sms using libnexmo api.%0A%0A Usage:%0A%0A %3E%3E%3E from nexmo import send_message%0A %3E%3E%3E send_message('+33612345678', 'My sms message body')%0A %22%22%22%0A params = %7B%0A 'username': settings.NEXMO_USERNAME,%0A 'password': settings.NEXMO_PASSWORD,%0A 'type': 'unicode',%0A 'from': settings.NEXMO_FROM,%0A 'to': to,%0A 'text': message.encode('utf-8'),%0A %7D%0A sms = NexmoMessage(params)%0A response = sms.send_request()%0A return response%0A
49b68f35bb6555eaad7cd5e3bfeb4e7fadb500ba
Add intermediate tower 4
pythonwarrior/towers/intermediate/level_004.py
pythonwarrior/towers/intermediate/level_004.py
Python
0.998117
@@ -0,0 +1,935 @@ +# ----%0A# %7CC s %7C%0A# %7C @ S%7C%0A# %7CC s%3E%7C%0A# ----%0A%0Alevel.description(%22Your ears become more in tune with the surroundings. %22%0A %22Listen to find enemies and captives!%22)%0Alevel.tip(%22Use warrior.listen to find spaces with other units, %22%0A %22and warrior.direction_of to determine what direction they're in.%22)%0Alevel.clue(%22Walk towards an enemy or captive with %22%0A %22warrior.walk_(warrior.direction_of(warrior.listen()%5B0%5D)), %22%0A %22once len(warrior.listen()) == 0 then head for the stairs.%22)%0Alevel.time_bonus(55)%0Alevel.ace_score(144)%0Alevel.size(4, 3)%0Alevel.stairs(3, 2)%0A%0Adef add_abilities(warrior):%0A warrior.add_abilities('listen')%0A warrior.add_abilities('direction_of')%0A%0Alevel.warrior(1, 1, 'east', func=add_abilities)%0A%0Alevel.unit('captive', 0, 0, 'east')%0Alevel.unit('captive', 0, 2, 'east')%0Alevel.unit('sludge', 2, 0, 'south')%0Alevel.unit('thick_sludge', 3, 1, 'west')%0Alevel.unit('sludge', 2, 2, 'north')%0A
7662d0a0701381b37f60d42e7dbf04d7950c18ad
add management command to print out duplicate bihar tasks
custom/bihar/management/commands/bihar_cleanup_tasks.py
custom/bihar/management/commands/bihar_cleanup_tasks.py
Python
0.000001
@@ -0,0 +1,2456 @@ +import csv%0Afrom django.core.management.base import BaseCommand%0Afrom corehq.apps.hqcase.utils import get_cases_in_domain%0Afrom dimagi.utils.decorators.log_exception import log_exception%0A%0A%0Aclass Command(BaseCommand):%0A %22%22%22%0A Creates the backlog of repeat records that were dropped when bihar repeater%0A infrastructure went down.%0A %22%22%22%0A%0A @log_exception()%0A def handle(self, *args, **options):%0A domain = 'care-bihar'%0A root_types = ('cc_bihar_pregnancy', 'cc_bihar_newborn')%0A TASK_TYPE = 'task'%0A # loop through all mother cases, then all child cases%0A # for each case get all associated tasks%0A # if any duplicates found, clean up / print them%0A with open('bihar-duplicate-tasks.csv', 'wb') as f:%0A writer = csv.writer(f, dialect=csv.excel)%0A _dump_headings(writer)%0A for case_type in root_types:%0A for parent_case in get_cases_in_domain(domain, case_type):%0A try:%0A tasks = filter(lambda subcase: subcase.type == TASK_TYPE, parent_case.get_subcases())%0A if tasks:%0A types = %5B_task_id(t) for t in tasks%5D%0A unique_types = set(types)%0A if len(unique_types) != len(tasks):%0A for type_being_checked in unique_types:%0A matching_cases = %5Bt for t in tasks if _task_id(t) == type_being_checked%5D%0A if len(matching_cases) %3E 1:%0A _dump(parent_case, matching_cases, writer)%0A except Exception, e:%0A print 'error with case %25s (%25s)' %25 (parent_case._id, e)%0A%0Adef _task_id(task_case):%0A id = getattr(task_case, 'task_id', None)%0A if id is None:%0A print '%25s has no task id' %25 task_case._id%0A return id%0A%0Adef _dump_headings(csv_writer):%0A csv_writer.writerow(%5B%0A 'parent case id',%0A 'task case id',%0A 'task id',%0A 'date created',%0A 'closed?',%0A 'keep?',%0A %5D)%0A%0Adef _dump(parent, tasklist, csv_writer):%0A tasklist = sorted(tasklist, key=lambda case: (not case.closed, case.opened_on))%0A for i, task in enumerate(tasklist):%0A csv_writer.writerow(%5B%0A parent._id,%0A task._id,%0A _task_id(task),%0A task.opened_on,%0A task.closed,%0A i==0,%0A %5D)%0A
245879ce699b275edc3ee17e4cba1146241f25de
Add GLib mainllop transport for xmlrpcserver
wizbit/xmlrpcdeferred.py
wizbit/xmlrpcdeferred.py
Python
0
@@ -0,0 +1,2780 @@ +import gobject%0A%0Aimport xmlrpclib%0A%0Aclass XMLRPCDeferred (gobject.GObject):%0A %22%22%22Object representing the delayed result of an XML-RPC%0A request.%0A%0A .is_ready: bool%0A True when the result is received; False before then.%0A .value : any%0A Once is_ready=True, this attribute contains the result of the%0A request. If this value is an instance of the xmlrpclib.Fault%0A class, then some exception occurred during the request's%0A processing.%0A%0A %22%22%22%0A __gsignals__ = %7B%0A 'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())%0A %7D%0A def __init__ (self, transport, http):%0A self.__gobject_init__()%0A self.transport = transport%0A self.http = http%0A self.value = None%0A self.is_ready = False%0A%0A sock = self.http._conn.sock%0A self.src_id = gobject.io_add_watch(sock,%0A gobject.IO_IN %7C gobject.IO_HUP,%0A self.handle_io)%0A%0A def handle_io (self, source, condition):%0A # Triggered when there's input available on the socket.%0A # The assumption is that all the input will be available%0A # relatively quickly.%0A self.read()%0A%0A # Returning false prevents this callback from being triggered%0A # again. We also remove the monitoring of this file%0A # descriptor.%0A gobject.source_remove(self.src_id)%0A return False%0A%0A def read (self):%0A errcode, errmsg, headers = self.http.getreply()%0A%0A if errcode != 200:%0A raise ProtocolError(%0A host + handler,%0A errcode, errmsg,%0A headers%0A )%0A%0A try:%0A result = xmlrpclib.Transport._parse_response(self.transport,%0A self.http.getfile(), None)%0A except xmlrpclib.Fault, exc:%0A result = exc%0A%0A self.value = result%0A self.is_ready = True%0A self.emit('ready')%0A%0A def __len__ (self):%0A # XXX egregious hack!!!%0A # The code in xmlrpclib.ServerProxy calls len() on the object%0A # returned by the transport, and if it's of length 1 returns%0A # the contained object. Therefore, this __len__ method%0A # returns a completely fake length of 2.%0A return 2 %0A%0A%0Aclass GXMLRPCTransport (xmlrpclib.Transport):%0A def request(self, host, handler, request_body, verbose=0):%0A # issue XML-RPC request%0A%0A h = self.make_connection(host)%0A if verbose:%0A h.set_debuglevel(1)%0A%0A self.send_request(h, handler, request_body)%0A self.send_host(h, host)%0A self.send_user_agent(h)%0A self.send_content(h, request_body)%0A%0A self.verbose = verbose%0A%0A return XMLRPCDeferred(self, h)%0A%0A
0880d067f478ba6474e433e620a1e48e23ed9c34
Add nginx+uWSGI for 10% perf improvement over gunicorn
wsgi/setup_nginxuwsgi.py
wsgi/setup_nginxuwsgi.py
Python
0
@@ -0,0 +1,915 @@ +import subprocess%0Aimport multiprocessing%0Aimport os%0A%0Abin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')%0Aconfig_dir = os.path.expanduser('~/FrameworkBenchmarks/config')%0ANCPU = multiprocessing.cpu_count()%0A%0Adef start(args):%0A try:%0A subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' +%0A config_dir + '/nginx_uwsgi.conf', shell=True)%0A # Run in the background, but keep stdout/stderr for easy debugging%0A subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' +%0A ' --processes ' + str(NCPU) +%0A ' --wsgi hello:app',%0A shell=True, cwd='wsgi')%0A return 0%0A except subprocess.CalledProcessError:%0A return 1%0A%0Adef stop():%0A subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True)%0A subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True)%0A return 0%0A
d4f4d93e8180bb2046c4e37111edfee18f18013e
Bump setuptools to latest
readthedocs/doc_builder/python_environments.py
readthedocs/doc_builder/python_environments.py
import logging
import os
import shutil

from django.conf import settings

from readthedocs.doc_builder.config import ConfigWrapper
from readthedocs.doc_builder.loader import get_builder_class
from readthedocs.projects.constants import LOG_TEMPLATE

log = logging.getLogger(__name__)


class PythonEnvironment(object):

    def __init__(self, version, build_env, config=None):
        self.version = version
        self.project = version.project
        self.build_env = build_env
        if config:
            self.config = config
        else:
            self.config = ConfigWrapper(version=version, yaml_config={})
        # Compute here, since it's used a lot
        self.checkout_path = self.project.checkout_path(self.version.slug)

    def _log(self, msg):
        log.info(LOG_TEMPLATE
                 .format(project=self.project.slug,
                         version=self.version.slug,
                         msg=msg))

    def delete_existing_build_dir(self):
        # Handle deleting old build dir
        build_dir = os.path.join(
            self.venv_path(),
            'build')
        if os.path.exists(build_dir):
            self._log('Removing existing build directory')
            shutil.rmtree(build_dir)

    def install_package(self):
        setup_path = os.path.join(self.checkout_path, 'setup.py')
        if os.path.isfile(setup_path) and self.config.install_project:
            if getattr(settings, 'USE_PIP_INSTALL', False):
                self.build_env.run(
                    'python',
                    self.venv_bin(filename='pip'),
                    'install',
                    '--ignore-installed',
                    '--cache-dir',
                    self.project.pip_cache_path,
                    '.',
                    cwd=self.checkout_path,
                    bin_path=self.venv_bin()
                )
            else:
                self.build_env.run(
                    'python',
                    'setup.py',
                    'install',
                    '--force',
                    cwd=self.checkout_path,
                    bin_path=self.venv_bin()
                )

    def venv_bin(self, filename=None):
        """Return path to the virtualenv bin path, or a specific binary

        :param filename: If specified, add this filename to the path return
        :returns: Path to virtualenv bin or filename in virtualenv bin
        """
        parts = [self.venv_path(), 'bin']
        if filename is not None:
            parts.append(filename)
        return os.path.join(*parts)


class Virtualenv(PythonEnvironment):

    def venv_path(self):
        return os.path.join(self.project.doc_path, 'envs', self.version.slug)

    def setup_base(self):
        site_packages = '--no-site-packages'
        if self.config.use_system_site_packages:
            site_packages = '--system-site-packages'
        env_path = self.venv_path()
        self.build_env.run(
            self.config.python_interpreter,
            '-mvirtualenv',
            site_packages,
            env_path,
            bin_path=None,  # Don't use virtualenv bin that doesn't exist yet
        )

    def install_core_requirements(self):
        requirements = [
            'sphinx==1.3.4',
            'Pygments==2.0.2',
            'setuptools==18.6.1',
            'docutils==0.12',
            'mkdocs==0.15.0',
            'mock==1.0.1',
            'pillow==2.6.1',
            'readthedocs-sphinx-ext==0.5.4',
            'sphinx-rtd-theme==0.1.9',
            'alabaster>=0.7,<0.8,!=0.7.5',
            'commonmark==0.5.4',
            'recommonmark==0.1.1',
        ]

        cmd = [
            'python',
            self.venv_bin(filename='pip'),
            'install',
            '--use-wheel',
            '-U',
            '--cache-dir',
            self.project.pip_cache_path,
        ]
        if self.config.use_system_site_packages:
            # Other code expects sphinx-build to be installed inside the
            # virtualenv.  Using the -I option makes sure it gets installed
            # even if it is already installed system-wide (and
            # --system-site-packages is used)
            cmd.append('-I')
        cmd.extend(requirements)
        self.build_env.run(
            *cmd,
            bin_path=self.venv_bin()
        )

    def install_user_requirements(self):
        requirements_file_path = self.config.requirements_file
        if not requirements_file_path:
            builder_class = get_builder_class(self.project.documentation_type)
            docs_dir = (builder_class(build_env=self.build_env, python_env=self)
                        .docs_dir())
            for path in [docs_dir, '']:
                for req_file in ['pip_requirements.txt', 'requirements.txt']:
                    test_path = os.path.join(self.checkout_path, path, req_file)
                    if os.path.exists(test_path):
                        requirements_file_path = test_path
                        break

        if requirements_file_path:
            self.build_env.run(
                'python',
                self.venv_bin(filename='pip'),
                'install',
                '--exists-action=w',
                '--cache-dir',
                self.project.pip_cache_path,
                '-r{0}'.format(requirements_file_path),
                cwd=self.checkout_path,
                bin_path=self.venv_bin()
            )


class Conda(PythonEnvironment):

    def venv_path(self):
        return os.path.join(self.project.doc_path, 'conda', self.version.slug)

    def setup_base(self):
        conda_env_path = os.path.join(self.project.doc_path, 'conda')
        version_path = os.path.join(conda_env_path, self.version.slug)

        if os.path.exists(version_path):
            # Re-create conda directory each time to keep fresh state
            self._log('Removing existing conda directory')
            shutil.rmtree(version_path)
        self.build_env.run(
            'conda',
            'env',
            'create',
            '--name',
            self.version.slug,
            '--file',
            self.config.conda_file,
            bin_path=None,  # Don't use conda bin that doesn't exist yet
        )

    def install_core_requirements(self):

        # Use conda for requirements it packages
        requirements = [
            'sphinx==1.3.1',
            'Pygments==2.0.2',
            'docutils==0.12',
            'mock',
            'pillow==3.0.0',
            'sphinx_rtd_theme==0.1.7',
            'alabaster>=0.7,<0.8,!=0.7.5',
        ]

        cmd = [
            'conda',
            'install',
            '--yes',
            '--name',
            self.version.slug,
        ]
        cmd.extend(requirements)
        self.build_env.run(
            *cmd
        )

        # Install pip-only things.
        pip_requirements = [
            'mkdocs==0.15.0',
            'readthedocs-sphinx-ext==0.5.4',
            'commonmark==0.5.4',
            'recommonmark==0.1.1',
        ]

        pip_cmd = [
            'python',
            self.venv_bin(filename='pip'),
            'install',
            '-U',
            '--cache-dir',
            self.project.pip_cache_path,
        ]
        pip_cmd.extend(pip_requirements)
        self.build_env.run(
            *pip_cmd,
            bin_path=self.venv_bin()
        )

    def install_user_requirements(self):
        self.build_env.run(
            'conda',
            'env',
            'update',
            '--name',
            self.version.slug,
            '--file',
            self.config.conda_file,
        )
Python
0
@@ -3315,12 +3315,12 @@ ls== -18.6 +20.1 .1',
8fe4b03d7944f2facf8f261d440e7220c4d1228b
add file
Python/mapexp.py
Python/mapexp.py
Python
0.000001
@@ -0,0 +1,1016 @@ +#!/usr/bin/env python%0A%0A%22%22%22%0Amap(fun, iterable,...)%0A Return a list of the results of applying the function to the items of%0A the argument sequence(s). If more than one sequence is given, the%0A function is called with an argument list consisting of the corresponding%0A item of each sequence, substituting None for missing values when not all%0A sequences have the same length. If the function is None, return a list of the items of the sequence (or a list of tuples if more than one sequenc:e).%0A%0A%22%22%22%0Ali1 = %5B1, 2, 3, 4, 5%5D%0Ali2 = %5B1, 2, 3, 4, 7%5D%0Ali3 = %5B2, 3, 4, %22hehe%22%5D%0A%0A#return a list of sum of corresponding element in each list%0Aret = map(lambda x,y : x + y, li1, li2)%0Aprint ret%0A%0Aret = map(lambda x, y : str(x) + str(y), li2, li3 )%0Aprint ret%0A%0A#return a tubple consis of corresponding items from two list%0Aret = map(None, li2, li3)%0Aprint ret%0A%0A#convert to list of list%0Aret = map(list, ret)%0Aprint ret%0A%0A#flat list%0Ala = %5B%5D%0Afor e in ret:%0A%09la += e%0Aprint la%0A%0A#flat list%0Aret = reduce(lambda x, y: x + y, ret)%0Aprint ret%0A
6dd4b6ee9b9457d2362404aac71fd73e907bf535
Add Timeline class.
source/vistas/ui/controls/timeline.py
source/vistas/ui/controls/timeline.py
Python
0
@@ -0,0 +1,3168 @@ +import datetime%0Afrom bisect import insort%0A%0A%0Aclass TimelineFilter:%0A pass # Todo: implement%0A%0A%0Aclass Timeline:%0A _global_timeline = None%0A%0A @classmethod%0A def app(cls):%0A %22%22%22 Global timeline %22%22%22%0A%0A if cls._global_timeline is None:%0A cls._global_timeline = Timeline()%0A%0A return cls._global_timeline%0A%0A def __init__(self, start_time=None, end_time=None, current_time=None):%0A%0A self._start_time = start_time%0A self._end_time = end_time%0A self._current_time = current_time%0A self._min_step = None%0A self._timestamps = %5B%5D%0A%0A # Todo: implement TimelineFilter%0A # self._filtered_timestamps = %5B%5D # potentially unneeded, since we can filter on the fly now%0A # self._use_filter = False%0A%0A @property%0A def timestamps(self):%0A # return self._filtered_timestamps if self._use_filter else self._timestamps%0A return self._timestamps%0A%0A @property%0A def num_timestamps(self):%0A return len(self._timestamps)%0A%0A @property%0A def start_time(self):%0A return self._start_time%0A%0A @start_time.setter%0A def start_time(self, value: datetime.datetime):%0A self._start_time = value%0A # Todo: TimelineEvent?%0A%0A @property%0A def end_time(self):%0A return self._end_time%0A%0A @end_time.setter%0A def end_time(self, value):%0A self._end_time = value%0A # Todo: TimelineEvent?%0A%0A @property%0A def current_time(self):%0A return self._current_time%0A%0A @current_time.setter%0A def current_time(self, value: datetime.datetime):%0A if value not in self._timestamps:%0A if value %3E self._timestamps%5B-1%5D:%0A value = self._timestamps%5B-1%5D%0A elif value %3C self._timestamps%5B0%5D:%0A value = self._timestamps%5B0%5D%0A else:%0A # Go to nearest floor step%0A value = list(filter(lambda x: x %3E value, self._timestamps))%5B0%5D%0A self._current_time = value%0A # Todo: TimelineEvent?%0A%0A @property%0A def min_step(self):%0A return self._min_step%0A%0A def reset(self):%0A self._timestamps = %5B%5D%0A self._start_time, self._end_time, self._current_time = %5Bdatetime.datetime.fromtimestamp(0)%5D * 3%0A%0A def add_timestamp(self, timestamp: datetime.datetime):%0A if timestamp not in self._timestamps:%0A if timestamp %3E self._timestamps%5B-1%5D:%0A self.end_time = timestamp%0A elif timestamp %3C self._timestamps%5B0%5D:%0A self.start_time = timestamp%0A insort(self._timestamps, timestamp) # unique and sorted%0A%0A # recalculate smallest timedelta%0A self._min_step = self._timestamps%5B-1%5D - self._timestamps%5B0%5D%0A for i in range(len(self._timestamps) - 1):%0A diff = self._timestamps%5Bi+1%5D - self._timestamps%5Bi+1%5D%0A self._min_step = diff if diff %3C self._min_step else self._min_step%0A%0A def index_at_time(self, time: datetime.datetime):%0A return self._timestamps.index(time)%0A%0A @property%0A def current_index(self):%0A return self.index_at_time(self._current_time)%0A%0A def time_at_index(self, index):%0A return self.timestamps%5Bindex%5D%0A
560d82cd4b7de72a4fada77b0fe13bfb1caa9790
package script
package.py
package.py
Python
0.000002
@@ -0,0 +1,646 @@ +import os%0Afor root, dirs, files in os.walk(os.path.dirname(os.path.abspath(__file__))):%0A    for name in files:%0A        if name.endswith(%22.java%22):%0A            # build the full path so files in subdirectories are found too%0A            path = os.path.join(root, name)%0A            f = open(path, %22r%22)%0A            lines = f.readlines()%0A            f.close()%0A            f = open(path, %22w%22)%0A            for line in lines:%0A                if %22package%22 not in line:%0A                    f.write(line)%0A            f.close()%0A%0A%0A%0A#filename = %22hello.java%22%0A#file = open(filename, %22r%22)%0A#lines = file.readlines()%0A#file.close()%0A#file = open(filename, %22w%22)%0A#for line in lines:%0A# if %22package%22 not in line:%0A# file.write(line)%0A#%0A#file.close()%0A%0A%0A%0A
cc9ce576a33c60acc9f60f12b42e56f474b760ac
Add json_jinja renderer
salt/renderers/json_jinja.py
salt/renderers/json_jinja.py
Python
0.000668
@@ -0,0 +1,657 @@ +'''%0AThe json_jinja rendering engine: this renderer will take a json file%0Awith the jinja template and render it to a high data format for salt states.%0A'''%0A%0A# Import python libs%0Aimport os%0Aimport json%0A%0A# Import Third Party libs%0Afrom jinja2 import Template%0A%0Adef render(template):%0A    '''%0A    Render the data passing the functions and grains into the rendering system%0A    '''%0A    if not os.path.isfile(template):%0A        return %7B%7D%0A    passthrough = %7B%7D%0A    passthrough.update(__salt__)%0A    passthrough.update(__grains__)%0A    template = Template(open(template, 'r').read())%0A    json_data = template.render(**passthrough)%0A    return json.loads(json_data)%0A%0A
e35711d368faadaa017186200092297f264648fe
Add web ui
nodes/web_ui.py
nodes/web_ui.py
Python
0.000001
@@ -0,0 +1,1771 @@ +#!/usr/bin/env python%0Aimport roslib; roslib.load_manifest('rospilot')%0Aimport rospy%0Afrom pymavlink import mavutil%0Aimport rospilot.msg%0A%0Afrom BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer%0A%0APORT_NUMBER = 8085%0A%0Aarmed = None%0A%0A#This class handles any incoming request from%0A#the browser%0Aclass HttpHandler(BaseHTTPRequestHandler):%0A%0A    #Handler for the GET requests%0A    def do_GET(self):%0A        self.send_response(200)%0A        self.send_header('Content-type','text/html')%0A        self.send_header('Refresh','1;url=/')%0A        self.end_headers()%0A        # Send the html message%0A        self.wfile.write(%0A            %22%3Chtml%3E%22%0A            %22%3Cbody%3E%22%0A            %22%3Ch2%22 + (%22 style='color:red;'%22 if armed else %22%22) + %22%3E%22%0A            + (%22ARMED%22 if armed else %22DISARMED%22) +%0A            %22%3C/h2%3E%22%0A            %22%3Ca href='/arm'%3E%22%0A            + (%22disarm%22 if armed else %22arm%22) +%0A            %22%3C/a%3E%22%0A            %22%3C/body%3E%22%0A            %22%3C/html%3E%22)%0A        if 'arm' in self.path:%0A            node.send_arm(not armed)%0A        return%0A%0Aclass WebUiNode:%0A    def __init__(self):%0A        self.pub_set_mode = rospy.Publisher('set_mode', rospilot.msg.BasicMode)%0A        rospy.Subscriber(%22basic_status%22, rospilot.msg.BasicMode, self.handle_status)%0A        self.http_server = HTTPServer(('', PORT_NUMBER), HttpHandler)%0A%0A    def handle_status(self, data):%0A        global armed%0A        armed = data.armed%0A%0A    def send_arm(self, arm):%0A        self.pub_set_mode.publish(arm)%0A%0A    def run(self):%0A        rospy.init_node('rospilot_webui')%0A        rospy.loginfo(%22Running%22)%0A        while not rospy.is_shutdown():%0A            self.http_server.handle_request()%0A        # server_close() is the shutdown method HTTPServer actually provides%0A        self.http_server.server_close()%0A%0Aif __name__ == '__main__':%0A    node = WebUiNode()%0A    node.run()%0A
c8ede03a393ae1287a9a34e86af40cd6a8b3027b
add missing module (RBL-3757)
mint/targets.py
mint/targets.py
Python
0
@@ -0,0 +1,2325 @@ +#%0A# Copyright (c) 2008 rPath, Inc.%0A#%0A# All Rights Reserved%0A#%0A%0Afrom mint import database, mint_error%0Aimport simplejson%0A%0Aclass TargetsTable(database.KeyedTable):%0A name = 'Targets'%0A key = 'targetId'%0A%0A fields = ('targetId', 'targetType', 'targetName')%0A%0A def addTarget(self, targetType, targetName):%0A cu = self.db.cursor()%0A targetId = self.getTargetId(targetType, targetName, None)%0A if targetId:%0A raise mint_error.TargetExists( %5C%0A %22Target named '%25s' of type '%25s' already exists%22,%0A targetName, targetType)%0A cu = cu.execute(%22INSERT INTO Targets (targetType, targetName) VALUES(?, ?)%22, targetType, targetName)%0A self.db.commit()%0A return cu.lastid()%0A%0A def getTargetId(self, targetType, targetName, default = -1):%0A cu = self.db.cursor()%0A cu.execute(%22%22%22SELECT targetId FROM Targets WHERE targetType=?%0A AND targetName=?%22%22%22, targetType, targetName)%0A res = cu.fetchone()%0A if res:%0A return res%5B0%5D%0A if default == -1:%0A raise mint_error.TargetMissing(%22No target named '%25s' of type '%25s'%22,%0A targetName, targetType)%0A return default%0A%0A def deleteTarget(self, targetId):%0A cu = self.db.cursor()%0A cu.execute(%22DELETE FROM Targets WHERE targetId=?%22, targetId)%0A self.db.commit()%0A%0Aclass TargetDataTable(database.DatabaseTable):%0A name = 'TargetData'%0A%0A fields = ('targetId', 'name', 'value')%0A%0A def addTargetData(self, targetId, targetData):%0A cu = self.db.cursor()%0A # perhaps check the id to be certain it's unique%0A for name, value in targetData.iteritems():%0A value = simplejson.dumps(value)%0A cu.execute(%22INSERT INTO TargetData VALUES(?, ?, ?)%22,%0A targetId, name, value)%0A self.db.commit()%0A%0A def getTargetData(self, targetId):%0A cu = self.db.cursor()%0A cu.execute(%22SELECT name, value FROM TargetData WHERE targetId=?%22,%0A targetId)%0A res = %7B%7D%0A for name, value in cu.fetchall():%0A res%5Bname%5D = simplejson.loads(value)%0A return res%0A%0A def deleteTargetData(self, targetId):%0A cu = self.db.cursor()%0A cu.execute(%22DELETE FROM TargetData WHERE targetId=?%22, targetId)%0A self.db.commit()%0A
3709bcbd421d82f9404ab3b054989546d95c006f
Fix another broken sc2reader.plugins reference.
sc2reader/scripts/sc2json.py
sc2reader/scripts/sc2json.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
Python
0
@@ -155,16 +155,26 @@ 2reader. +factories. plugins.
c3c559f893e31e728a429cf446039781cea1f25d
Add unit tests for `%tensorflow_version`
tests/test_tensorflow_magics.py
tests/test_tensorflow_magics.py
Python
0
@@ -0,0 +1,1732 @@ +# Copyright 2019 Google Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%22%22%22Tests for the %60%25tensorflow_version%60 magic.%22%22%22%0A%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport sys%0Aimport unittest%0A%0Afrom google.colab import _tensorflow_magics%0A%0A%0Aclass TensorflowMagicsTest(unittest.TestCase):%0A%0A @classmethod%0A def setUpClass(cls):%0A super(TensorflowMagicsTest, cls).setUpClass()%0A cls._original_version = _tensorflow_magics._tf_version%0A cls._original_sys_path = sys.path%5B:%5D%0A%0A def setUp(self):%0A super(TensorflowMagicsTest, self).setUp()%0A _tensorflow_magics._tf_version = self._original_version%0A sys.path%5B:%5D = self._original_sys_path%0A%0A def test_switch_1x_to_2x(self):%0A _tensorflow_magics._tensorflow_version(%222.x%22)%0A tf2_path = _tensorflow_magics._available_versions%5B%222.x%22%5D%0A self.assertEqual(sys.path%5B1:%5D, self._original_sys_path)%0A self.assertTrue(sys.path%5B0%5D.startswith(tf2_path), (sys.path%5B0%5D, tf2_path))%0A%0A def test_switch_back(self):%0A _tensorflow_magics._tensorflow_version(%222.x%22)%0A _tensorflow_magics._tensorflow_version(%221.x%22)%0A self.assertEqual(sys.path, self._original_sys_path)%0A%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
fe879d7f6f56db410eb5d3d9aeb5691d020661c7
Create shackbullet.py
shackbullet.py
shackbullet.py
Python
0.000109
@@ -0,0 +1,1418 @@ +#Import the modules%0Aimport requests%0Aimport json%0Aimport uuid%0A%0A#Edit these to your shacknews login credentials%0Ashackname = 'Username'%0Ashackpass = 'ShackPassword'%0Apushbulletkey = 'access token from https://www.pushbullet.com/account'%0A%0A#Fun begins%0A%0A#generate a client id from the winchatty namespace%0Aclient_id = uuid.uuid5(uuid.NAMESPACE_DNS, 'winchatty.com')%0A#setup registration payload%0Apayload = %7B 'id' : client_id, 'name' : 'shackbullet', 'username' : shackname, 'password' : shackpass %7D%0A%0A#register this client%0Ar = requests.post(%22https://winchatty.com/v2/notifications/registerRichClient%22, data=payload)%0A%0A#We are setup so start waiting for notifications%0A#setup checking payload%0Apayload = %7B 'clientId' : client_id %7D%0Abulletheaders = %7B 'Authorization' : 'Bearer ' + pushbulletkey %7D%0A#main loop to check for notifications%0Awhile True:%0A    #wait for notifications%0A    r = requests.post(%22http://notifications.winchatty.com/v2/notifications/waitForNotification%22, data=payload)%0A    #parse the response so the message fields below are available%0A    data = json.loads(r.text)%0A    #got one, now setup the payload for pushbullet%0A    bulletpayload = %7B 'type' : 'link', 'title' : data%5B'messages'%5D%5B0%5D%5B'subject'%5D + ': ' + data%5B'messages'%5D%5B0%5D%5B'body'%5D, 'body' : data%5B'messages'%5D%5B0%5D%5B'body'%5D, 'url' : 'http://www.shacknews.com/chatty?id=' + str(data%5B'messages'%5D%5B0%5D%5B'postId'%5D) + '#item_' + str(data%5B'messages'%5D%5B0%5D%5B'postId'%5D) %7D%0A    #send the notification to pushbullet%0A    r = requests.post(%22https://api.pushbullet.com/v2/pushes%22, headers=bulletheaders, data=bulletpayload)%0A
dc4a3ec9a8bb042cef115005d8ebf11dc2c5889e
Longest increasing subsequence in the list
longest_increasing_subsequence.py
longest_increasing_subsequence.py
Python
1
@@ -0,0 +1,1162 @@ +l = %5B3,4,5,9,8,1,2,7,7,7,7,7,7,7,6,0,1%5D%0Aempty = %5B%5D%0Aone = %5B1%5D%0Atwo = %5B2,1%5D%0Athree = %5B1,0,2,3%5D%0Atricky = %5B1,2,3,0,-2,-1%5D%0Aring = %5B3,4,5,0,1,2%5D%0Ainternal = %5B9,1,2,3,4,5,0%5D%0A%0A# consider your list as a ring, continuous and infinite%0Adef longest_increasing_subsequence(l):%0A length = len(l)%0A if length == 0: return 0 # list is empty%0A i, tmp, longest = %5B0, 1, 1%5D%0A # 1 %3C tmp means that ring is finished, but the sequence continue to increase%0A while i %3C length or 1 %3C tmp:%0A # compare elements on the ring%0A if l%5Bi%25length%5D %3C l%5B(i+1)%25length%5D:%0A tmp += 1%0A else:%0A if longest %3C tmp: longest = tmp%0A tmp = 1%0A i += 1%0A return longest%0A%0Aprint(%220 == %22 + str(longest_increasing_subsequence(empty)))%0Aprint(%221 == %22 + str(longest_increasing_subsequence(one)))%0Aprint(%222 == %22 + str(longest_increasing_subsequence(two)))%0Aprint(%223 == %22 + str(longest_increasing_subsequence(three)))%0Aprint(%225 == %22 + str(longest_increasing_subsequence(tricky)))%0Aprint(%225 == %22 + str(longest_increasing_subsequence(internal)))%0Aprint(%226 == %22 + str(longest_increasing_subsequence(ring)))%0Aprint(%226 == %22 + str(longest_increasing_subsequence(l)))%0A
0b419a71a414e605af57029286b627e286c5df47
add session
controller/addSession.py
controller/addSession.py
Python
0
@@ -0,0 +1,630 @@ +#!/usr/local/bin/python3%0A%0A%22%22%22%0Acreated_by: Aninda Manocha%0Acreated_date: 3/5/2015%0Alast_modified_by: Aninda Manocha%0Alast_modified_date: 3/5/2015%0A%22%22%22%0A%0Aimport constants%0Aimport utils%0Aimport json%0Afrom sql.session import Session%0Afrom sql.user import User%0A%0A#Format of session%0A#requestType: addSession%0A#token: %22string%22%0A#ip: %22string%22%0A#user: User%0A%0Adef iChooseU(json):%0A    thisUser = utils.findUser(json)%0A%0A    token = json%5B%22token%22%5D%0A    ip = json%5B%22ip%22%5D%0A    user = json%5B%22user%22%5D%0A    theUser = User.get(user%5B%22id%22%5D)%5B0%5D%0A%0A    # assumes constants.ACTIVE is the active-session status flag%0A    newSession = Session.noID(token, ip, theUser, constants.ACTIVE)%0A    newSession.add()%0A%0A    return utils.successJson(json)%0A
d83e30cc2ec46eeb2f7c27c26e0fc3d2d3e6de90
add an environment checker
scripts/check_environment.py
scripts/check_environment.py
Python
0
@@ -0,0 +1,87 @@ +%22%22%22%0ASomething to run to make sure our machine is up to snuff!%0A%22%22%22%0Aimport pg%0Aimport xlwt%0A%0A# if both imports succeed, the environment is usable%0Aprint(%22Environment OK: pg and xlwt are importable.%22)%0A
ef78460a3303216f424247203cf0b5e1ecc88197
Add test for ticket #1074.
scipy/optimize/tests/test_regression.py
scipy/optimize/tests/test_regression.py
Python
0.000005
@@ -0,0 +1,322 @@ +%22%22%22Regression tests for optimize.%0A%0A%22%22%22%0Afrom numpy.testing import *%0Aimport numpy as np%0A%0A%0Aclass TestRegression(TestCase):%0A def test_newton_x0_is_0(self):%0A %22%22%22Ticket #1074%22%22%22%0A import scipy.optimize%0A tgt = 1%0A res = scipy.optimize.newton(lambda x: x - 1, 0)%0A assert_almost_equal(res, tgt)%0A
c654841595fd679c511d2d3b91c2edc9335c78cc
Create Quiz-Problem8.py
Quiz-Problem8.py
Quiz-Problem8.py
Python
0.000001
@@ -0,0 +1,527 @@ +# PROBLEM 8%0A%0A%0Adef satisfiesF(L):%0A    %22%22%22%0A    Assumes L is a list of strings%0A    Assume function f is already defined for you and it maps a string to a Boolean%0A    Mutates L such that it contains all of the strings, s, originally in L such%0A    that f(s) returns True, and no other elements%0A    Returns the length of L after mutation%0A    %22%22%22%0A%0A    harsh = L%5B:%5D%0A    for x in harsh:%0A        if not f(x):%0A            a = L.index(x)%0A            del L%5Ba%5D%0A    return len(L)%0A%0A%0A# The grader defines f and L, then calls:%0A# run_satisfiesF(L, satisfiesF)%0A
aa3cf6a383c38a9f17172ae2a754a8e67243e318
add new form to bulk import indicators from json file
corehq/apps/indicators/forms.py
corehq/apps/indicators/forms.py
Python
0
@@ -0,0 +1,1257 @@ +from django import forms%0Afrom django.utils.translation import ugettext_noop, ugettext as _%0Afrom bootstrap3_crispy import bootstrap as twbs%0Afrom bootstrap3_crispy.helper import FormHelper%0Afrom bootstrap3_crispy import layout as crispy%0Afrom corehq.apps.style.crispy import FormActions%0A%0A%0Aclass ImportIndicatorsFromJsonFileForm(forms.Form):%0A json_file = forms.FileField(%0A label=ugettext_noop(%22Exported File%22),%0A required=False,%0A )%0A override_existing = forms.BooleanField(%0A label=_(%22Override Existing Indicators%22),%0A required=False,%0A )%0A%0A def __init__(self, *args, **kwargs):%0A super(ImportIndicatorsFromJsonFileForm, self).__init__(*args, **kwargs)%0A%0A self.helper = FormHelper()%0A self.helper.form_method = 'POST'%0A self.helper.form_class = 'form-horizontal'%0A self.helper.label_class = 'col-lg-2'%0A self.helper.field_class = 'col-lg-8'%0A self.helper.layout = crispy.Layout(%0A crispy.Field('json_file'),%0A crispy.Field('override_existing'),%0A FormActions(%0A twbs.StrictButton(_(%22Import Indicators%22),%0A type='submit',%0A css_class='btn-primary'),%0A ),%0A%0A )%0A
04f851706b4384add01e6cd41f31305d587f7a36
Create pushbullet.py
pushbullet.py
pushbullet.py
Python
0.00003
@@ -0,0 +1,859 @@ +# Python unofficial Pushbullet client%0A# (C) 2015 Patrick Lambert - http://dendory.net - Provided under MIT License%0Aimport urllib.request%0Aimport urllib.parse  # imported explicitly instead of relying on urllib.request pulling it in%0Aimport sys%0A%0Aapi_key = %22XXXXXXXXX%22%0Atitle = %22My Title%22%0Amessage = %22My Body%22%0A%0Adef notify(key, title, text):%0A%09post_params = %7B%0A%09%09'type': 'note',%0A%09%09'title': title,%0A%09%09'body': text%0A%09%7D%0A%09post_args = urllib.parse.urlencode(post_params)%0A%09data = post_args.encode()%0A%09request = urllib.request.Request(url='https://api.pushbullet.com/v2/pushes', headers=%7B'Authorization': 'Bearer ' + key%7D, data=data)%0A%09result = urllib.request.urlopen(request)%0A%09return result.read().decode('utf-8')%0A%0Aif '-key' in sys.argv:%0A%09api_key = sys.argv%5Bsys.argv.index('-key')+1%5D%0Aif '-title' in sys.argv:%0A%09title = sys.argv%5Bsys.argv.index('-title')+1%5D%0Aif '-message' in sys.argv:%0A%09message = sys.argv%5Bsys.argv.index('-message')+1%5D%0A%0Aprint(notify(api_key, title, message))%0A
57b19c56b8be8c8131cc3d98cb9f30da3398412b
create a reporting helper for logging channel info
remoto/log.py
remoto/log.py
Python
0
@@ -0,0 +1,329 @@ +%0A%0Adef reporting(conn, result):%0A log_map = %7B'debug': conn.logger.debug, 'error': conn.logger.error%7D%0A while True:%0A try:%0A received = result.receive()%0A level_received, message = received.items()%5B0%5D%0A log_map%5Blevel_received%5D(message.strip('%5Cn'))%0A except EOFError:%0A break%0A
371df7c27fa1c4130214c58ececa83b0e0b6b165
Create palindrome3.py
palindrome3.py
palindrome3.py
Python
0.000034
@@ -0,0 +1,47 @@ +palindrome3 = lambda x: str(x) == str(x)%5B::-1%5D%0A
5129dd5de6f4a8c0451adbb5631940bb82b51a26
Add a script to enact updates to positions & alternative names
mzalendo/kenya/management/commands/kenya_apply_updates.py
mzalendo/kenya/management/commands/kenya_apply_updates.py
Python
0
@@ -0,0 +1,3477 @@ +from collections import defaultdict%0Aimport csv%0Aimport datetime%0Aimport errno%0Aimport hmac%0Aimport hashlib%0Aimport itertools%0Aimport json%0Aimport os%0Aimport re%0Aimport requests%0Aimport sys%0A%0Afrom django.core.management.base import NoArgsCommand, CommandError%0Afrom django.template.defaultfilters import slugify%0A%0Afrom django_date_extensions.fields import ApproximateDate%0A%0Afrom settings import IEBC_API_ID, IEBC_API_SECRET%0Afrom optparse import make_option%0A%0Afrom core.models import Place, PlaceKind, Person, ParliamentarySession, Position, PositionTitle, Organisation, OrganisationKind%0A%0Afrom iebc_api import *%0A%0Adata_directory = os.path.join(sys.path%5B0%5D, 'kenya', '2013-election-data')%0A%0Aheadings = %5B'Place Name',%0A 'Place Type',%0A 'Race Type',%0A 'Old?',%0A 'Existing Aspirant Position ID',%0A 'Existing Aspirant Person ID',%0A 'Existing Aspirant External ID',%0A 'Existing Aspirant Legal Name',%0A 'Existing Aspirant Other Names',%0A 'API Normalized Name',%0A 'API Code',%0A 'Action'%5D%0A%0Aclass Command(NoArgsCommand):%0A help = 'Update the database with aspirants from the IEBC website'%0A%0A option_list = NoArgsCommand.option_list + (%0A make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),%0A )%0A%0A def handle_noargs(self, **options):%0A%0A csv_filename = os.path.join(data_directory, 'positions-to-end-delete-and-alternative-names.csv')%0A%0A with open(csv_filename) as fp:%0A%0A reader = csv.DictReader(fp)%0A%0A for row in reader:%0A%0A alternative_names_to_add = row%5B'Alternative Names To Add'%5D%0A if not alternative_names_to_add:%0A continue%0A%0A position = Position.objects.get(pk=row%5B%22Existing Aspirant Position ID%22%5D)%0A%0A if alternative_names_to_add == '%5Bendpos%5D':%0A position.end_date = yesterday_approximate_date%0A maybe_save(position, **options)%0A elif alternative_names_to_add == '%5Bdelpos%5D':%0A if options%5B'commit'%5D:%0A position.delete()%0A else:%0A print %22------------------------------------------------------------------------%22%0A print alternative_names_to_add%0A names_to_add = %5Ban.title().strip() for an in alternative_names_to_add.split(', ')%5D%0A for n in names_to_add:%0A person = Person.objects.get(pk=row%5B'Existing Aspirant Person ID'%5D)%0A person.add_alternative_name(n)%0A maybe_save(person, **options)%0A%0A# for each county, representative, ward:%0A# for each contest_type:%0A# get all the current aspirants%0A# for each aspirant:%0A# find each other aspirant that has this aspirant as an alternative name%0A# (make a mapping of (person with multiple names) =%3E all people whose names match those)%0A# for each value in that mapping, check that they have the same API CODE%0A# set the key person's API CODE%0A# check that there's no extra data attached to the values peope, then remove the position + the person%0A%0A%0A%0A# check - if we're deleting a position, because there's already an older one there, make sure any IEBC code of the former is applied to the latter%0A
67e7d530b4b4ffa86c9f147751cf17828e024cba
add migration to create job table
migrations/versions/5706baf73b01_add_jobs_table.py
migrations/versions/5706baf73b01_add_jobs_table.py
Python
0.000001
@@ -0,0 +1,1741 @@ +%22%22%22Add jobs table%0A%0ARevision ID: 5706baf73b01%0ARevises: 6bd350cf4748%0ACreate Date: 2016-09-14 15:53:50.394610%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '5706baf73b01'%0Adown_revision = '6bd350cf4748'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Aimport server%0A%0A%0Adef upgrade():%0A ### commands auto generated by Alembic - please adjust! ###%0A op.create_table('job',%0A sa.Column('created', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),%0A sa.Column('id', sa.Integer(), nullable=False),%0A sa.Column('updated', sa.DateTime(timezone=True), nullable=True),%0A sa.Column('status', sa.Enum('queued', 'running', 'finished', name='status'), nullable=False),%0A sa.Column('user_id', sa.Integer(), nullable=False),%0A sa.Column('course_id', sa.Integer(), nullable=False),%0A sa.Column('name', sa.String(length=255), nullable=False),%0A sa.Column('description', sa.Text(), nullable=False),%0A sa.Column('failed', sa.Boolean(), nullable=False),%0A sa.Column('log', sa.Text(), nullable=True),%0A sa.ForeignKeyConstraint(%5B'course_id'%5D, %5B'course.id'%5D, name=op.f('fk_job_course_id_course')),%0A sa.ForeignKeyConstraint(%5B'user_id'%5D, %5B'user.id'%5D, name=op.f('fk_job_user_id_user')),%0A sa.PrimaryKeyConstraint('id', name=op.f('pk_job'))%0A )%0A op.create_index(op.f('ix_job_course_id'), 'job', %5B'course_id'%5D, unique=False)%0A op.create_index(op.f('ix_job_user_id'), 'job', %5B'user_id'%5D, unique=False)%0A ### end Alembic commands ###%0A%0A%0Adef downgrade():%0A ### commands auto generated by Alembic - please adjust! ###%0A op.drop_index(op.f('ix_job_user_id'), table_name='job')%0A op.drop_index(op.f('ix_job_course_id'), table_name='job')%0A op.drop_table('job')%0A ### end Alembic commands ###%0A
ec841c86348302dc67b48af93d2b3b5c8fb96b6e
add list
misc/list.py
misc/list.py
Python
0.000016
@@ -0,0 +1,230 @@ +#!/usr/bin/env python%0A%0A# Python 3: List comprehensions%0A%0Afruits = %5B'Banana', 'Apple', 'Lime'%5D%0A%0Aloud_fruits = %5Bfruit.upper() for fruit in fruits%5D%0A%0Aprint(loud_fruits)%0A%0A# List and the enumerate function%0Aprint(list(enumerate(fruits)))%0A
427fa7f57776a73b5ec0e5045114d5ac330e6a57
Create misc.py
misc/misc.py
misc/misc.py
Python
0
@@ -0,0 +1 @@ +%0A
0612769b07e88eae9865a16f1ae8162502fe65f9
Add senate evacuation
2016/1c/senate_evacuation.py
2016/1c/senate_evacuation.py
Python
0.000367
@@ -0,0 +1,2241 @@ +#!/usr/bin/env python%0A%0Afrom __future__ import print_function%0A%0Adef parse_senates(senates_str):%0A return %5Bint(_) for _ in senates_str.split(' ')%5D%0A%0Adef get_evacuation_plan(senates):%0A if not isinstance(senates, list):%0A raise TypeError%0A%0A num_parties = len(senates)%0A remaining_senates = senates%5B:%5D%0A evacuation = %5B%5D%0A%0A while sum(remaining_senates) %3E 0:%0A sorted_index = get_sorted_index(remaining_senates)%0A party_index0, party_index1 = sorted_index%5B:2%5D%0A if remaining_senates%5Bparty_index0%5D %3E 0:%0A evacuated_party0 = get_party(party_index0)%0A evacuation.append(evacuated_party0)%0A%0A if remaining_senates%5Bparty_index1%5D %3E 0:%0A evacuated_party1 = get_party(party_index1)%0A evacuation.append(evacuated_party1)%0A%0A evacuation.append(' ')%0A%0A remaining_senates%5Bparty_index0%5D += -1%0A remaining_senates%5Bparty_index1%5D += -1%0A%0A evacuation_plan = ''.join(evacuation)%5B:-1%5D%0A if evacuation_plan%5B-2%5D == ' ':%0A evacuation_plan = evacuation_plan%5B:-3%5D + ' ' + evacuation_plan%5B-3%5D + evacuation_plan%5B-1%5D%0A%0A return evacuation_plan%0A%0Adef get_sorted_index(seq):%0A return sorted(range(len(seq)), key=lambda i:-seq%5Bi%5D)%0A%0Adef get_party(party_index):%0A return chr(party_index + 65)%0A%0Aif __name__ == '__main__':%0A import os%0A%0A samples = %5B'2 2',%0A '3 2 2',%0A '1 1 2',%0A '2 3 1'%5D%0A%0A for sample in samples:%0A senates = parse_senates(sample)%0A print(get_evacuation_plan(senates))%0A%0A data_files = %5B'A-small-practice', 'A-large-practice'%5D%0A for f in data_files:%0A with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),%0A '%7B0%7D.in'.format(f)), 'r') as input_file:%0A lines = input_file.readlines()%0A input_count = int(lines%5B0%5D.replace('%5Cn' ,''))%0A inputs = %5Bline.replace('%5Cn', '') for line in lines%5B2::2%5D%5D%0A%0A i = 1%0A with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),%0A '%7B0%7D.out'.format(f)), 'w') as output_file:%0A for in_ in inputs:%0A senates = parse_senates(in_)%0A output_file.write('Case #%7B0%7D: %7B1%7D%5Cn'.format(i, get_evacuation_plan(senates)))%0A i += 1%0A
aabb57148cced94b31109b46adf83a43ca23f7a3
allow to apply std functions back
mosql/std.py
mosql/std.py
Python
0
@@ -0,0 +1,669 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A'''It applies the standard functions to :mod:%60mosql.util%60.%0A%0AThe usage:%0A%0A::%0A%0A import mosql.std%0A%0AIf you want to patch again:%0A%0A::%0A%0A mosql.std.patch()%0A%0AIt will replace the functions in :mod:%60mosql.util%60 with original standard functions.%0A'''%0A%0Aimport mosql.util%0A%0Adef patch():%0A mosql.util.escape = mosql.util.std_escape%0A mosql.util.format_param = mosql.util.std_format_param%0A mosql.util.delimit_identifier = mosql.util.std_delimit_identifier%0A mosql.util.stringify_bool = mosql.util.std_stringify_bool%0A mosql.util.escape_identifier = mosql.util.std_escape_identifier%0A%0Apatch() # patch it when load this module%0A
69892066449a40322a34b3a7b8e60e3fa99eef41
Create deobfuscator.py
ver.-0.1/deobfuscator.py
ver.-0.1/deobfuscator.py
Python
0.000015
@@ -0,0 +1 @@ +%0A
3c396700a52571d5aae2a12fac601f063a7af761
Add missing add_master.py.
devops/deployment/add_master.py
devops/deployment/add_master.py
Python
0
@@ -0,0 +1,444 @@ +#!/usr/bin/env python%0A# script to add minion config%0Aimport yaml%0Aimport sys%0Aimport os%0Af=open(%22/etc/salt/minion%22, 'r')%0Asettings=yaml.load(f)%0Af.close()%0Aip=os.environ%5B%22MASTER_IP%22%5D%0Aif settings%5B%22master%22%5D.__class__ == str:%0A settings%5B%22master%22%5D = %5Bsettings%5B%22master%22%5D%5D%0Asettings%5B%22master%22%5D = %5Bip%5D%0A#if not ip in settings%5B%22master%22%5D:%0A# settings%5B%22master%22%5D.insert(0, ip)%0Af=open(%22/etc/salt/minion%22, 'w')%0Af.write(yaml.dump(settings))%0Af.close()%0Aprint %22Success:%22%0A
00a059f172e1d6214d858370829e1034c2742ce4
add gevent run script
run_gevent.py
run_gevent.py
Python
0
@@ -0,0 +1,235 @@ +from gevent.monkey import patch_all; patch_all()%0Afrom gevent.wsgi import WSGIServer%0Afrom pypi_notifier import create_app%0Aapp = create_app('ProductionConfig')%0Ahttp_server = WSGIServer(('0.0.0.0', 5001), app)%0Ahttp_server.serve_forever()%0A
916b7fe4ee4c3c5c55278927a7116a4d1e0ad6d1
Add solarized256.py
solarized256.py
solarized256.py
Python
0
@@ -0,0 +1,2569 @@ +# -*- coding: utf-8 -*-%0A%0A%22%22%22%0A solarized256%0A ------------%0A%0A A Pygments style inspired by Solarized's 256 color mode.%0A%0A :copyright: (c) 2011 by Hank Gay, (c) 2012 by John Mastro.%0A :license: BSD, see LICENSE for more details.%0A%22%22%22%0A%0Afrom pygments.style import Style%0Afrom pygments.token import Token, Comment, Name, Keyword, Generic, Number, %5C%0A Operator, String%0A%0ABASE03 = %22#1c1c1c%22%0ABASE02 = %22#262626%22%0ABASE01 = %22#4e4e4e%22%0ABASE00 = %22#585858%22%0ABASE0 = %22#808080%22%0ABASE1 = %22#8a8a8a%22%0ABASE2 = %22#d7d7af%22%0ABASE3 = %22#ffffd7%22%0AYELLOW = %22#af8700%22%0AORANGE = %22#d75f00%22%0ARED = %22#af0000%22%0AMAGENTA = %22#af005f%22%0AVIOLET = %22#5f5faf%22%0ABLUE = %22#0087ff%22%0ACYAN = %22#00afaf%22%0AGREEN = %22#5f8700%22%0A%0A%0Aclass Solarized256Style(Style):%0A background_color = BASE03%0A styles = %7B%0A Keyword: GREEN,%0A Keyword.Constant: ORANGE,%0A Keyword.Declaration: BLUE,%0A Keyword.Namespace: ORANGE,%0A #Keyword.Pseudo%0A Keyword.Reserved: BLUE,%0A Keyword.Type: RED,%0A%0A #Name%0A Name.Attribute: BASE1,%0A Name.Builtin: BLUE,%0A Name.Builtin.Pseudo: BLUE,%0A Name.Class: BLUE,%0A Name.Constant: ORANGE,%0A Name.Decorator: BLUE,%0A Name.Entity: ORANGE,%0A Name.Exception: YELLOW,%0A Name.Function: BLUE,%0A #Name.Label%0A #Name.Namespace%0A #Name.Other%0A Name.Tag: BLUE,%0A Name.Variable: BLUE,%0A #Name.Variable.Class%0A #Name.Variable.Global%0A #Name.Variable.Instance%0A%0A #Literal%0A #Literal.Date%0A String: CYAN,%0A String.Backtick: BASE01,%0A String.Char: CYAN,%0A String.Doc: CYAN,%0A #String.Double%0A String.Escape: RED,%0A String.Heredoc: CYAN,%0A #String.Interpol%0A #String.Other%0A String.Regex: RED,%0A #String.Single%0A #String.Symbol%0A Number: CYAN,%0A #Number.Float%0A #Number.Hex%0A #Number.Integer%0A #Number.Integer.Long%0A #Number.Oct%0A%0A Operator: BASE1,%0A Operator.Word: GREEN,%0A%0A #Punctuation: ORANGE,%0A%0A Comment: BASE01,%0A #Comment.Multiline%0A Comment.Preproc: GREEN,%0A #Comment.Single%0A Comment.Special: GREEN,%0A%0A #Generic%0A Generic.Deleted: CYAN,%0A Generic.Emph: 'italic',%0A Generic.Error: RED,%0A Generic.Heading: ORANGE,%0A Generic.Inserted: GREEN,%0A #Generic.Output%0A #Generic.Prompt%0A Generic.Strong: 'bold',%0A Generic.Subheading: ORANGE,%0A #Generic.Traceback%0A%0A Token: BASE1,%0A Token.Other: ORANGE,%0A %7D%0A
fd89f422e1a95729b31da683a50f601604a43e9b
Version 1 of pyAdd. Raw
pyAdd.py
pyAdd.py
Python
0.000003
@@ -0,0 +1,531 @@ +#!/usr/bin/python%0A# Name: Frank Lewis%0A# NSID: fbl773%0A# Student ID: 11194945%0A# Lecture: 04%0A# Tutorial: T08%0A# Assignment: lab _%0A# Synopsis: python - beans on toast... weird%0A%0Aprint %22Begin Adder%22%0A%0Ac = int(raw_input(%22How many elements?: %22))%0AtotalElements = c%0Aans = 0%0Awhile c %3E 0:%0A%09n = raw_input(%22Give Me a number to add: %22)%0A%09ans = float(n) + float(ans)%0A%09c = c - 1%0A%0Aaverage = float(ans) / float(totalElements)%0Aprint %22Final Score: %22 + str(ans)%0Aprint %22Average was: %22 + str(average)%0A
6cab10d19386911f33aaca660a9e1a35751b18ee
broke github api url naming scheme. fixed
post_qr.py
post_qr.py
import praw import OAuth2Util import time import requests import json import os import humanize # sweet mother of imports def make_qr(repo): """ Takes a github url, uses the github api to get the direct download url and size, and uses google api to make a qr. It returns the link to the qr (not on imgur) and the formatted file size """ if 'tag' in repo: repo = repo.rsplit('tag', 1)[0] # cut the url up to /tag/ repo = repo[18::] # cut out www.~~~ blah up to /user/repo else: repo = repo.rsplit('releases', 1)[0] # cut the url up to /tag/ repo = repo[18::] req = requests.get("https://api.github.com/repos" + repo + "latest") # change to proper api format data = json.loads(req.text) for item in data['assets']: if ".cia" in item['name']: # if the download links have cia, make qr, else return None url = item["browser_download_url"] # search for keys containing url and size file_size = item['size'] file_size = humanize.naturalsize(file_size) qr_url = ('https://chart.googleapis.com/chart?chs=300x300&cht=qr&chl=' + url + '&choe=UTF-8') return qr_url, file_size else: return None r = praw.Reddit('3DS Homebrew QR Poster for /r/3DSHacks v0.3' 'By /u/Im_Soul') o = OAuth2Util.OAuth2Util(r) # create reddit oauth # o.refresh() if not os.path.isfile("posts_scanned.txt"): # check for posts_scanned.txt, if not, make empty list to store ids posts_scanned = [] # if so, import the ids stored to the file else: with open("posts_scanned.txt", "r") as f: posts_scanned = f.read() posts_scanned = posts_scanned.split("\n") posts_scanned = list(filter(None, posts_scanned)) subreddit = r.get_subreddit('3dshacks') # subreddit to scan for submission in subreddit.get_new(limit=5): # get 5 posts if submission.id not in posts_scanned: # check if we already checked the id if 'github.com' in submission.url: # check if url is github link_to_release = submission.url if "release" in submission.url: # check if it's a release (bad way of doing it) finished = make_qr(link_to_release) if finished is not None: # if 'make_qr()' was a success comment = '[QR Code (' + finished[1] + ')](' + finished[0] + ')' + '\n ***** \n Made by /u/Im_Soul' # comment formatting submission.add_comment(comment) print("Replied to ", submission.id, " on ", time.asctime(time.localtime(time.time()))) # run log posts_scanned.append(submission.id) # add id to list with open("posts_scanned.txt", "w") as f: # write from the list to the file for post_id in posts_scanned: f.write(post_id + "\n")
Python
0.999656
@@ -352,34 +352,8 @@ %22%22%22%0A - if 'tag' in repo:%0A @@ -372,19 +372,24 @@ rsplit(' -tag +releases ', 1)%5B0%5D @@ -430,33 +430,34 @@ url up to / -tag/%0A +releases/%0A repo = r @@ -469,217 +469,24 @@ 8::%5D - # cut out www.~~~ blah up to /user/repo%0A else:%0A repo = repo.rspli +%0A prin t( -' re -leases', 1)%5B0%5D # cut the url up to /tag/%0A repo = repo%5B18::%5D +po) %0A @@ -546,16 +546,25 @@ repo + %22 +releases/ latest%22)