Dataset schema:
  commit: string, length 40-40
  subject: string, length 1-3.25k
  old_file: string, length 4-311
  new_file: string, length 4-311
  old_contents: string, length 0-26.3k
  lang: string, 3 classes
  proba: float64, range 0-1
  diff: string, length 0-7.82k
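Diff fields in the raw dataset are percent-encoded (%0A for newlines, %22 for quotes, %25 for a literal percent sign). A minimal sketch, assuming only the Python standard library, of decoding such a field:

from urllib.parse import unquote

def decode_diff(encoded_diff):
    # unquote reverses percent-encoding: %0A -> newline, %22 -> ", %25 -> %
    return unquote(encoded_diff)

# Example on a tiny one-line hunk:
print(decode_diff("@@ -0,0 +1,4 @@ +u=1%0A"))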
c76c7b19afdf364ade2b7d0793cbdb14cb315131
add smalltalk like object model
smalltalk_like/obj_model.py
smalltalk_like/obj_model.py
Python
0.000002
@@ -0,0 +1,1482 @@ +class Base(object):

    def __init__(self, cls, fields):
        self.cls = cls
        self.fields = fields

    def read_attribute(self, field_name):
        return self.fields.get(field_name)

    def write_attribute(self, field_name, value):
        self.fields[field_name] = value

    def call_method(self, method_name, *args):
        method = self.cls.find_method(method_name)
        return method(self, *args)

    def isinstance(self, cls):
        return self.cls.issubclass(cls)


class Class(Base):

    def __init__(self, name, base_class, fields, metaclass):
        Base.__init__(self, metaclass, fields)
        self.name = name
        self.base_class = base_class

    def super_class_traversal(self):
        if self.base_class is None:
            return [self]
        else:
            return [self] + self.base_class.super_class_traversal()

    def issubclass(self, cls):
        return cls in self.super_class_traversal()

    def find_method(self, method_name):
        for cls in self.super_class_traversal():
            if method_name in cls.fields:
                return cls.fields[method_name]
        return MISSING


class Instance(Base):

    def __init__(self, cls):
        assert isinstance(cls, Class)
        Base.__init__(self, cls, {})

OBJECT = Class(name='object', base_class=None, fields={}, metaclass=None)
TYPE = Class(name='TYPE', base_class=OBJECT, fields={}, metaclass=None)
TYPE.cls = TYPE
OBJECT.cls = TYPE
MISSING = object()
5bcd31440322d19262b694a5df299f43af577e5e
Create app.py
app.py
app.py
Python
0.000003
@@ -0,0 +1,143 @@ +from flask import Flask


app = Flask(__name__)

@app.route("/")
def hello():
    return "Hello World!"

if __name__ == "__main__":
    app.run()
f6624531e47c599af42e75d84708359eaa982569
Solve AoC 2020-12-25/1
adventofcode2020/25.py
adventofcode2020/25.py
Python
0.999999
@@ -0,0 +1,700 @@ +
def loop_size_finder(inp, subject_number=7):
    i = 1
    c = 0

    while i != inp:
        i *= subject_number
        i %= 20201227
        c += 1

    return c


def transformer(iterations, subject_number=7):
    i = 1

    for _ in range(0, iterations):
        i *= subject_number
        i %= 20201227

    return i


def test_loop_size_finder():
    assert loop_size_finder(5764801) == 8
    assert loop_size_finder(17807724) == 11

    assert transformer(11, subject_number=5764801) == transformer(8, subject_number=17807724)


if __name__ == '__main__':
    card_loops = loop_size_finder(10212254)
    door_loops = loop_size_finder(12577395)

    print(transformer(card_loops, 12577395))
b5433672a4e27db4e8f8698c311d05055462ac00
Create main file
annotate_clin_virus.py
annotate_clin_virus.py
Python
0.000001
@@ -0,0 +1,2541 @@ +import timeit
import subprocess
import glob
import sys
import argparse

start = timeit.default_timer()

# This program runs some shit and does some shit about clinical virus samples
# Gonna write more as I need too

# parser = argparse.ArgumentParser(description= 'Annotate a set of UW clinical viral samples, pulling virus information from prokka and blast')
# parser.add_argument('file_dir', help='Input file directory, all .fasta files will be processed and .seq and .gbf files will be produced in the format input_dir/output/FASTA_name')
# parser.add_argument('metadata_info_sheet_location', help='.csv file where all of the metadata is stored')
# parser.add_argument('sbt_file_loc', help='location of .sbt file for .gbf file creation')

# args = parser.parse_args()

# Here I assume that the .fasta file has multiple fastas as opposed to being given a directory, this is subject to later change
fasta_filename = '10fasta_UWViroClinSeq.fasta'
metadata_info_sheet = 'UWVIROCLINSEQ - SCCA.csv'
gff_file_loc = 'HPIV3_121416.gff'

# Takes the name of a clincical virus as specified on the metadata sheet and returns a list of the relevant metadata
def pull_metadata(virus_name):
    for line in open(metadata_info_sheet):
        if line.split(',')[1] == virus_name:
            # Parse and steal input
            # reutrn two strings, one for the cmt file and the other for the .fsa features


def parse_gff(gff_file_loc):
    # First two lines are garbarge
    # One line a sequence format: ##TYPE DNA virus_name
    # then sequences start:
    # FORMAT:
    # RNA NAME
    # SEQUENCE
    # end-
    # all of them, also in the same order as the first list
    # NAME GENEIOUS cds ## ## stupid shit then the names
    # all named, and also in order
    # Write this into lists
    # write the damn files right here
    # pull_metadata(name)
    # write the .tbl and .fsa right here

def write_output():
    # make a folder for each, name it the sample name
    # Go through and make .fsa and .tbl files out of our data

# TODO: generalize, but first I'mma run it with hard coded filepaths

def run_tbl():
    # run .tbl2asn on all of the folders and process the .sqn files for submission
    # Probobly entails throwing the .sbt file into each folder
    #


# Process the fasta_file




# Now we go through and actually work our magic on the viruses
for x in range(0,len(virus_name_list)):
    clin_data_list = pull_metadata(virus_name_list[x])
    # TODO: Modify fasta/cmt file
    # TODO: Run Prokka - with options stolen from sheet
92aaff39dbd670f65dcbdeb34a2a506e0fcdf58b
add basic show_urls test
tests/management/commands/test_show_urls.py
tests/management/commands/test_show_urls.py
Python
0
@@ -0,0 +1,670 @@ +# -*- coding: utf-8 -*-
from django.core.management import call_command
from django.utils.six import StringIO


def test_show_urls_format_dense():
    out = StringIO()
    call_command('show_urls', stdout=out)

    output = out.getvalue()
    assert "/admin/\tdjango.contrib.admin.sites.index\tadmin:index\n" in output
    assert "/admin/<app_label>/\tdjango.contrib.admin.sites.app_index\tadmin:app_list\n" in output


def test_show_urls_format_verbose():
    out = StringIO()
    call_command('show_urls', format="verbose", stdout=out)

    output = out.getvalue()
    assert """/login/
\tController: django.contrib.auth.views.LoginView
\tURL Name: login""" in output
74a4f56d28497de89415f29ca3e1d6298c2fdd23
Create drivers.py
chips/sensor/simulation/drivers.py
chips/sensor/simulation/drivers.py
Python
0.000001
@@ -0,0 +1,371 @@ +# This code has to be added to the corresponding __init__.py

DRIVERS["simulatedsensors"] = ["PRESSURE", "TEMPERATURE", "LUMINOSITY", "DISTANCE", "HUMIDITY",
                               "COLOR", "CURRENT", "VOLTAGE", "POWER",
                               "LINEARACCELERATION", "ANGULARACCELERATION", "ACCELERATION", "LINEARVELOCITY", "ANGULARVELOCITY", "VELOCITY",
"SENSORS"]
0d77fe363b6e6e8b1a0424cec7631cf13b669968
add linear simulation
epistasis/simulate/linear.py
epistasis/simulate/linear.py
Python
0.000012
@@ -0,0 +1,2387 @@ +__doc__ = """Submodule with various classes for generating/simulating genotype-phenotype maps."""

# ------------------------------------------------------------
# Imports
# ------------------------------------------------------------

import numpy as np
from gpmap.gpm import GenotypePhenotypeMap

# local imports
from epistasis.decomposition import generate_dv_matrix
from epistasis.simulate.base import BaseSimulation

# ------------------------------------------------------------
# ArtificialMap object can be used to quickly generating a toy
# space for testing the EpistasisModels
# ------------------------------------------------------------

class LinearSimulation(BaseSimulation):
    """Construct an genotype-phenotype from linear building blocks and
    epistatic coefficients.

    Example
    -------
    Phenotype = b0 + b1 + b2 + b3 + b12 + b13 + b13 + b123

    Parameters
    ---------
    wildtype : str
        Wildtype genotype
    mutations : dict
        Mapping for each site to its alphabet
    order : int
        Order of epistasis in simulated genotype-phenotype map
    betas : array-like
        values of epistatic coefficients (must be positive for this function
        to work. Log is taken)
    model_type : str
        Use a local or global (i.e. Walsh space) epistasis model to construct
        phenotypes
    """
    def __init__(self, wildtype, mutations,
        model_type='local',
        ):
        # Construct epistasis mapping objects (empty)
        super(LinearSimulation,self).__init__(
            wildtype,
            mutations,
        )
        self.model_type = model_type

    @property
    def p_additive(self):
        """Get the additive phenotypes"""
        orders = self.epistasis.getorder
        labels = list(orders[0].labels) + list(orders[1].labels)
        vals = list(orders[0].values) + list(orders[1].values)
        x = generate_dv_matrix(self.binary.genotypes, labels, model_type=self.model_type)
        return np.dot(x, vals)

    def build(self):
        """ Build the phenotype map from epistatic interactions. """
        # Allocate phenotype numpy array
        _phenotypes = np.zeros(self.n, dtype=float)
        # Get model type:
        self.X = generate_dv_matrix(self.binary.genotypes, self.epistasis.labels, model_type=self.model_type)
        self.phenotypes = np.dot( self.X, self.epistasis.values)
14e637720d6c80ed88232130b00385ceb4d451da
Create manual/__init__.py
app/tests/manual/__init__.py
app/tests/manual/__init__.py
Python
0.000294
@@ -0,0 +1,195 @@ +"""
Manual test module.

Note that while `TEST_MODE` should be set an environment variable for the
unit and integration tests, we want that off here so we can test against
local config data.
"""
5bd4534b375efed2ce5026a64228a45a9acc1d64
add parallel runner
microscopes/kernels/parallel.py
microscopes/kernels/parallel.py
Python
0.001425
@@ -0,0 +1,1582 @@ +"""Contains a parallel runner implementation, with support
for various backends

"""

from microscopes.common import validator
import multiprocessing as mp


def _mp_work(args):
    runner, niters = args
    runner.run(niters)
    return runner.get_latent()

class runner(object):

    def __init__(self, runners, backend='multiprocessing', **kwargs):
        self._runners = runners
        if backend not in ('multiprocessing',):
            raise ValueError("invalid backend: {}".format(backend))
        self._backend = backend
        if backend == 'multiprocessing':
            validator.validate_kwargs(kwargs, ('processes',))
            if 'processes' not in kwargs:
                kwargs['processes'] = mp.cpu_count()
            validator.validate_positive(kwargs['processes'], 'processes')
            self._processes = kwargs['processes']
        else:
            assert False, 'should not be reached'


    def run(self, niters=10000):
        """Run each runner for `niters`, using the backend for parallelism

        """
        if self._backend == 'multiprocessing':
            pool = mp.Pool(processes=self._processes)
            args = [(runner, niters) for runner in self._runners]
            # map_async() + get() allows us to workaround a bug where
            # control-C doesn't kill multiprocessing workers
            self._latents = pool.map_async(_mp_work, args).get(10000000)
            pool.close()
            pool.join()
        else:
            assert False, 'should not be reached'

    def get_latents(self):
        return self._latents
0cdc87edc4d5e4c967e7bc5bd35c5b30151d5a6e
Create admin_pages.py
evewspace/API/admin_pages.py
evewspace/API/admin_pages.py
Python
0.000001
@@ -0,0 +1,118 @@ +from core.admin_page_registry import registry

registry.register('SSO', 'sso_admin.html', 'API.change_ssoaccesslist')
48190b463bcbafc0b1d3af6c41677a295237e3ba
Add missing file
3rdParty/V8/V8-5.0.71.39/build/has_valgrind.py
3rdParty/V8/V8-5.0.71.39/build/has_valgrind.py
Python
0.000006
@@ -0,0 +1,666 @@ +#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind')
LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86')
LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64')


def DoMain(_):
  """Hook to be called from gyp without starting a separate python
  interpreter."""
  return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR))


if __name__ == '__main__':
  print DoMain([])
ccce1108e1deab466fd72c022949fa05fa807a3a
add initial files for launch
synth.py
synth.py
Python
0.000001
@@ -0,0 +1,1123 @@ +# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""

import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import subprocess
import logging

logging.basicConfig(level=logging.DEBUG)

# run the gapic generator
gapic = gcp.GAPICBazel()
versions = ["v1"]
name = 'policytroubleshooter'
for version in versions:
    library = gapic.node_library(
        name,
        version,
        proto_path = f'google/cloud/policytroubleshooter/{version}')
    s.copy(library, excludes=[])

# Copy common templates
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(
    source_location='build/src', versions=["v1"], default_version="v1")
s.copy(templates, excludes=[])

node.postprocess_gapic_library()
f480a0a8d51c5c059a05165f30f64bb310299ee3
Add 'rescore' command
project/apps/api/management/commands/rescore.py
project/apps/api/management/commands/rescore.py
Python
0.999783
@@ -0,0 +1,521 @@ +from django.core.management.base import (
    BaseCommand,
)

from apps.api.models import (
    Contestant,
    Appearance,
    Performance,
)


class Command(BaseCommand):
    help = "Command to denormailze data."

    def handle(self, *args, **options):
        ps = Performance.objects.all()
        for p in ps:
            p.save()
        as_ = Appearance.objects.all()
        for a in as_:
            a.save()
        cs = Contestant.objects.all()
        for c in cs:
            c.save()
        return "Done"
d4a7bbe27b285e455a3beafefd22fc493edeb161
Add unittest for eventlogger config validation.
test/test_config_eventlogger.py
test/test_config_eventlogger.py
Python
0
@@ -0,0 +1,1770 @@ +#!/usr/bin/env python2
import unittest
import subprocess
import threading
import tempfile
import os

from testdc import *

DAEMON_PATH = './astrond'
TERMINATED = -15
EXITED = 1

class ConfigTest(object):
    def __init__(self, config):
        self.config = config
        self.process = None

    def run(self, timeout):
        def target():
            self.process = subprocess.Popen([DAEMON_PATH, self.config])
            self.process.communicate()

        thread = threading.Thread(target=target)
        thread.start()

        thread.join(timeout)
        if thread.is_alive():
            self.process.terminate()
            thread.join()
        return self.process.returncode

class TestConfigEventLogger(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cfg, cls.config_file = tempfile.mkstemp()
        os.close(cfg)

        cls.test_command = ConfigTest(cls.config_file)

    @classmethod
    def tearDownClass(cls):
        if cls.config_file is not None:
            os.remove(cls.config_file)

    @classmethod
    def write_config(cls, config):
        f = open(cls.config_file, "w")
        f.write(config)
        f.close()

    @classmethod
    def run_test(cls, config, timeout = 2):
        cls.write_config(config)
        return cls.test_command.run(timeout)

    def test_eventlogger_good(self):
        config = """\
messagedirector:
    bind: 127.0.0.1:57123

roles:
    - type: eventlogger
      bind: 0.0.0.0:9090
      output: /var/log/astron/eventlogger/el-%Y-%m-%d-%H-%M-%S.log
      rotate_interval: 1d
    """
        self.assertEquals(self.run_test(config), TERMINATED)

if __name__ == '__main__':
    unittest.main()
78c9f392a02c0fdb72294e08a3d5ce78262443f5
Create 1.py
1.py
1.py
Python
0.000001
@@ -0,0 +1,4 @@ +u=1
d596bfbbfa725111fb4c0f6d4abf6789669f06de
Create sets.py
sets.py
sets.py
Python
0.000001
@@ -0,0 +1,406 @@ +#!/usr/bin/env python2

'''
Generates automatically one array, a.
Prints an ordered list with only unique elems
'''

import random

SIZE_LIST_A = 10
a = []


def populate_arrays():
    for i in range(0, SIZE_LIST_A):
        a.append(random.randint(1, 100))


if __name__ == "__main__":
    populate_arrays()
    print "a: {:s}".format(str(a))
    b = list(set(a))
    b.sort()
    print "b: {:s}".format(str(b))

exit(0)
563b9e1f826433179a5e3c5e611d40efc8736c4a
Create Hexbin Example
altair/examples/hexbins.py
altair/examples/hexbins.py
Python
0
@@ -0,0 +1,1503 @@ +"""
Hexbin Chart
-----------------
This example shows a hexbin chart.
"""
import altair as alt
from vega_datasets import data

source = data.seattle_weather()

# Size of the hexbins
size = 15
# Count of distinct x features
xFeaturesCount = 12
# Count of distinct y features
yFeaturesCount = 7
# Name of the x field
xField = 'date'
# Name of the y field
yField = 'date'

# the shape of a hexagon
hexagon = "M0,-2.3094010768L2,-1.1547005384 2,1.1547005384 0,2.3094010768 -2,1.1547005384 -2,-1.1547005384Z"

alt.Chart(source).mark_point(size=size**2, shape=hexagon).encode(
    x=alt.X('xFeaturePos:Q', axis=alt.Axis(title='Month',
                                           grid=False, tickOpacity=0, domainOpacity=0)),
    y=alt.Y('day(' + yField + '):O', axis=alt.Axis(title='Weekday',
                                                   labelPadding=20, tickOpacity=0, domainOpacity=0)),
    stroke=alt.value('black'),
    strokeWidth=alt.value(0.2),
    fill=alt.Color('mean(temp_max):Q', scale=alt.Scale(scheme='darkblue')),
    tooltip=['month(' + xField + '):O', 'day(' + yField + '):O', 'mean(temp_max):Q']
).transform_calculate(
    # This field is required for the hexagonal X-Offset
    xFeaturePos='(day(datum.' + yField + ') % 2) / 2 + month(datum.' + xField + ')'
).properties(
    # Exact scaling factors to make the hexbins fit
    width=size * xFeaturesCount * 2,
    height=size * yFeaturesCount * 1.7320508076,  # 1.7320508076 is approx. sin(60°)*2
).configure_view(
    strokeWidth=0
)
8118dc283eececdd074bac675c57975ceeba3739
Create gateway.py
Gateway/gateway.py
Gateway/gateway.py
Python
0.000001
@@ -0,0 +1,56 @@ +\\ This will be the Gateway.py file for the RPi Gateway
2c0ce3c64720122bf2fdd80aeb2ff8359873ac83
Test that noindex flag will only show robots metatag when set
municipal_finance/tests/test_analytics.py
municipal_finance/tests/test_analytics.py
Python
0
@@ -0,0 +1,546 @@ +from django.test import TestCase
from django.conf import settings


class TestAnalytics(TestCase):

    def test_noindex_flag(self):
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue('<meta name="robots" content="noindex">' not in str(response.content))

        settings.NO_INDEX = "True"
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue('<meta name="robots" content="noindex">' in str(response.content))
11dd2daf7dd125e0be6a604dd22ae25efed16226
Update at 2017-07-20 14-05-11
test.py
test.py
Python
0
@@ -0,0 +1,2269 @@ +import json
from pathlib import Path

import numpy as np
import pandas as pd

import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))

from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *

from data import *
from utils import get_callbacks


def main():
    with tf.device('/gpu:3'):
        model = Sequential()
        model.add(TimeDistributed(BatchNormalization(), input_shape=(TIMESTEPS, 224, 224, 3)))
        model.add(TimeDistributed(Conv2D(4, kernel_size=5, strides=3, activation='relu')))
        model.add(TimeDistributed(Conv2D(8, kernel_size=5, strides=2, activation='relu')))
        model.add(TimeDistributed(Conv2D(12, kernel_size=3, strides=1, activation='relu')))
        model.add(TimeDistributed(BatchNormalization()))
        model.add(TimeDistributed(MaxPooling2D(pool_size=3)))
        model.add(Conv3D(4, kernel_size=5, strides=1, activation='relu'))
        model.add(BatchNormalization())
        model.add(Flatten())
        model.add(Dense(16))
        model.add(Dropout(0.3))
        model.add(Dense(1, activation='sigmoid'))

        model_arg = {
            'loss': 'binary_crossentropy',
            'optimizer': 'sgd',
            'metrics': ['binary_accuracy']
        }
        model.compile(**model_arg)
        model.summary()

        n_train, n_val = 5000, 1000
        x_train = np.zeros((n_train, TIMESTEPS, 224, 224, 3), dtype=np.float32)
        y_train = np.zeros((n_train, 1), dtype=np.uint8)
        x_val = np.zeros((n_val, TIMESTEPS, 224, 224, 3), dtype=np.float32)
        y_val = np.zeros((n_val, 1), dtype=np.uint8)

        print('Loading data...', end='')
        for i in range(n_train):
            x, y = next(window_train_gen)
            x_train[i] = x
            y_train[i] = y
        for i in range(n_val):
            x, y = next(window_val_gen)
            x_val[i] = x
            y_val[i] = y
        print('ok')

        fit_arg = {
            'x': x_train,
            'y': y_train,
            'batch_size': WINDOW_BATCH_SIZE,
            'epochs': 30,
            'validation_data': (x_val, y_val),
            'shuffle': True
        }

        model.fit(**fit_arg)


if __name__ == '__main__':
    main()
0c76fa59e77786c577f0750c65f97d24eb3c4157
Test script
test.py
test.py
Python
0.000001
@@ -0,0 +1,2744 @@ +#!/usr/bin/env python
# -*- coding: utf-8 -*-


import tensorflow as tf
import numpy as np
import os
import time
import datetime
import tables
from sklearn.metrics import f1_score,confusion_matrix



# ===================== Data preparation =============================

# Load data
print("Loading data...")

alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
sequence_max_length = 1024  # Twitter has only 140 characters. We pad 4 blanks characters more to the right of tweets to be conformed with the architecture of A. Conneau et al (2016)

from tensorflow.core.protobuf import saver_pb2

checkpoint_file = tf.train.latest_checkpoint("./")

graph = tf.Graph()
# Input data.
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        input_y = graph.get_operation_by_name("input_y").outputs[0]

        is_training = graph.get_operation_by_name(
            "phase").outputs[0]

        ### To update the computation of moving_mean & moving_var, we must put it on the parent graph of minimizing loss

        accuracy = graph.get_operation_by_name(
            "accuracy/accuracy").outputs[0]

        predictions = graph.get_operation_by_name(
            "fc-3/predictions").outputs[0]
        hdf5_path = "my_extendable_compressed_data_test.hdf5"

        batch_size = 1000
        extendable_hdf5_file = tables.open_file(hdf5_path, mode='r')

        y_true_ = []
        predictions_= []

        for ptr in range(0, 70000, batch_size):

            feed_dict = {cnn.input_x: extendable_hdf5_file.root.data[ptr:ptr + batch_size], cnn.input_y: extendable_hdf5_file.root.clusters[ptr:ptr + batch_size], cnn.is_training: False}

            y_true = tf.argmax(extendable_hdf5_file.root.clusters[ptr:ptr + batch_size], 1)
            y_true_bis,predictions_bis,accuracy = sess.run([y_true,predictions,cnn.accuracy], feed_dict=feed_dict)
            y_true_.extend(y_true_bis)
            predictions_.extend(predictions_bis)


        confusion_matrix_ = confusion_matrix(y_true_,predictions_)
        print(confusion_matrix_)
        print ("f1_score", f1_score(y_true_, predictions_ ,average ='weighted'))
        print ("f1_score", f1_score(y_true_, predictions_ ,average =None))
        extendable_hdf5_file.close()
77effff7ece070eabb3853ba918d40b7eb1c3de5
Create sc.py
sc.py
sc.py
Python
0.000005
@@ -0,0 +1,1191 @@ +#!/usr/bin/env python
import soundcloud
from clize import clize, run
from subprocess import call


@clize
def sc_load(tracks='', likes='', tags='', group=''):
    opts = {}
    if likes:
        method = 'favorites'
    elif tracks or group:
        method = 'tracks'
    elif tags:
        method = 'tracks'
        opts = {'tags': tags}
    else:
        return

    client = soundcloud.Client(client_id='c4c979fd6f241b5b30431d722af212e8')
    if likes or tracks:
        user = likes or tracks
        track = client.get('/resolve', url='https://soundcloud.com/' + user)
        user_id = track.id
        url = '/users/%d/' % user_id
    elif group:
        track = client.get('/resolve', url='https://soundcloud.com/groups/' + group)
        group_id = track.id
        url = '/groups/%d/' % group_id
    else:
        url = '/'

    end = '%s%s' % (url, method)
    for i, sound in enumerate(client.get(end, **opts)):
        print("%d Loading %s..." % (i, sound.obj['title']))
        call(['mpc', '-h', '<motdepasse>@entrecote', 'load',
              'soundcloud://url/%s' % sound.obj['permalink_url'].replace('http:', 'https:')])


if __name__ == '__main__':
    run(sc_load)
2055fc1eda896103931eaba5fb01238506aaac1a
Add signup in urls
urls.py
urls.py
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin

from okupy.login.views import *
from okupy.user.views import *

admin.autodiscover()

urlpatterns = patterns('',
    url(r'^login/$', mylogin),
    url(r'^$', user),
    url(r'^admin/', include(admin.site.urls)),
)
Python
0
@@ -149,16 +149,49 @@ import * +
from okupy.signup.views import *

admin. @@ -286,16 +286,46 @@ user),
 +    url(r'^signup/', signup),
 url(
d5b6299b802810748584b06242f614550155a283
Create app.py
app.py
app.py
Python
0.000003
@@ -0,0 +1,1381 @@ +from flask import Flask, request
import requests
import json
import traceback
import random
import os

from urllib.parse import urlencode
from urllib.request import Request, urlopen

app = Flask(__name__)

@app.route('/', methods=['GET', 'POST'])
def main():
    # if request.method == 'POST':
    #     try:
    #         data = json.loads(request.data)
    #         print ('data: ', data)
    #         print ('request.data: ', request.data)
    #     except:
    #         print ('error?')
    # elif request.method == 'GET':
    #     print('get')
    #     print (request.data)
    #     return 'get'
    # return 'all fails\n'
    if request.method == 'POST':
        data = request.get_json()

        if data['name'] != 'My Man':
            # msg = '{}, you sent "{}".'.format(data['name'], data['text'])
            msg = 'https://media.giphy.com/media/qPVzemjFi150Q/giphy.gif'
            send_message(msg)
    elif request.method == 'GET':
        msg = 'https://media.giphy.com/media/3o7aCUqs54taGzqDWU/giphy.gif'
        send_message(msg)

    return ("My Man!!")

def send_message(msg):
    url = 'https://api.groupme.com/v3/bots/post'

    data = {
        'bot_id' : os.getenv('BOT_ID'),
        'text' : msg,
    }
    request = Request(url, urlencode(data).encode())
    json = urlopen(request).read().decode()


if __name__ == '__main__':
    app.run()
4ff22a24a7d681a3c62f7d7e4fe56c0032a83370
Improve logging
app.py
app.py
import bottle
from bottle import get, post, static_file, request, route, template
from bottle import SimpleTemplate
from configparser import ConfigParser
from ldap3 import Connection, LDAPBindError, LDAPInvalidCredentialsResult, Server
from ldap3 import AUTH_SIMPLE, SUBTREE
from os import path


@get('/')
def get_index():
    return index_tpl()


@post('/')
def post_index():
    form = request.forms.get

    def error(msg):
        return index_tpl(username=form('username'), alerts=[('error', msg)])

    if form('new-password') != form('confirm-password'):
        return error("Password doesn't match the confirmation!")

    if len(form('new-password')) < 8:
        return error("Password must be at least 8 characters long!")

    if not change_password(form('username'), form('old-password'), form('new-password')):
        return error("Username or password is incorrect!")

    return index_tpl(alerts=[('success', "Password has been changed")])


@route('/static/<filename>', name='static')
def serve_static(filename):
    return static_file(filename, root=path.join(BASE_DIR, 'static'))


def index_tpl(**kwargs):
    return template('index', **kwargs)


def change_password(username, old_pass, new_pass):
    print("Changing password for user: %s" % username)

    server = Server(CONF['ldap']['host'], int(CONF['ldap']['port']))
    user_dn = find_user_dn(server, username)

    try:
        with Connection(server, authentication=AUTH_SIMPLE, raise_exceptions=True,
                        user=user_dn, password=old_pass) as c:
            c.bind()
            c.extend.standard.modify_password(user_dn, old_pass, new_pass)
            return True
    except (LDAPBindError, LDAPInvalidCredentialsResult):
        return False


def find_user_dn(server, uid):
    with Connection(server) as c:
        c.search(CONF['ldap']['base'], "(uid=%s)" % uid, SUBTREE, attributes=['dn'])
        return c.response[0]['dn'] if c.response else None


BASE_DIR = path.dirname(__file__)

CONF = ConfigParser()
CONF.read(path.join(BASE_DIR, 'settings.ini'))

bottle.TEMPLATE_PATH = [ BASE_DIR ]

# Set default attributes to pass into templates.
SimpleTemplate.defaults = dict(CONF['html'])
SimpleTemplate.defaults['url'] = bottle.url

# Run bottle internal test server when invoked directly (in development).
if __name__ == '__main__':
    bottle.run(host='0.0.0.0', port=8080)

# Run bottle in application mode (in production under uWSGI server).
else:
    application = bottle.default_app()
Python
0.00001
@@ -832,57 +832,211 @@ -return error("Username or password is incorrect!" +print("Unsuccessful attemp to change password for: %s" % form('username'))
        return error("Username or password is incorrect!")

    print("Password successfully changed for: %s" % form('username') )

 @@ -1372,64 +1372,8 @@ s):
 -    print("Changing password for user: %s" % username)

b720ecf75634718a122c97bcff29129e321aa9b2
Add cat.py.
cat.py
cat.py
Python
0.000007
@@ -0,0 +1,628 @@ +"""
Usage: cat.py [FILE]...
Concatenate FILE(s), or standard input, to standard output.

"""
import sys


def iter_files(paths):
    for path in paths:
        try:
            yield open(path, 'rb')
        except (IOError, OSError) as e:
            print("error: {}".format(e), file=sys.stderr)


def main(argv=None):
    if not argv:
        argv = list(sys.argv)

    if len(argv) < 2:
        files = [sys.stdin.buffer]
    else:
        files = iter_files(argv[1:])

    for file in files:
        for line in file:
            sys.stdout.buffer.write(line)
        file.close()


if __name__ == "__main__":

    main()
3b58283f613fc827e024c8d971d89c24fc2b3ed0
Create knn.py
knn.py
knn.py
Python
0.00005
@@ -0,0 +1,1086 @@ +import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA

#Read training data and split into train and test data
data=pd.read_csv('train.csv')
data1=data.values
X=data1[:,1:]
y=np.ravel(y)
Xtrain,Xtest,ytrain,ytest=train_test_split(X,y,test_size=0.25)

#Run PCA and KNN
pca=PCA(n_components=50).fit(Xtrain)
Xtrain_reduced=pca.transform(Xtrain)
Xtest_reduced=pca.transform(Xtest)
knn=KNeighborsClassifier(n_neighbors=5,weights='distance',p=3)
knn.fit(Xtrain_reduced,ytrain)
pred=knn.predict(Xtest_reduced)
print("Classification report for classifier %s:\n%s\n"
      % (knn, metrics.classification_report(ytest,pred)))

#Run prediction on test data and make submissions
test=pd.read_csv('test.csv')
test_reduced=pca.transform(test)
pred2=knn.predict(test_reduced)
pred2 = pd.DataFrame(pred2)
pred2['ImageId'] = pred2.index + 1
pred2 = pred2[['ImageId', 0]]
pred2.columns = ['ImageId', 'Label']
pred2.to_csv('pred2.csv', index=False)
1faa3c76d1c752de02149af34954ed538fe10fa1
Add test
app/tests/test_data.py
app/tests/test_data.py
Python
0.000005
@@ -0,0 +1,383 @@ +import unittest

from app import data


class TestProjects(unittest.TestCase):
    def test_load(self) -> None:
        projects = data.Projects.load()
        self.assertNotEqual(projects.data, {})
        self.assertIn('Python', projects.data)
        self.assertIn('Git Browse', projects.data['Python'])
        self.assertIn('description', projects.data['Python']['Git Browse'])
5813474651299998fb27c64c6d179a0a59bbe28c
Create otc.py
otc.py
otc.py
Python
0.000002
@@ -0,0 +1,1366 @@ +def tick(a,b,c):
    if a == 'help':
        msg = '^otc {currency}, specify a 2nd currency for rates, add --last/high/low etc for that alone.'
        return msg
    import urllib2,json,StringIO
    a = a.lower()
    b = b.lower()
    c = c.lower()

    if b.startswith('-'):
        c = b
        b = 'usd'

    if b == 'none':
        b = 'usd'

    btce = urllib2.Request('https://btc-e.com/api/2/' + a + '_' + b + '/ticker')
    get = urllib2.urlopen(btce)
    parse = get.read()
    if parse == '{"error":"invalid pair"}':
        b = 'btc'
        btce = urllib2.Request('https://btc-e.com/api/2/' + a + '_' + b + '/ticker')
        get = urllib2.urlopen(btce)
        parse = get.read()

    try:
        ticker3 = "{" + parse.split('{',2)[2].split('}',2)[0] + "}".replace('"','\'').replace(':',':"').replace(',','",').replace('}','"}')
        ticker2 = ticker3.replace(':',':"').replace(',','",')
        ticker = json.loads(ticker2)
    except:
        return 'Unknown currency'

    if c == 'none':
        msg = 'BTC-E ' + a.upper() + b.upper() + ' ticker | High: ' + ticker['high'] + ', Low: ' + ticker['low'] + ', avg: ' + ticker['avg'] + ', Last: ' + ticker['last'] + ', Buy: ' + ticker['buy'] + ', Sell: ' + ticker['sell']

    elif c.startswith('--'):
        msg = ticker[c[2:]]

    else:
        msg = 'That flag does not exist'

    return msg
bf678628cf98b1c18a75f09fa15d26526ea0e3ac
Add gender choices fields
accelerator/migrations/0028_add_gender_fields.py
accelerator/migrations/0028_add_gender_fields.py
Python
0.000001
@@ -0,0 +1,1577 @@ +from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accelerator', '0027_add_gender_choices_object'),
    ]

    operations = [
        migrations.AddField(
            model_name='entrepreneurprofile',
            name='gender_self_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='entrepreneurprofile',
            name='gender_identity',
            field=models.ManyToManyField(
                blank=True,
                to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
        ),
        migrations.AddField(
            model_name='expertprofile',
            name='gender_self_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='expertprofile',
            name='gender_identity',
            field=models.ManyToManyField(
                blank=True,
                to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
        ),
        migrations.AddField(
            model_name='memberprofile',
            name='gender_self_description',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='memberprofile',
            name='gender_identity',
            field=models.ManyToManyField(
                blank=True,
                to=settings.ACCELERATOR_GENDERCHOICES_MODEL),
        ),
    ]
bac06acb1e6255040f371232776f3da75fb9247a
Add data migration to populate preprint_doi_created field on existing published preprints where DOI identifier exists. Set to preprint date_published field.
osf/migrations/0069_auto_20171127_1119.py
osf/migrations/0069_auto_20171127_1119.py
Python
0
@@ -0,0 +1,2152 @@ +# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 17:19
from __future__ import unicode_literals
import logging

from django.db import migrations
from osf.models import PreprintService
logger = logging.getLogger(__name__)

def add_preprint_doi_created(apps, schema_editor):
    """
    Data migration that makes preprint_doi_created equal to date_published for existing published preprints.
    """
    null_preprint_doi_created = PreprintService.objects.filter(preprint_doi_created__isnull=True, date_published__isnull=False)
    preprints_count = null_preprint_doi_created.count()
    current_preprint = 0
    logger.info('{} published preprints found with preprint_doi_created is null.'.format(preprints_count))

    for preprint in null_preprint_doi_created:
        current_preprint += 1
        if preprint.get_identifier('doi'):
            preprint.preprint_doi_created = preprint.date_published
            preprint.save()
            logger.info('Preprint ID {}, {}/{} preprint_doi_created field populated.'.format(preprint._id, current_preprint, preprints_count))
        else:
            logger.info('Preprint ID {}, {}/{} skipped because a DOI has not been created.'.format(preprint._id, current_preprint, preprints_count))

def reverse_func(apps, schema_editor):
    """
    Reverses data migration. Sets preprint_doi_created field back to null.
    """
    preprint_doi_created_not_null = PreprintService.objects.filter(preprint_doi_created__isnull=False)
    preprints_count = preprint_doi_created_not_null.count()
    current_preprint = 0
    logger.info('Reversing preprint_doi_created migration.')

    for preprint in preprint_doi_created_not_null:
        current_preprint += 1
        preprint.preprint_doi_created = None
        preprint.save()
        logger.info('Preprint ID {}, {}/{} preprint_doi_created field set to None.'.format(preprint._id, current_preprint, preprints_count))

class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0068_preprintservice_preprint_doi_created'),
    ]

    operations = [
        migrations.RunPython(add_preprint_doi_created, reverse_func)
    ]
167a6497d79a4a18badd5ea85a87e7eefcd02696
Add init file to the root acceptance tests folder
test/acceptance/__init__.py
test/acceptance/__init__.py
Python
0
@@ -0,0 +1,1075 @@ +# -*- coding: utf-8 -*-
"""
Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U

This file is part of fiware-orion-pep

fiware-orion-pep is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.

fiware-orion-pep is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public
License along with fiware-orion-pep.
If not, see http://www.gnu.org/licenses/.

For those usages not covered by the GNU Affero General Public License
please contact with::
[email protected]]
"""
__author__ = 'Jon Calderin Goñi <[email protected]>'

import os

"""
Make sure the logs path exists and create it otherwise.
"""
if not os.path.exists('logs'):
    os.makedirs('logs')
d290b3b2cc15a3bab907ed3847da709ab31edace
disable unpredictable tests
tests/acceptance/test_api.py
tests/acceptance/test_api.py
from __future__ import absolute_import

from sentry.testutils import AcceptanceTestCase


class ApiTokensTest(AcceptanceTestCase):
    def setUp(self):
        super(ApiTokensTest, self).setUp()
        self.user = self.create_user('[email protected]')
        self.login_as(self.user)
        self.path = '/api/'

    def test_simple(self):
        self.browser.get(self.path)
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - no tokens')
        self.browser.click('.ref-create-token')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - new token')
        self.browser.click('.btn-primary')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api tokens - single token')


class ApiApplicationTest(AcceptanceTestCase):
    def setUp(self):
        super(ApiApplicationTest, self).setUp()
        self.user = self.create_user('[email protected]')
        self.login_as(self.user)
        self.path = '/api/applications/'

    def test_simple(self):
        self.browser.get(self.path)
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - no applications')
        self.browser.click('.ref-create-application')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - new application')
        self.browser.click('.btn-primary')
        self.browser.wait_until_not('.loading')
        self.browser.snapshot('api applications - single application')
Python
0
@@ -472,32 +472,34 @@ tokens')
 + # self.browser.cl @@ -522,32 +522,34 @@ -token')
 + # self.browser.wa @@ -572,32 +572,34 @@ oading')
 + # self.browser.sn @@ -630,32 +630,34 @@ token')
 + # self.browser.cl @@ -675,32 +675,34 @@ rimary')
 + # self.browser.wa @@ -725,32 +725,34 @@ oading')
 + # self.browser.sn @@ -1213,32 +1213,34 @@ ations')
 + # self.browser.cl @@ -1269,32 +1269,34 @@ cation')
 + # self.browser.wa @@ -1319,32 +1319,34 @@ oading')
 + # self.browser.sn @@ -1389,32 +1389,34 @@ cation')
 + # self.browser.cl @@ -1434,32 +1434,34 @@ rimary')
 + # self.browser.wa @@ -1484,32 +1484,34 @@ oading')
 + # self.browser.sn
8fa776fd2fa63a44cb048a39fe7359ee9366c5e8
Add basic Processor tests
tests/003-test-processor.py
tests/003-test-processor.py
Python
0.000001
@@ -0,0 +1,2333 @@ +import time
import random
import multiprocessing
from functools import wraps

try:
    import queue
except ImportError:
    import Queue as queue

import t
import bucky.processor
import bucky.cfg as cfg
cfg.debug = True


def processor(func):
    @wraps(func)
    def run():
        inq = multiprocessing.Queue()
        outq = multiprocessing.Queue()
        proc = bucky.processor.CustomProcessor(inq, outq, cfg)
        proc.start()
        func(inq, outq, proc)
        inq.put(None)
        dead = False
        for i in range(5):
            if not proc.is_alive():
                dead = True
                break
            time.sleep(0.1)
        if not dead:
            raise RuntimeError("Server didn't die.")
    return run


def send_get_data(indata, inq, outq):
    for sample in indata:
        inq.put(sample)
    while True:
        try:
            sample = outq.get(True, 1)
        except queue.Empty:
            break
        yield sample


def identity(host, name, val, time):
    return host, name, val, time


@t.set_cfg("processor", identity)
@processor
def test_start_stop(inq, outq, proc):
    assert proc.is_alive(), "Processor not alive."
    inq.put(None)
    time.sleep(0.5)
    assert not proc.is_alive(), "Processor not killed by putting None in queue"


@t.set_cfg("processor", identity)
@processor
def test_plumbing(inq, outq, proc):
    data = []
    times = 100
    for i in range(times):
        host = "tests.host-%d" % i
        name = "test-plumbing-%d" % i
        value = i
        timestamp = int(time.time() + i)
        data.append((host, name, value, timestamp))
    i = 0
    for sample in send_get_data(data, inq, outq):
        t.eq(sample, data[i])
        i += 1
    t.eq(i, times)


def filter_even(host, name, val, timestamp):
    if not val % 2:
        return None
    return host, name, val, timestamp


@t.set_cfg("processor", filter_even)
@processor
def test_filter(inq, outq, proc):
    data = []
    times = 100
    for i in range(times):
        host = "tests.host-%d" % i
        name = "test-filter-%d" % i
        timestamp = int(time.time() + i)
        data.append((host, name, 0, timestamp))
        data.append((host, name, 1, timestamp))
    i = 0
    for sample in send_get_data(data, inq, outq):
        t.eq(sample[2], 1)
        i += 1
    t.eq(i, times)
0b185bb6a30cb7c9b02c80051a8426dc736da3d6
Add sample WSGI app
examples/wsgi.py
examples/wsgi.py
Python
0
@@ -0,0 +1,1781 @@ +
import cgi
import json
from wsgiref import simple_server

import falcon

from mclib import mc_info

class MCInfo(object):

    def on_get(self, req, resp):

        host = req.get_param('host', required=True)
        port = req.get_param_as_int('port', min=1024,
                                    max=65565)

        try:
            if port is not None:
                info = mc_info.get_info(host=host,
                                        port=port)
            else:
                info = mc_info.get_info(host=host)
        except Exception:
            raise Exception('Couldn\'t retrieve info.')

        if '.json' in req.uri:
            resp.body = self.get_json(info)
            return

        preferred = req.client_prefers(['application/json', 'text/html'])
        if 'html' in preferred:
            resp.content_type = 'text/html'
            resp.body = self.get_html(info)
        else:
            resp.body = self.get_json(info)

    def get_html(self, info):

        html = """<body>
<style>
table,th,td
{
border:1px solid black;
border-collapse:collapse
}
th,td
{
padding: 5px
}
</style>

<table>
"""

        for k,v in info.iteritems():
            items = {'key': cgi.escape(k)}
            if isinstance(v, basestring):
                items['val'] = cgi.escape(v)
            else:
                items['val'] = v
            html = html + '<tr><td>%(key)s</td><td>%(val)s</td></tr>' % items

        html = html + '</table></body>'

        return html

    def get_json(self, info):
        return json.dumps(info)

app = falcon.API()

mcinfo = MCInfo()

app.add_route('/mcinfo', mcinfo)
app.add_route('/mcinfo.json', mcinfo)

if __name__ == '__main__':
    httpd = simple_server.make_server('0.0.0.0', 3000, app)
    httpd.serve_forever()
b097075f7606563fc8ae80274e73b74dedd8129f
prepare a new folder "resources" for json files to replace python dynamic_resources
src/alfanous/Data.py
src/alfanous/Data.py
Python
0.000014
@@ -0,0 +1,128 @@ +'''
Created on Jun 15, 2012

@author: assem
'''


class Configs:
    pass


class Indexes:
    pass

class Ressources:
    pass
b171eb0c77f2d68051b48145f4e49275ed6860b9
Add tests for signup code exists method
account/tests/test_models.py
account/tests/test_models.py
Python
0
@@ -0,0 +1,1823 @@ +from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings

from django.contrib.auth.models import User

from account.models import SignupCode


class SignupCodeModelTestCase(TestCase):
    def test_exists_no_match(self):
        code = SignupCode(email='[email protected]', code='FOOFOO')
        code.save()

        self.assertFalse(SignupCode.exists(code='BARBAR'))
        self.assertFalse(SignupCode.exists(email='[email protected]'))
        self.assertFalse(SignupCode.exists(email='[email protected]', code='BARBAR'))
        self.assertFalse(SignupCode.exists())

    def test_exists_email_only_match(self):
        code = SignupCode(email='[email protected]', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='[email protected]'))

    def test_exists_code_only_match(self):
        code = SignupCode(email='[email protected]', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(code='FOOFOO'))
        self.assertTrue(SignupCode.exists(email='[email protected]', code='FOOFOO'))

    def test_exists_email_match_code_mismatch(self):
        code = SignupCode(email='[email protected]', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='[email protected]', code='BARBAR'))

    def test_exists_code_match_email_mismatch(self):
        code = SignupCode(email='[email protected]', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='[email protected]', code='FOOFOO'))

    def test_exists_both_match(self):
        code = SignupCode(email='[email protected]', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='[email protected]', code='FOOFOO'))
6c55d840ed22ec584c6adad15d89d9888b408d88
[128. Longest Consecutive Sequence][Accepted]committed by Victor
128-Longest-Consecutive-Sequence/solution.py
128-Longest-Consecutive-Sequence/solution.py
Python
0.999951
@@ -0,0 +1,1005 @@ +class Solution(object):
    def longestConsecutive(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # idea something like bucket sort,
        # put the element n at the (start,end) bucket, everytime
        # check n-1 or n+1 in the bucket, if so combine them

        bucket={}
        max_len=1
        for num in nums:
            # duplicate n pass
            if bucket.has_key(num):
                continue
            # inital n.start n.end is itself
            start=end=num
            # if has n-1
            if bucket.has_key(num-1):
                # update the start to the n-1.start
                start=bucket[num-1][0]
            if bucket.has_key(num+1):
                # update the end to the n+1.end
                end=bucket[num+1][1]
            # add the update value to the bucket
            bucket[start]=bucket[end]=bucket[num]=(start,end)
            max_len=max(end-start+1,max_len)
        return max_len
f5140f87e0e4326fe189b2f5f3ff3ac90f8db5c8
Add new heroku_worker.py to run as a Heroku worker process
blockbuster/heroku_worker.py
blockbuster/heroku_worker.py
Python
0.000001
@@ -0,0 +1,328 @@ +import redis
from rq import Worker, Queue, Connection
import os

REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:32769/1')
print(REDIS_URL)

listen = ['default']

conn = redis.from_url(REDIS_URL)

if __name__ == '__main__':
    with Connection(conn):
        worker = Worker(map(Queue, listen))
        worker.work()
0722624244d107b19a006f07fd884d47597e4eb1
Add utility class to filter text through external program
lib/filter.py
lib/filter.py
Python
0
@@ -0,0 +1,1808 @@ +from subprocess import Popen
from subprocess import PIPE
from subprocess import TimeoutExpired
import threading

from Dart import PluginLogger
from Dart.lib.plat import supress_window


_logger = PluginLogger(__name__)


class TextFilter(object):
    '''Filters text through an external program (sync).
    '''
    def __init__(self, args, timeout=10):
        self.args = args
        self.timeout = timeout
        # Encoding the external program likes to receive.
        self.in_encoding = 'utf-8'
        # Encoding the external program will emit.
        self.out_encoding = 'utf-8'

        self._proc = None

    def encode(self, text):
        return text.encode(self.in_ecoding)

    def decode(self, encoded_bytes):
        return encoded_bytes.decode(self.out_encoding)

    def clean(self, text):
        return text.replace('\r', '').rstrip()

    def _start(self):
        try:
            self._proc = Popen(self.args,
                               stdout=PIPE,
                               stderr=PIPE,
                               stdin=PIPE,
                               startupinfo=supress_window())
        except OSError as e:
            _logger.error('while starting text filter program: %s', e)
            return

    def filter(self, input_text):
        self._start()
        try:
            in_bytes = input_text.encode(self.in_encoding)
            out_bytes, err_bytes = self._proc.communicate(in_bytes,
                                                          self.timeout)
            return self.clean(self.decode(out_bytes))
        except TimeoutExpired:
            _logger.debug('text filter program response timed out')
            return None
        except Exception as e:
            _logger.error('while running TextFilter: %s', e)
            return None
c7da0ed13838150f0276c4c9f425390822b5b43b
Add serializers for API models.
vinotes/apps/api/serializers.py
vinotes/apps/api/serializers.py
Python
0
@@ -0,0 +1,900 @@ +from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Note, Trait, Wine, Winery


class WinerySerializer(serializers.ModelSerializer):
    class Meta:
        model = Winery
        fields = ('id', 'name')


class WineSerializer(serializers.ModelSerializer):
    class Meta:
        model = Wine
        fields = ('id', 'winery', 'name', 'vintage')


class TraitSerializer(serializers.ModelSerializer):
    class Meta:
        model = Trait
        fields = ('id', 'name')


class NoteSerializer(serializers.ModelSerializer):
    class Meta:
        model = Note
        fields = ('id', 'taster', 'tasted', 'wine', 'color_traits',
                  'nose_traits', 'taste_traits', 'finish_traits', 'rating')


class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'notes')
383c67da4729886602227b715f65390427ccd8bc
Create w3_1.py
w3_1.py
w3_1.py
Python
0.000482
@@ -0,0 +1,23 @@ +print ("Hello World!")
66afbaab9abe51a83d6ea9765b7b8b70d045115e
Create question2.py
dingshubo/question2.py
dingshubo/question2.py
Python
0.999772
@@ -0,0 +1,551 @@ +#_*_ coding:utf-8 _*_

#!/user/bin/python

import random

number_random = random.randint(1,100)



for chance in range(5):  # the player gets 5 tries

    number_player = input('Please enter an integer between 1 and 100: ')

    if (number_player > number_random):

        print('This number is too big')

    elif (number_player < number_random):

        print('This number is too small')

    print('You have %d tries left') % (4-chance)

    while (chance == 4):  # when the for loop reaches its last iteration

        if (number_player == number_random):

            print('Congratulations, you got it right')

            break

        else:

            print('The correct answer is: %s') % number_random

            break
3189cd139b868d74caf35aa5b7a80f748f21c231
add tool to process brian's files
scripts/import/import_brian_files.py
scripts/import/import_brian_files.py
Python
0
@@ -0,0 +1,239 @@ +import glob
import os

os.chdir("c")
for filename in glob.glob("*"):
    tokens = filename.split("_")
    huc12 = tokens[1]
    typ = tokens[2].split(".")[1]
    newfn = "/i/%s/%s/%s" % (typ, huc12, filename)
    os.rename(filename, newfn)
5806747564a9b2d35663b4e2612b23d1f5c3e961
Version bump to 1.8
pyglui/__init__.py
pyglui/__init__.py
__version__ = '1.7'
Python
0
@@ -14,7 +14,7 @@ '1. -7 +8 '
7e17363eaf8d17f0d595ca5199e59a51c7b1df65
Add the core social_pipeline.
oneflow/core/social_pipeline.py
oneflow/core/social_pipeline.py
Python
0
@@ -0,0 +1,1770 @@ +# -*- coding: utf-8 -*-
u"""
Copyright 2013-2014 Olivier Cortès <[email protected]>.

This file is part of the 1flow project.

It provides {python,django}-social-auth pipeline helpers.

1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.

1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""

import logging

# from constance import config

# from django.shortcuts import redirect

from social_auth.backends.facebook import FacebookBackend
from social_auth.backends.twitter import TwitterBackend
from social_auth.backends import google

from models import (
    TwitterAccount,
    # FacebookAccount, FacebookFeed,
)


LOGGER = logging.getLogger(__name__)


def check_feeds(social_user, user, details, request, response, backend,
                is_new=False, *args, **kwargs):
    """ Create Accounts & feeds associated with social networks. """

    try:

        if isinstance(backend, FacebookBackend):
            pass

        elif isinstance(backend, google.GoogleOAuth2Backend):
            pass

        elif isinstance(backend, TwitterBackend):
            TwitterAccount.check_social_user(social_user, user, backend)

    except:
        LOGGER.exception(u'Could not check feeds for user %s from '
                         u'backend %s.', user, social_user)
ee533a5e2a4eff99641383741e1cbe8e57c43e1f
add typing stub/compat package
gosubl/typing.py
gosubl/typing.py
Python
0
@@ -0,0 +1,1996 @@ +try:%0A%0A # ST builds %3E= 4000%0A%0A from mypy_extensions import TypedDict%0A from typing import Any%0A from typing import Callable%0A from typing import Dict%0A from typing import Generator%0A from typing import IO%0A from typing import Iterable%0A from typing import Iterator%0A from typing import List%0A from typing import Mapping%0A from typing import Optional%0A from typing import Set%0A from typing import Tuple%0A from typing import Type%0A from typing import Union%0A from typing_extensions import Protocol%0A%0Aexcept ImportError:%0A%0A # ST builds %3C 4000%0A%0A def _make_type(name: str) -%3E '_TypeMeta':%0A return _TypeMeta(name, (Type,), %7B%7D) # type: ignore%0A%0A class _TypeMeta(type):%0A def __getitem__(self, args: 'Any') -%3E 'Any':%0A if not isinstance(args, tuple):%0A args = (args,)%0A%0A name = '%7B%7D%5B%7B%7D%5D'.format(%0A str(self),%0A ', '.join(map(str, args))%0A )%0A return _make_type(name)%0A%0A def __str__(self) -%3E str:%0A return self.__name__%0A%0A class Type(metaclass=_TypeMeta): # type: ignore%0A pass%0A%0A class TypedDict(Type, dict): # type: ignore%0A def __init__(*args, **kwargs) -%3E None: # type: ignore%0A pass%0A%0A class Any(Type): # type: ignore%0A pass%0A%0A class Callable(Type): # type: ignore%0A pass%0A%0A class Dict(Type): # type: ignore%0A pass%0A%0A class Generator(Type): # type: ignore%0A pass%0A%0A class IO(Type): # type: ignore%0A pass%0A%0A class Iterable(Type): # type: ignore%0A pass%0A%0A class Iterator(Type): # type: ignore%0A pass%0A%0A class List(Type): # type: ignore%0A pass%0A%0A class Mapping(Type): # type: ignore%0A pass%0A%0A class Optional(Type): # type: ignore%0A pass%0A%0A class Set(Type): # type: ignore%0A pass%0A%0A class Tuple(Type): # type: ignore%0A pass%0A%0A class Union(Type): # type: ignore%0A pass%0A%0A Protocol = object # type: ignore%0A
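The shim above works because Python only evaluates annotations, it never enforces them: on old builds, subscripting one of the placeholder classes simply manufactures another named placeholder. A small sketch of code that stays importable under both branches (the `tally` function is hypothetical):

```python
from gosubl.typing import Dict, List, Optional

def tally(items: List[str], weights: Optional[Dict[str, int]] = None) -> int:
    # On ST builds < 4000 these annotations are fake placeholder types,
    # but since annotations are never checked at runtime, behavior is identical.
    weights = weights or {}
    return sum(weights.get(item, 1) for item in items)

print(tally(['a', 'b', 'a'], {'a': 5}))  # 11
```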
2761e3bfd8d2c8281db565e54f6e3ea687bd5663
add backfill problem_id script
private/scripts/extras/backfill_problem_id.py
private/scripts/extras/backfill_problem_id.py
Python
0.000001
@@ -0,0 +1,1859 @@ +%22%22%22%0A Copyright (c) 2015-2019 Raj Patel([email protected]), StopStalk%0A%0A Permission is hereby granted, free of charge, to any person obtaining a copy%0A of this software and associated documentation files (the %22Software%22), to deal%0A in the Software without restriction, including without limitation the rights%0A to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A copies of the Software, and to permit persons to whom the Software is%0A furnished to do so, subject to the following conditions:%0A%0A The above copyright notice and this permission notice shall be included in%0A all copies or substantial portions of the Software.%0A%0A THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A THE SOFTWARE.%0A%22%22%22%0A%0Aimport time%0Aptable = db.problem%0Astable = db.submission%0A%0Alinks = db(ptable).select(ptable.id, ptable.link)%0Aplink_to_id = dict(%5B(x.link, x.id) for x in links%5D)%0A%0ABATCH_SIZE = 25000%0Afor i in xrange(10000):%0A rows = db(stable).select(limitby=(i * BATCH_SIZE, (i + 1) * BATCH_SIZE))%0A print rows.first().id, rows.last().id,%0A updated = 0%0A for srecord in rows:%0A if srecord.problem_id is None and %5C%0A srecord.problem_link in plink_to_id:%0A srecord.update_record(problem_id=plink_to_id%5Bsrecord.problem_link%5D)%0A updated += 1%0A if updated %3E 0:%0A db.commit()%0A time.sleep(0.1)%0A print %22updated%22, updated%0A else:%0A print %22no updates%22%0A%0A
a3de0337f6e3511cc3381f92f7bbc384d7667dfd
Create xmas.py
xmas.py
xmas.py
Python
0.999876
@@ -0,0 +1,587 @@ +gifts=%5B'A Partridge in a Pear Tree', 'Two Turtle Doves, and', 'Three French Hens', 'Four Calling Birds', 'Five Golden Rings', 'Six Geese-a-Laying', 'Seven Swans-a-Swimming', 'Eight Maids-a-Milking', 'Nine Ladies Dancing', 'Ten Lords-a-Leaping', 'Eleven Pipers Piping', 'Twelve Drummers Drumming'%5D%0Aordinal=%5B'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th', 'th', 'th', 'th'%5D%0A%0Afor day in range(12):%0A print('On the ' + str(day+1) + str(ordinal%5Bday%5D) + ' day of Christmas, my true love sent to me...')%0A gift=day%0A while gift %3E= 0:%0A print(str(gifts%5Bgift%5D))%0A gift-=1%0A print('%5Cn')%0A
8fa4888dbf82d225f52b6df347372a0381c08237
Add __main__.py for running python -m grip.
grip/__main__.py
grip/__main__.py
Python
0.000006
@@ -0,0 +1,226 @@ +%22%22%22%5C%0AGrip%0A----%0A%0ARender local readme files before sending off to Github.%0A%0A:copyright: (c) 2014 by Joe Esposito.%0A:license: MIT, see LICENSE for more details.%0A%22%22%22%0A%0Afrom command import main%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
95874a5e06ff70d1cbea49321549beee5cc5abba
Create an example of storing units in HDF5
examples/store_and_retrieve_units_example.py
examples/store_and_retrieve_units_example.py
Python
0.000001
@@ -0,0 +1,1342 @@ +%22%22%22%0AAuthor: Daniel Berke, [email protected]%0ADate: October 27, 2019%0ARequirements: h5py%3E=2.10.0, unyt%3E=v2.4.0%0ANotes: This short example script shows how to save unit information attached%0Ato a %60unyt_array%60 using %60attrs%60 in HDF5, and recover it upon reading the file.%0AIt uses the Unyt package (https://github.com/yt-project/unyt) because that's %0Awhat I'm familiar with, but presumably similar options exist for Pint and%0Aastropy.units. %0A%22%22%22%0A%0Aimport h5py%0Aimport tempfile%0Aimport unyt as u%0A%0A# Set up a temporary file for this example.%0Atf = tempfile.TemporaryFile()%0Af = h5py.File(tf, 'a')%0A%0A# Create some mock data with moderately complicated units (this is the %0A# dimensional representation of Joules of energy).%0Atest_data = %5B1, 2, 3, 4, 5%5D * u.kg * ( u.m / u.s ) ** 2%0Aprint(test_data.units)%0A# kg*m**2/s**2%0A%0A# Create a data set to hold the numerical information:%0Af.create_dataset('stored data', data=test_data)%0A%0A# Save the units information as a string in %60attrs%60.%0Af%5B'stored data'%5D.attrs%5B'units'%5D = str(test_data.units)%0A%0A# Now recover the data, using the saved units information to reconstruct the%0A# original quantities.%0Areconstituted_data = u.unyt_array(f%5B'stored data'%5D,%0A units=f%5B'stored data'%5D.attrs%5B'units'%5D)%0A%0Aprint(reconstituted_data.units)%0A# kg*m**2/s**2%0A%0Aassert reconstituted_data.units == test_data.units%0A
4fe50fda289be7db3fb96450e713eb8f1a815026
Add weighted linear algorithm
autoscaler/server/scaling/algorithms/weighted.py
autoscaler/server/scaling/algorithms/weighted.py
Python
0.000596
@@ -0,0 +1,1158 @@ +import math%0A%0Afrom autoscaler.server.request_history import RequestHistory%0Afrom autoscaler.server.scaling.utils import parse_interval%0A%0A%0Aclass WeightedScalingAlgorithm:%0A def __init__(self, algorithm_config):%0A self.interval_seconds = parse_interval(%0A algorithm_config%5B'interval'%5D%0A )%0A self.requests_per_instance_interval = (%0A algorithm_config%5B'requests_per_instance_interval'%5D%0A )%0A self.weights = algorithm_config%5B'weights'%5D%0A%0A def get_instance_count(self, request_history: RequestHistory):%0A intervals = request_history.get_last_intervals(%0A self.interval_seconds, len(self.weights)%0A )%0A%0A normalized_weights = self._normalized_weights(self.weights)%0A weighted_request_count = sum(%0A len(interval) * weight%0A for weight, interval in zip(normalized_weights, intervals)%0A )%0A%0A return max(1, math.ceil(%0A weighted_request_count / self.requests_per_instance_interval)%0A )%0A%0A @staticmethod%0A def _normalized_weights(weights):%0A weight_sum = sum(weights)%0A return %5Bweight / weight_sum for weight in weights%5D%0A
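Numerically, `get_instance_count` above is a normalized dot product between the per-interval request counts and the configured weights, rounded up against per-instance capacity. A standalone sketch of just the arithmetic, with hypothetical counts and weights:

```python
import math

def weighted_instances(counts, weights, requests_per_instance_interval):
    # Normalize the weights to sum to 1, weight the interval counts,
    # then size against capacity with a floor of one instance.
    total = sum(weights)
    weighted = sum(c * w / total for c, w in zip(counts, weights))
    return max(1, math.ceil(weighted / requests_per_instance_interval))

# Newest interval saw 120 requests and carries the largest weight:
# (120*0.5 + 60/3 + 30/6) / 25 = 85 / 25 -> ceil(3.4) = 4
print(weighted_instances([120, 60, 30], [3, 2, 1], 25))  # 4
```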
5cb215814c5cd4e2c0b7b1e4561886094be5e315
version bump
djangocms_column/__init__.py
djangocms_column/__init__.py
__version__ = "1.3"
Python
0.000001
@@ -14,7 +14,7 @@ %221. -3 +4 %22%0A
a726625e13ac08d0b6c2c686de476b6e78bc0f48
Add unit test for _skeleton
dlstats/fetchers/test__skeleton.py
dlstats/fetchers/test__skeleton.py
Python
0.000001
@@ -0,0 +1,700 @@ +import unittest%0Afrom datetime import datetime%0Afrom _skeleton import Dataset%0A%0Aclass DatasetTestCase(unittest.TestCase):%0A def test_full_example(self):%0A self.assertIsInstance(Dataset(provider='Test provider',name='GDP',dataset_code='nama_gdp_fr',dimension_list=%5B%7B'name':'COUNTRY','values':%5B('FR','France'),('DE','Germany')%5D%7D%5D,doc_href='rasessr',last_update=datetime(2014,12,2)),Dataset)%0A def test_empty_doc_href(self):%0A self.assertIsInstance(Dataset(provider='Test provider',name='GDP',dataset_code='nama_gdp_fr',dimension_list=%5B%7B'name':'COUNTRY','values':%5B('FR','France'),('DE','Germany')%5D%7D%5D,last_update=datetime(2014,12,2)),Dataset)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
e54c82c336827c1fc835837006885c245a05e5cb
Add html stripper for announcements
html_stripper.py
html_stripper.py
Python
0
@@ -0,0 +1,426 @@ +from html.parser import HTMLParser%0A%0Aclass HTMLStripper(HTMLParser):%0A def __init__(self):%0A super().__init__()%0A self.reset()%0A self.strict = False%0A self.convert_charrefs= True%0A self.fed = %5B%5D%0A def handle_data(self, d):%0A self.fed.append(d)%0A def get_data(self):%0A return ''.join(self.fed)%0A%0Adef strip_tags(html):%0A s = HTMLStripper()%0A s.feed(html)%0A return s.get_data()
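A quick usage check for the record above — `strip_tags` keeps only the text nodes the parser feeds back (module name taken from the record's path):

```python
from html_stripper import strip_tags

html = '<p>Server <b>maintenance</b> starts at <i>22:00 UTC</i>.</p>'
print(strip_tags(html))  # Server maintenance starts at 22:00 UTC.
```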
20830e9fb2785eda94bf9e7c0dab70d476bc82b4
Add `sample_settings.py`
sample_settings.py
sample_settings.py
Python
0
@@ -0,0 +1,536 @@ +# Rename this file to %60settings.py%60 in deployment%0A%0A# supported_subreddits = 'india'%0Asupported_subreddits = 'india+indianbooks'%0Auser_agent = ('Goodreads, v0.1. Gives info of the book whenever goodreads'%0A 'link to a book is posted. (by /u/avinassh)')%0Ascopes = %5B'identity', 'submit', 'privatemessages', 'read'%5D%0Abe_gentle_to_reddit = True%0A%0A# reddit app%0Aapp_key = 'K...q'%0Aapp_secret = 'y...i'%0A%0A# bot account%0Aaccess_token = '3...R'%0Arefresh_token = '3...m'%0A%0A# good reads%0Agoodreads_api_key = '5...v'%0Agoodreads_api_secret = 'T...4'%0A
638c6383acf4431c95327fd0cbdb535e115e027d
Create admin util for user management.
flow-admin.py
flow-admin.py
Python
0
@@ -0,0 +1,3670 @@ +#!/usr/bin/env python%0A#%0A# To ensure you can import rhizo-server modules set PYTHONPATH%0A# to point to rhize-server base dir.%0A# E.g.%0A# export PYTHONPATH=/home/user/rhizo-server/%0A#%0A%0Afrom optparse import OptionParser%0A%0Afrom main.users.auth import create_user%0Afrom main.users.models import User, OrganizationUser%0Afrom main.resources.resource_util import find_resource, _create_folders%0Afrom main.app import db%0A%0Aif __name__ == '__main__':%0A%0A parser = OptionParser()%0A parser.add_option( '-c', %0A '--create-user', %0A dest='flow_user_spec', %0A help='Create flow user specified in the format email:username:password:fullname',%0A default='')%0A parser.add_option( '-d', %0A '--delete-user', %0A dest='delete_username', %0A help='Delete flow user specified by username',%0A default='')%0A%0A%0A (options, args) = parser.parse_args()%0A%0A if options.flow_user_spec:%0A parts = options.flow_user_spec.split(':')%0A email = parts%5B0%5D%0A username = parts%5B1%5D%0A password = parts%5B2%5D%0A fullname = parts%5B3%5D%0A assert '.' in email and '@' in email%0A%0A%0A #%0A # Create user%0A #%0A print(%22Creating user %25s%22 %25 (username))%0A user_id = create_user( email, %0A username, %0A password, %0A fullname,%0A User.STANDARD_USER)%0A%0A #%0A # Add user to flow organization%0A #%0A print(%22Creating organization user.%22)%0A org_user = OrganizationUser()%0A org_user.organization_id = find_resource('/testing').id%0A org_user.user_id = user_id%0A org_user.is_admin = False%0A db.session.add(org_user)%0A db.session.commit()%0A%0A #%0A # Create a folder for this user to store their programs%0A #%0A student_folder = 'testing/student-folders/%25s' %25 (username)%0A print(%22Creating student folder %25s.%22 %25 (student_folder))%0A _create_folders(student_folder)%0A%0A print('Created flow user: %25s' %25 (email))%0A%0A elif options.delete_username:%0A%0A #%0A # Delete the specified user by username%0A #%0A username = options.delete_username%0A user = User.query.filter(User.user_name == username).first()%0A if user is None:%0A print(%22No such user %25s.%22 %25 (username))%0A exit(1)%0A%0A #%0A # Delete user folder%0A #%0A student_folder = find_resource('/testing/student-folders/%25s' %25 (username))%0A if student_folder is not None:%0A print(%22Deleting student folder %25s.%22 %25 (student_folder.name))%0A db.session.delete(student_folder)%0A db.session.commit()%0A else:%0A print(%22No student folder to delete.%22)%0A%0A%0A #%0A # Delete organization user%0A #%0A org_id = find_resource('/testing').id%0A org_user = OrganizationUser.query.filter(%0A OrganizationUser.organization_id == org_id, %0A OrganizationUser.user_id == user.id ).first()%0A%0A if org_user is not None:%0A print(%22Deleting organization user.%22)%0A db.session.delete(org_user)%0A db.session.commit()%0A else:%0A print(%22No organization user to delete.%22)%0A%0A #%0A # Now delete the user%0A #%0A db.session.delete(user)%0A db.session.commit()%0A%0A print('Deleted flow user: %25s.' %25 (username))%0A%0A%0A
55dd21610a2ed1befed6b4560528e8a6bf3602e2
Define function to retrieve imgur credentials
imgur_cli/cli.py
imgur_cli/cli.py
Python
0.000006
@@ -0,0 +1,1209 @@ +import argparse%0Aimport logging%0Aimport os%0A%0Aimport imgurpython%0A%0Afrom collections import namedtuple%0A%0Alogger = logging.getLogger(__name__)%0A%0Adef imgur_credentials():%0A ImgurCredentials = namedtuple('ImgurCredentials', %5B'client_id', 'client_secret', 'access_token', 'refresh_token', 'mashape_key'%5D)%0A try:%0A from config import config%0A client_id = config.get('IMGUR_CLIENT_ID')%0A client_secret = config.get('IMGUR_CLIENT_SECRET')%0A access_token = config.get('IMGUR_ACCESS_TOKEN')%0A refresh_token = config.get('IMGUR_REFRESH_TOKEN')%0A mashape_key = config.get('IMGUR_MASHAPE_KEY')%0A except ImportError:%0A client_id = os.environ.get('IMGUR_CLIENT_ID')%0A client_secret = os.environ.get('IMGUR_CLIENT_SECRET')%0A access_token = os.environ.get('IMGUR_ACCESS_TOKEN')%0A refresh_token = os.environ.get('IMGUR_REFRESH_TOKEN')%0A mashape_key = os.environ.get('IMGUR_MASHAPE_KEY')%0A if not client_id or not client_secret:%0A raise imgurpython.client.ImgurClientError('Client credentials not found. Ensure you have both client id and client secret') %0A return ImgurCredentials(client_id, client_secret, access_token, refresh_token, mashape_key)%0A
d3ebb800c88be18861608f8b174cc652223ac67c
Add utils.py with get_options function
apps/ivrs/utils.py
apps/ivrs/utils.py
Python
0.000001
@@ -0,0 +1,151 @@ +def get_options(question_number):%0A if question_number == 2:%0A return %22 Press 4 or 5 %22%0A else:%0A return %22 Press 1 for Yes or 2 for No%22%0A
2c8752cd586f6d02ce8da4bc3a79660889ed7f3f
Add some minimal testing for BandRCModel to the test suite.
climlab/tests/test_bandrc.py
climlab/tests/test_bandrc.py
Python
0
@@ -0,0 +1,1127 @@ +import numpy as np%0Aimport climlab%0Aimport pytest%0A%0A# The fixtures are reusable pieces of code to set up the input to the tests.%0A# Without fixtures, we would have to do a lot of cutting and pasting%0A# I inferred which fixtures to use from the notebook%0A# Latitude-dependent grey radiation.ipynb%[email protected]()%0Adef model():%0A return climlab.BandRCModel()%0A%0A# helper for a common test pattern%0Adef _check_minmax(array, amin, amax):%0A return (np.allclose(array.min(), amin) and%0A np.allclose(array.max(), amax))%0A%0Adef test_model_creation(model):%0A %22%22%22Just make sure we can create a model.%22%22%22%0A assert len(model.Tatm)==30%0A%0Adef test_integrate_years(model):%0A %22%22%22Check that we can integrate forward the model and get the expected%0A surface temperature and water vapor.%0A Also check the climate sensitivity to doubling CO2.%22%22%22%0A model.step_forward()%0A model.integrate_years(2)%0A Ts = model.Ts.copy()%0A assert np.isclose(Ts, 275.43383753)%0A assert _check_minmax(model.q, 5.E-6, 3.23764447e-03)%0A model.absorber_vmr%5B'CO2'%5D *= 2.%0A model.integrate_years(2)%0A assert np.isclose(model.Ts - Ts, 3.180993)%0A
27899a91fc6cdf73dccc7f9c5c353b05d2433c42
add example participant client inbound drop rule for blackholing
pclnt/blackholing_test.py
pclnt/blackholing_test.py
Python
0
@@ -0,0 +1,266 @@ + %7B%0A %22inbound%22: %5B%0A %7B%0A %22cookie%22: 3,%0A %22match%22: %7B%0A %22eth_src%22: %2208:00:27:89:3b:9f%22%0A %7D,%0A %22action%22: %7B%0A %22drop%22: 0%0A %7D%0A %7D%0A %5D%0A %7D
cd910f95753a138e2df48a1370e666bee49ad1dd
Add py solution for 693. Binary Number with Alternating Bits
py/binary-number-with-alternating-bits.py
py/binary-number-with-alternating-bits.py
Python
0.000085
@@ -0,0 +1,211 @@ +class Solution(object):%0A def hasAlternatingBits(self, n):%0A %22%22%22%0A :type n: int%0A :rtype: bool%0A %22%22%22%0A power_2 = (n %5E (n %3E%3E 1)) + 1%0A return (power_2 & -power_2) == power_2%0A
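The one-liner above combines two bit tricks: for an alternating bit pattern, `n ^ (n >> 1)` produces an unbroken run of 1-bits, so adding 1 yields a power of two, and `x & -x` isolates the lowest set bit, which equals `x` exactly when `x` is a power of two. A quick check of the same logic:

```python
def has_alternating_bits(n):
    x = (n ^ (n >> 1)) + 1  # all-ones run + 1 -> power of two iff alternating
    return (x & -x) == x

print(has_alternating_bits(5))   # True  (0b101)
print(has_alternating_bits(7))   # False (0b111)
print(has_alternating_bits(10))  # True  (0b1010)
```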
b34c0ec439a997705799136e56a926649bd93e52
add new function to test whether an object is completely within the bounds of an image
plantcv/plantcv/within_frame.py
plantcv/plantcv/within_frame.py
Python
0.000054
@@ -0,0 +1,1171 @@ +import cv2 as cv2%0Aimport numpy as np%0A%0Adef within_frame(img, obj):%0A '''%0A This function tests whether the plant object is completely in the field of view%0A Input:%0A img - an image with the bounds you are interested in%0A obj - a single object, preferably after calling pcv.image_composition(), that is from within %60img%60%0A%0A Returns:%0A in_bounds - a boolean (True or False) whether the object touches the edge of the image%0A%0A :param img: numpy.ndarray%0A :param obj: str%0A :return in_bounds: boolean%0A%0A '''%0A # Check if object is touching image boundaries (QC)%0A if len(np.shape(img)) == 3:%0A ix, iy, iz = np.shape(img)%0A else:%0A ix, iy = np.shape(img)%0A size1 = ix, iy%0A frame_background = np.zeros(size1, dtype=np.uint8)%0A frame = frame_background + 1%0A frame_contour, frame_hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)%5B-2:%5D%0A ptest = %5B%5D%0A vobj = np.vstack(obj)%0A for i, c in enumerate(vobj):%0A xy = tuple(c)%0A pptest = cv2.pointPolygonTest(frame_contour%5B0%5D, xy, measureDist=False)%0A ptest.append(pptest)%0A in_bounds = all(c == 1 for c in ptest)%0A%0A return(in_bounds)%0A
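The record above answers "does the object touch the image edge?" by rasterizing a frame contour and point-testing every vertex; the same answer can be read directly off the contour coordinates. An alternative sketch under the same inputs (OpenCV contour points are stored as (x, y)):

```python
import numpy as np

def object_within_frame(img, obj):
    # True only when no contour point lies on the first or last row/column.
    h, w = img.shape[:2]
    pts = np.vstack(obj).reshape(-1, 2)
    xs, ys = pts[:, 0], pts[:, 1]
    return bool((xs > 0).all() and (xs < w - 1).all()
                and (ys > 0).all() and (ys < h - 1).all())
```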
c4040803cb670f913bc8743ee68f5a5f0721d4f8
Add game logic
backend/game.py
backend/game.py
Python
0.000059
@@ -0,0 +1,1122 @@ +# All game related code%0A%0Aimport json%0Aimport random%0A%0A%0Aclass Game():%0A def __init__(self):%0A self.players = %7B%7D%0A self.turn = None%0A self.running = False%0A%0A def add_player(self, conn, data):%0A player = Player(conn, data)%0A self.players%5Bplayer.get_name()%5D = player%0A conn.send(json.dumps(%7B'action': 'accepted', 'data': ''%7D))%0A return player%0A%0A def wait_for_answer(self, player):%0A # Initial start of game%0A if not self.running() and len(self.players) == 3:%0A starter = self.start_game()%0A data = %7B'turn': starter.get_name(), 'cards': %5B%5D%7D%0A return json.dumps(%7B'action': 'start', 'data': data%7D)%0A%0A return self.handle_round(self, player)%0A%0A def handle_round(self, player):%0A pass%0A%0A def start_game(self):%0A self.turn = random.choice(self.players)%0A return self.turn%0A%0A%0Aclass Player():%0A def __init__(self, conn, data):%0A self.name = data%5B'name'%5D%0A self.connection = conn%0A self.cards = %5B%5D%0A%0A def get_name(self):%0A return self.name%0A%0A%0Aclass Card():%0A def __init__(self):%0A pass%0A
69e22c778a576f746784270fa9971a6399433f92
Add docstring to UnivariateFilter.
examples/plot_feature_selection.py
examples/plot_feature_selection.py
""" =============================== Univariate Feature Selection =============================== An example showing univariate feature selection. Noisy (non informative) features are added to the iris data and univariate feature selection is applied. For each feature, we plot the p-values for the univariate feature selection and the corresponding weights of an SVM. We can see that univariate feature selection selects the informative features and that these have larger SVM weights. In the total set of features, only the 4 first ones are significant. We can see that they have the highest score with univariate feature selection. The SVM attributes small weights to these features, but these weight are non zero. Applying univariate feature selection before the SVM increases the SVM weight attributed to the significant features, and will thus improve classification. """ import numpy as np import pylab as pl ################################################################################ # import some data to play with # The IRIS dataset from scikits.learn import datasets, svm iris = datasets.load_iris() # Some noisy data not correlated E = np.random.normal(size=(len(iris.data), 35)) # Add the noisy data to the informative features x = np.hstack((iris.data, E)) y = iris.target ################################################################################ pl.figure(1) pl.clf() x_indices = np.arange(x.shape[-1]) ################################################################################ # Univariate feature selection from scikits.learn.feature_selection import univ_selection # As a scoring function, we use a F test for classification # We use the default selection function: the 10% most significant # features selector = univ_selection.UnivSelection( score_func=univ_selection.f_classif) selector.fit(x, y) scores = -np.log(selector.p_values_) scores /= scores.max() pl.bar(x_indices-.45, scores, width=.3, label=r'Univariate score ($-\log(p\,values)$)', color='g') ################################################################################ # Compare to the weights of an SVM clf = svm.SVC(kernel='linear') clf.fit(x, y) svm_weights = (clf.support_**2).sum(axis=0) svm_weights /= svm_weights.max() pl.bar(x_indices-.15, svm_weights, width=.3, label='SVM weight', color='r') ################################################################################ # Now fit an SVM with added feature selection selector = univ_selection.UnivSelection( estimator=clf, score_func=univ_selection.f_classif) selector.fit(x, y) svm_weights = (clf.support_**2).sum(axis=0) svm_weights /= svm_weights.max() full_svm_weights = np.zeros(selector.support_.shape) full_svm_weights[selector.support_] = svm_weights pl.bar(x_indices+.15, full_svm_weights, width=.3, label='SVM weight after univariate selection', color='b') pl.title("Comparing feature selection") pl.xlabel('Feature number') pl.yticks(()) pl.axis('tight') pl.legend() pl.show()
Python
0
@@ -1591,24 +1591,30 @@ import univ +ariate _selection %0A @@ -1612,16 +1612,33 @@ lection +as univ_selection %0A# As a @@ -1789,37 +1789,33 @@ v_selection. -Univ Select -ion +Fpr (%0A @@ -1907,17 +1907,16 @@ tor. -p _ +p values -_ )%0Asc @@ -2358,16 +2358,16 @@ eight',%0A + @@ -2382,587 +2382,8 @@ ')%0A%0A -################################################################################%0A# Now fit an SVM with added feature selection%0Aselector = univ_selection.UnivSelection(%0A estimator=clf,%0A score_func=univ_selection.f_classif)%0A%0Aselector.fit(x, y)%0Asvm_weights = (clf.support_**2).sum(axis=0)%0Asvm_weights /= svm_weights.max()%0Afull_svm_weights = np.zeros(selector.support_.shape)%0Afull_svm_weights%5Bselector.support_%5D = svm_weights%0Apl.bar(x_indices+.15, full_svm_weights, width=.3, %0A label='SVM weight after univariate selection',%0A color='b')%0A%0A%0A pl.t
1beec05941a6a34452bea6e9f60a1673c0f0925f
add base test case file
keen/tests/base_test_case.py
keen/tests/base_test_case.py
Python
0.000001
@@ -0,0 +1,22 @@ +__author__ = 'dkador'%0A
1fa849f1a0eadad9573b677d3904986d76f900eb
Create main.py
challenge_2/python/wost/main.py
challenge_2/python/wost/main.py
Python
0.000001
@@ -0,0 +1,724 @@ +%22%22%22%0APython 3.6:%0A :: Counts all the instances of all the elements in a list.%0A :: Returns all the instances with a count of 1.%0A%22%22%22%0A%0Adef find_one_in_list(a_list):%0A a_dict = %7B%7D%0A %0A for char in a_list:%0A if char not in a_dict.keys():%0A a_dict%5Bchar%5D = 1%0A else:%0A a_dict%5Bchar%5D += 1%0A %0A for letter in a_dict.keys():%0A if a_dict%5Bletter%5D == 1:%0A print(letter, end=%22 %22)%0A print()%0A %0Adef main():%0A # Returns 6, 7.%0A find_one_in_list(%5B5, 4, 3, 4, 5, 6, 1, 3, 1, 7, 8, 8%5D)%0A # Returns b.%0A find_one_in_list(%5B%22a%22, %22b%22, %22c%22, %22a%22, %22c%22, %22W%22, %22W%22%5D)%0A # Returns A, 5, r.%0A find_one_in_list(%5B%22A%22, %22b%22, %22d%22, %22r%22, 4, 5, 4, %22b%22, %22d%22%5D)%0A # Returns nothing.%0A find_one_in_list(%5B%5D)%0A %0Aif __name__ == %22__main__%22:%0A main()%0A
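The same single-occurrence filter can lean on `collections.Counter`, which does the tallying pass for you and (on modern CPython) preserves insertion order — a sketch that returns the values instead of printing them:

```python
from collections import Counter

def find_one_in_list(a_list):
    counts = Counter(a_list)
    return [item for item, n in counts.items() if n == 1]

print(find_one_in_list([5, 4, 3, 4, 5, 6, 1, 3, 1, 7, 8, 8]))  # [6, 7]
print(find_one_in_list(['a', 'b', 'c', 'a', 'c', 'W', 'W']))   # ['b']
```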
ac4679b4dcbbc3b2a29230233afc138f98cf2c42
Add the basics
anvil.py
anvil.py
Python
0.006448
@@ -0,0 +1,1782 @@ +import gzip%0Aimport io%0Aimport nbt.nbt%0Aimport pathlib%0Aimport re%0Aimport zlib%0A%0Aclass Region:%0A def __init__(self, path):%0A if isinstance(path, str):%0A path = pathlib.Path(path)%0A with path.open('rb') as f:%0A data = f.read()%0A self.locations = data%5B:4096%5D%0A self.timestamps = data%5B4096:8192%5D%0A self.data = data%5B8192:%5D%0A match = re.search('r%5C.(-?%5B0-9%5D+)%5C.(-?%5B0-9%5D+)%5C.mca$', path.name)%0A if match:%0A self.x = int(match.group(1))%0A self.z = int(match.group(2))%0A else:%0A self.x = None%0A self.z = None%0A%0A def chunk_column(self, x, z):%0A x_offset = x & 31%0A z_offset = z & 31%0A meta_offset = 4 * ((x_offset & 32) + (z_offset & 32) * 32)%0A chunk_location = self.locations%5Bmeta_offset:meta_offset + 4%5D%0A offset = chunk_location%5B0%5D * (256 ** 2) + chunk_location%5B1%5D * 256 + chunk_location%5B2%5D%0A if offset == 0:%0A return ChunkColumn(None, x=x, z=z)%0A else:%0A offset -= 2%0A sector_count = chunk_location%5B3%5D%0A return ChunkColumn(self.data%5B4096 * offset:4096 * (offset + sector_count)%5D, x=x, z=z)%0A%0Aclass ChunkColumn:%0A def __init__(self, data, *, x=None, z=None):%0A self.x = x%0A self.z = z%0A length = data%5B0%5D * (256 ** 3) + data%5B1%5D * (256 ** 2) + data%5B2%5D * 256 + data%5B3%5D%0A compression = data%5B4%5D%0A compressed_data = data%5B5:4 + length%5D%0A if compression == 1: # gzip%0A decompress = gzip.decompress%0A elif compression == 2: # zlib%0A decompress = zlib.decompress%0A else:%0A raise ValueError('Unknown compression method: %7B%7D'.format(compression))%0A self.data = nbt.nbt.NBTFile(buffer=io.BytesIO(decompress(compressed_data)))%0A
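One detail in the record above looks like a bug: `x_offset` and `z_offset` are already masked with `& 31` (so they lie in 0–31), which makes the subsequent `& 32` always evaluate to 0 — every chunk column would read the header's first entry. The conventional Anvil header index, as a hedged sketch:

```python
def location_entry_offset(x, z):
    # Anvil region headers store one 4-byte entry per chunk column,
    # ordered by (x mod 32) + (z mod 32) * 32.
    return 4 * ((x & 31) + (z & 31) * 32)

assert location_entry_offset(0, 0) == 0
assert location_entry_offset(31, 0) == 124
assert location_entry_offset(0, 1) == 128
```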
702abe6dc661fbcda04f743edc56d2938098cefa
Add checkJSON file function only for checking a JSON file against a specified schema
src/main/python/convertfiles/checkJSON.py
src/main/python/convertfiles/checkJSON.py
Python
0
@@ -0,0 +1,2438 @@ +#!/nfs/projects/c/ci3_jwaldo/MONGO/bin/python%0A%22%22%22%0AThis function will check an existing JSON newline delimited file%0Aagainst a specified schema%0A%0AInput is a newline delimited JSON file and schema file%0AOutput is a summary printout of statistics%0A%0AUsage:%0Apython checkJSON %5B-options%5D%0A%0AOPTIONS:%0A--input Name of input filename (required)%0A--output Name of output filename%0A--schema Specify JSON Schema (required)%0A--schema-name Specify JSON Schema name within json file, if it exists%0A%0A@author: G.Lopez%0A%22%22%22%0A%0Aimport convertCSVtoJSON as converter%0Afrom path import path%0Aimport json%0Afrom collections import OrderedDict%0Aimport argparse%0Aimport sys%0A%0A# Maintain Stats%0ALINE_CNT = 0%0ALINE_CNT_1000 = 1000%0A%0Adef checkJSON(inputFile, schemaFile, schemaName=None):%0A%0A%09global LINE_CNT%0A%0A%09# Read specified schema file%0A%09checkFormat = converter.convertCSVtoJSON()%0A%09schema_dict = checkFormat.readSchema( path(schemaFile), schemaName )%0A%0A%09# Read JSON file%0A%09fin = open(inputFile, 'r')%0A%09for line in fin:%0A%09%09try:%0A%09%09%09json_rec = json.loads(line, object_pairs_hook=OrderedDict)%0A%09%09%09checkFormat.cleanJSONline(json_rec, schema_dict, applySchema=False)%0A%09%09%09checkFormat.checkIllegalKeys(json_rec, fixkeys=False)%0A%09%09%09%0A%09%09%09# Print procesing Counter%0A%09%09%09LINE_CNT = LINE_CNT + 1%0A%09%09%09if LINE_CNT %25 LINE_CNT_1000 == 0:%0A%09%09%09%09sys.stdout.write(%22%5Bmain%5D: %25dk Lines processed%5Cr%22 %25 ( LINE_CNT / LINE_CNT_1000 ) )%0A%09%09%09%09sys.stdout.flush()%0A%09%09except:%0A%09%09%09print %22%5BcheckJSON%5D: Error parsing JSON line at line %25s%22 %25 LINE_CNT%0A%09%09%09pass%0A%0A%09checkFormat.printOtherStats()%0A%09checkFormat.calculateSchemaStats()%0A%09checkFormat.printSchemaStats()%0A%0A%09checkFormat.calculateOverallSummary()%0A%09checkFormat.printOverallSummary()%0A%0Adef main():%0A%09%22%22%22%0A%09Main Program to Check Specified JSON file against Schema%0A%09%22%22%22%0A%0A%09# Setup Command Line Options%0A%09text_help = '''usage: %25prog %5B-options%5D '''%0A%09text_description = ''' Check JSON schema script '''%0A%09parser = argparse.ArgumentParser( prog='PROG',%0A%09%09%09%09 description=text_description)%0A%09parser.add_argument(%22--input%22, type=str, help=%22Name of input file%22, required=True)%0A%09parser.add_argument(%22--schema%22, type=str, help=%22Specify JSON Schema%22, required=True)%0A%09parser.add_argument(%22--schema-name%22, type=str, help=%22Specify JSON Schema Name%22)%0A%09args = vars(parser.parse_args())%0A%09print %22%5Bmain%5D: arguments passed =%3E %25s%22 %25 args%0A%0A%09# Read Input File%0A%09print %22%5Bmain%5D: Reading JSON input file %25s %22 %25 args%5B'input'%5D%0A%09checkJSON( args%5B'input'%5D, args%5B'schema'%5D, args%5B'schema_name'%5D )%0A%0Aif __name__ == '__main__':%0A%09main()%0A
7330f9f1423fe7ee169569957d537441b6d72c08
Create 0106_us_city_synonyms.py
2019/0106_us_city_synonyms.py
2019/0106_us_city_synonyms.py
Python
0.00367
@@ -0,0 +1,830 @@ +#%25%25%0A%22%22%22%0ANPR 2019-01-06%0Ahttps://www.npr.org/2019/01/06/682575357/sunday-puzzle-stuck-in-the-middle%0A%0AName a major U.S. city in 10 letters. If you have the right one, you can rearrange its letters to get two 5-letter words that are synonyms. What are they?%0A%22%22%22%0A%0Aimport sys%0Asys.path.append('..')%0Aimport nprcommontools as nct%0Afrom nltk.corpus import gazetteers%0A%0A#%25%25%0ACOMMON_WORDS = frozenset(x for x in nct.get_common_words() if len(x) == 5)%0A%0A#%25%25%0AUS_CITIES = set(nct.alpha_only(x.lower()) for x in gazetteers.words('uscities.txt') if len(nct.alpha_only(x)) == 10)%0Acity_dict = nct.make_sorted_dict(US_CITIES)%0A%0A#%25%25%0Afor c1 in COMMON_WORDS:%0A my_synonyms = nct.get_synonyms(c1)%0A for c2 in my_synonyms:%0A sort_word = nct.sort_string(''.join(c1+c2))%0A if sort_word in city_dict:%0A print(c1,c2,city_dict%5Bsort_word%5D)%0A
2f08053dc04470c9a1e4802e0e90c198bb5eae63
Update app/views/account/__init__.py
app/views/account/__init__.py
app/views/account/__init__.py
Python
0
@@ -0,0 +1,101 @@ +from flask import Blueprint%0A%0Aaccount = Blueprint(%0A 'account',%0A __name__%0A)%0A%0Afrom . import views%0A
5470661c6f171f1e9da609c3bf67ece21cf6d6eb
Add example for response status code
examples/return_400.py
examples/return_400.py
Python
0.000001
@@ -0,0 +1,150 @@ +import hug%0Afrom falcon import HTTP_400%0A%[email protected]()%0Adef only_positive(positive: int, response):%0A if positive %3C 0:%0A response.status = HTTP_400
34f44cd57baf9f0a548d728e90ca0c67f47b08a1
Add tests for Resource
tests/test_resource.py
tests/test_resource.py
Python
0
@@ -0,0 +1,648 @@ +import unittest%0A%0Aimport soccermetrics%0Afrom soccermetrics import __api_version__%0Afrom soccermetrics.rest import SoccermetricsRestClient%0Afrom soccermetrics.rest.resource import Resource%0A%0Aclass ResourceTest(unittest.TestCase):%0A%0A def setUp(self):%0A base_url = %22http://api-summary.soccermetrics.net%22%0A auth = dict(account=%22APP_ID%22,api_key=%22APP_KEY%22)%0A self.resource = Resource(base_url, auth)%0A%0A def test_initialization(self):%0A self.assertEqual(self.resource.auth%5B'account'%5D,%22APP_ID%22)%0A self.assertEqual(self.resource.auth%5B'api_key'%5D,%22APP_KEY%22)%0A self.assertEqual(self.resource.endpoint,'/%25s' %25 __api_version__)
0b0d77ca77cf5359175836d68fc0bcce3829d731
Create change_config.py
static/scripts/change_hostname/change_config.py
static/scripts/change_hostname/change_config.py
Python
0.000002
@@ -0,0 +1,754 @@ +import os, sys%0Afrom change_gluu_host import Installer, FakeRemote, ChangeGluuHostname%0A%0Aname_changer = ChangeGluuHostname(%0A old_host='%3Ccurrent_hostname%3E',%0A new_host='%3Cnew_hostname%3E',%0A cert_city='%3Ccity%3E',%0A cert_mail='%3Cemail%3E',%0A cert_state='%3Cstate_or_region%3E',%0A cert_country='%3Ccountry%3E',%0A server='%3Cactual_hostname_of_server%3E',%0A ip_address='%3Cip_address_of_server%3E',%0A ldap_password=%22%3Cldap_password%3E%22,%0A os_type='%3Clinux_distro%3E',%0A local= True%0A )%0A%0Ar = name_changer.startup()%0Aif not r:%0A sys.exit(1)%0A%0Aname_changer.change_appliance_config()%0Aname_changer.change_clients()%0Aname_changer.change_uma()%0Aname_changer.change_httpd_conf()%0Aname_changer.create_new_certs()%0Aname_changer.change_host_name()%0Aname_changer.modify_etc_hosts()%0A
3cb39bc8be7fdf857ebbdd2f78cbb617b2dda104
Create PowofTwo_003.py
leetcode/231-Power-of-Two/PowofTwo_003.py
leetcode/231-Power-of-Two/PowofTwo_003.py
Python
0.000009
@@ -0,0 +1,138 @@ +class Solution:%0A # @param %7Binteger%7D n%0A # @return %7Bboolean%7D%0A def isPowerOfTwo(self, n):%0A return n %3E 0 and (n & n - 1 is 0)%0A
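Two subtleties in the record above: precedence makes `n & n - 1 is 0` parse as `(n & (n - 1)) is 0`, and the `is` identity test only works because CPython caches small integers — it is not guaranteed behavior. The portable spelling:

```python
def is_power_of_two(n):
    # n & (n - 1) clears the lowest set bit; powers of two have exactly one.
    return n > 0 and n & (n - 1) == 0

assert is_power_of_two(1) and is_power_of_two(16)
assert not is_power_of_two(0) and not is_power_of_two(12)
```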
90987fccd2f604a5224e5b1cf8f91073b173fdc8
Splitting a sentence by ending characters
split_sentences.py
split_sentences.py
Python
1
@@ -0,0 +1,535 @@ +%22%22%22 Splitting a sentence by ending characters %22%22%22%0A%0Aimport re%0A%0Ast1 = %22 Another example!! Let me contribute 0.50 cents here?? %5C%0A How about pointer '.' character inside the sentence? %5C%0A Uni Mechanical Pencil Kurutoga, Blue, 0.3mm (M310121P.33). %5C%0A Maybe there could be a multipoint delimeter?.. Just maybe... %22%0A%0Ast2 = %22One word%22%0A%0Adef split_sentences(st):%0A st = st.strip() + '. '%0A sentences = re.split(r'%5B.?!%5D%5B.?!%5Cs%5D+', st)%0A return sentences%5B:-1%5D%0A%0Aprint(split_sentences(st1))%0Aprint(split_sentences(st2))%0A
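Tracing the splitter above: appending the `'. '` sentinel before splitting guarantees a trailing empty chunk, which the `[:-1]` slice then drops, while a `.` followed by a non-terminator (as in `0.3mm` or `P.33`) never matches `[.?!][.?!\s]+` and so never splits. A compact check:

```python
import re

def split_sentences(st):
    st = st.strip() + '. '
    return re.split(r'[.?!][.?!\s]+', st)[:-1]

print(split_sentences("Wait... really?! Yes. v1.2 shipped today."))
# ['Wait', 'really', 'Yes', 'v1.2 shipped today']
```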
98d8716192bfb6b4223d84855f647e2b698b5f19
Add test for viewport inspection
tests/test_viewport.py
tests/test_viewport.py
from tests.base import IntegrationTest class TestViewportsTaskGeneration(IntegrationTest): viminput = """ === Work tasks | +work === """ vimoutput = """ === Work tasks | +work === * [ ] tag work task #{uuid} """ tasks = [ dict(description="tag work task", tags=['work']), ] def execute(self): self.command("w", regex="written$", lines=1) class TestViewportsTaskRemoval(IntegrationTest): viminput = """ === Work tasks | -work === * [ ] tag work task #{uuid} """ vimoutput = """ === Work tasks | -work === """ tasks = [ dict(description="tag work task", tags=['work']), ] def execute(self): self.command("w", regex="written$", lines=1) class TestViewportDefaultsAssigment(IntegrationTest): viminput = """ === Work tasks | +work === * [ ] tag work task """ vimoutput = """ === Work tasks | +work === * [ ] tag work task #{uuid} """ def execute(self): self.command("w", regex="written$", lines=1) assert len(self.tw.tasks.pending()) == 1 task = self.tw.tasks.pending()[0] assert task['description'] == 'tag work task' assert task['status'] == 'pending' assert task['tags'] == ['work'] class TestViewportDefaultsOverriding(IntegrationTest): viminput = """ === Work tasks | project:Home +home | project:Chores === * [ ] home task """ vimoutput = """ === Work tasks | project:Home +home | project:Chores === """ def execute(self): self.command("w", regex="written$", lines=1) assert len(self.tw.tasks.pending()) == 1 task = self.tw.tasks.pending()[0] assert task['description'] == 'home task' assert task['status'] == 'pending' assert task['project'] == 'Chores' assert task['tags'] == ['home'] class TestViewportDefaultsRemoval(IntegrationTest): viminput = """ === Work tasks | project:Home +home | project: === * [ ] home task """ vimoutput = """ === Work tasks | project:Home +home | project: === """ def execute(self): self.command("w", regex="written$", lines=1) assert len(self.tw.tasks.pending()) == 1 task = self.tw.tasks.pending()[0] assert task['description'] == 'home task' assert task['status'] == 'pending' assert task['project'] == None assert task['tags'] == ['home']
Python
0.000001
@@ -32,16 +32,39 @@ ionTest%0A +from time import sleep%0A %0A%0Aclass @@ -2481,28 +2481,777 @@ rt task%5B'tags'%5D == %5B'home'%5D%0A +%0A%0Aclass TestViewportInspection(IntegrationTest):%0A%0A viminput = %22%22%22%0A === Work tasks %7C +work ===%0A * %5B %5D tag work task #%7Buuid%7D%0A %22%22%22%0A%0A vimoutput = %22%22%22%0A ViewPort inspection:%0A --------------------%0A Name: Work tasks%0A Filter used: -DELETED +work%0A Defaults used: tags:%5B'work'%5D%0A Matching taskwarrior tasks: 1%0A Displayed tasks: 1%0A Tasks to be added:%0A Tasks to be deleted:%0A %22%22%22%0A%0A tasks = %5B%0A dict(description=%22tag work task%22, tags=%5B'work'%5D),%0A %5D%0A%0A def execute(self):%0A self.command(%22w%22, regex=%22written$%22, lines=1)%0A self.client.feedkeys('1gg')%0A self.client.feedkeys(r'%5C%3CCR%3E')%0A sleep(0.5)%0A%0A assert self.command(%22:py print vim.current.buffer%22, regex=%22%3Cbuffer taskwiki.%22)%0A
5e2a14af770ca07cdf6f3674ef54668a0a433078
hello py
helloworld.py
helloworld.py
Python
0.99991
@@ -0,0 +1,20 @@ +print %22Hello World%22;
edd28dc68b91af78da1a1d576fcb9dcb83ebd0c8
Create lin_reg.py
lin_reg.py
lin_reg.py
Python
0.00001
@@ -0,0 +1,1419 @@ +#!/usr/bin/python%0A%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Afrom scipy.signal import square%0A%0A#Mean Square error function%0Adef costf(X, y, theta):%0A m = y.shape%5B0%5D%0A #print m%0A return (1.0/m)*np.sum(np.power(np.dot(X,theta) - y, 2))%0A%0A#Gradient of error function%0Adef gradientf(X, y, theta):%0A m = y.shape%5B0%5D%0A err = np.dot(X, theta) - y%0A return (2.0/m)*np.dot(np.transpose(X), err)%0A%0A%0At = np.arange(0,10,0.01)%0Ay = 2*square(t) + 0*np.random.random(t.shape)%0A%0AX = np.array(%5B%5B1, np.sin(x), np.sin(3*x), np.sin(5*x), np.sin(7*x)%5D for x in t%5D)%0Ath = np.zeros(5)%0A%0Aerrors = %5B%5D%0Athetas = %5B%5D%0A%0A#Optimizing using gradient descent algorithm%0Anumiters = 1000%0Aalpha = 0.02 #Learning rate%0A%0Aerrors.append(costf(X,y,th))%0A%0Afor i in xrange(numiters):%0A #Gradient descent%0A grad = gradientf(X,y,th)%0A th = th - alpha*grad%0A errors.append(costf(X,y,th))%0A thetas.append(th)%0A if(i%2510 == 0):%0A print %22Iteration: %22+str(i)%0A print %22Costf: %22+ str(costf(X,y,th))%0A print %22Gradient: %22 + str(gradientf(X, t, th))%0A print %22Theta: %22+ str(th)%0A%0Ay_ = np.dot(X, th)%0A#Closed form solution%0Ath_opt = np.dot(np.linalg.pinv(X), y)%0Ay_opt = np.dot(X, th_opt)%0A%0A#Plotting results%0Aplt.plot(t, y, 'o')%0Aplt.xlabel('x')%0Aplt.ylabel('y')%0Aplt.hold(True)%0Aplt.plot(t, y_)%0Aplt.plot(t, y_opt)%0A%0Aplt.figure()%0Aplt.plot(errors)%0Aplt.title(%22Error over time%22)%0Aplt.ylabel(%22Error%22)%0Aplt.xlabel(%22Number of iterations%22)%0Aplt.show()%0A
dc854dc41929b027f393c7e341be51193b4ca7b9
Create SearchinRSArr_001.py
leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py
leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py
Python
0
@@ -0,0 +1,648 @@ +class Solution:%0A # @param %7Binteger%5B%5D%7D nums%0A # @param %7Binteger%7D target%0A # @return %7Binteger%7D%0A def search(self, nums, target):%0A l, r = 0, len(nums) - 1%0A %0A while l %3C= r:%0A m = (l + r) / 2%0A if nums%5Bm%5D == target:%0A return m%0A elif nums%5Bm%5D %3E target:%0A if nums%5Bm%5D %3E nums%5Br%5D and target %3C nums%5Bl%5D:%0A l = m + 1%0A else:%0A r = m - 1%0A else:%0A if nums%5Bm%5D %3C nums%5Br%5D and target %3E nums%5Br%5D:%0A r = m - 1%0A else:%0A l = m + 1%0A %0A return -1%0A
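A few spot checks for the rotated binary search above, run under Python 2 to match the record's `/` integer division (under Python 3, `(l + r) / 2` would produce a float index):

```python
s = Solution()
nums = [4, 5, 6, 7, 0, 1, 2]
assert s.search(nums, 0) == 4   # target sits in the rotated half
assert s.search(nums, 3) == -1  # absent value
assert s.search([1], 1) == 0    # single-element edge case
```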
a33fa25ce2537b0eb8dc33002a80aa71320c17a2
fix to sysUpTime OID
pysnmp/entity/rfc3413/ntforg.py
pysnmp/entity/rfc3413/ntforg.py
import time try: from sys import version_info except ImportError: version_info = ( 0, 0 ) # a really early version from pysnmp.entity.rfc3413 import config from pysnmp.proto.proxy import rfc2576 from pysnmp.proto.api import v2c from pysnmp.smi import error vacmID = 3 class NotificationOriginator: def __init__(self, snmpContext): self.__pendingReqs = {} self.__sendRequestHandleSource = 0L self.__context = snmpContext def processResponsePdu( self, snmpEngine, messageProcessingModel, securityModel, securityName, securityLevel, contextEngineId, contextName, pduVersion, PDU, statusInformation, sendPduHandle, (cbFun, cbCtx) ): # 3.3.6d ( origTransportDomain, origTransportAddress, origMessageProcessingModel, origSecurityModel, origSecurityName, origSecurityLevel, origContextEngineId, origContextName, origPduVersion, origPdu, origTimeout, origRetryCount, origRetries, origSendRequestHandle ) = self.__pendingReqs[sendPduHandle] del self.__pendingReqs[sendPduHandle] snmpEngine.transportDispatcher.jobFinished(id(self)) if statusInformation: if origRetries == origRetryCount: cbFun(origSendRequestHandle, statusInformation['errorIndication'], cbCtx) return # 3.3.6a sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu( snmpEngine, origTransportDomain, origTransportAddress, origMessageProcessingModel, origSecurityModel, origSecurityName, origSecurityLevel, origContextEngineId, origContextName, origPduVersion, origPdu, (self.processResponsePdu, origTimeout/1000 + time.time(), (cbFun, cbCtx)) ) snmpEngine.transportDispatcher.jobStarted(id(self)) # 3.3.6b self.__pendingReqs[sendPduHandle] = ( origTransportDomain, origTransportAddress, origMessageProcessingModel, origSecurityModel, origSecurityName, origSecurityLevel, origContextEngineId, origContextName, origPduVersion, origPdu, origTimeout, origRetryCount, origRetries + 1, sendPduHandle ) return # 3.3.6c cbFun(origSendRequestHandle, None, cbCtx) def sendNotification( self, snmpEngine, notificationTarget, notificationName, additionalVarBinds=None, cbFun=None, cbCtx=None, contextName='' ): # 3.3 ( notifyTag, notifyType ) = config.getNotificationInfo( snmpEngine, notificationTarget ) contextMibInstrumCtl = self.__context.getMibInstrum( contextName ) for targetAddrName in config.getTargetNames(snmpEngine, notifyTag): ( transportDomain, transportAddress, timeout, retryCount, params ) = config.getTargetAddr(snmpEngine, targetAddrName) ( messageProcessingModel, securityModel, securityName, securityLevel ) = config.getTargetParams(snmpEngine, params) # 3.3.1 XXX # XXX filtering's yet to be implemented # filterProfileName = config.getNotifyFilterProfile(params) # ( filterSubtree, # filterMask, # filterType ) = config.getNotifyFilter(filterProfileName) varBinds = [] # 3.3.2 & 3.3.3 sysUpTime, = contextMibInstrumCtl.mibBuilder.importSymbols( '__SNMPv2-MIB', 'sysUpTime' ) varBinds.append((sysUpTime.name + (0,), sysUpTime.syntax)) snmpTrapOid, = contextMibInstrumCtl.mibBuilder.importSymbols( 'SNMPv2-MIB', 'snmpTrapOID' ) snmpTrapVal, = apply( contextMibInstrumCtl.mibBuilder.importSymbols, notificationName ) varBinds.append( (snmpTrapOid.name + (0,), v2c.ObjectIdentifier(snmpTrapVal.name)) ) # Get notification objects names for notificationObject in snmpTrapVal.getObjects(): mibNode, = contextMibInstrumCtl.mibBuilder.importSymbols( notificationObject #, mibNode.moduleName # XXX ) varBinds.append((mibNode.name + (0,), mibNode.syntax)) if additionalVarBinds: if version_info < (1, 6): additionalVarBinds = list(additionalVarBinds) varBinds.extend(additionalVarBinds) for varName, 
varVal in varBinds: try: snmpEngine.accessControlModel[vacmID].isAccessAllowed( snmpEngine, securityModel, securityName, securityLevel, 'notify', contextName, varName ) except error.SmiError: return # 3.3.4 if notifyType == 1: pdu = v2c.SNMPv2TrapPDU() elif notifyType == 2: pdu = v2c.InformRequestPDU() else: raise RuntimeError() v2c.apiPDU.setDefaults(pdu) v2c.apiPDU.setVarBinds(pdu, varBinds) # User-side API assumes SMIv2 if messageProcessingModel == 0: pdu = rfc2576.v2ToV1(pdu) pduVersion = 0 else: pduVersion = 1 # 3.3.5 if notifyType == 1: snmpEngine.msgAndPduDsp.sendPdu( snmpEngine, transportDomain, transportAddress, messageProcessingModel, securityModel, securityName, securityLevel, self.__context.contextEngineId, contextName, pduVersion, pdu, None ) else: # 3.3.6a sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu( snmpEngine, transportDomain, transportAddress, messageProcessingModel, securityModel, securityName, securityLevel, self.__context.contextEngineId, contextName, pduVersion, pdu, (self.processResponsePdu, timeout/1000 + time.time(), (cbFun, cbCtx)) ) # 3.3.6b self.__pendingReqs[sendPduHandle] = ( transportDomain, transportAddress, messageProcessingModel, securityModel, securityName, securityLevel, self.__context.contextEngineId, contextName, pduVersion, pdu, timeout, retryCount, 1, self.__sendRequestHandleSource ) snmpEngine.transportDispatcher.jobStarted(id(self)) # XXX # move/group/implement config setting/retrieval at a stand-alone module
Python
0.015655
@@ -4315,23 +4315,16 @@ ime.name - + (0,) , sysUpT
a10554b81d4def386b016698c1e7dd771cecd35b
fix automatic testing
python/qidoc/test/test_qidoc.py
python/qidoc/test/test_qidoc.py
## Copyright (c) 2012 Aldebaran Robotics. All rights reserved. ## Use of this source code is governed by a BSD-style license that can be ## found in the COPYING file. import os import tempfile import unittest import qidoc.core import qibuild class TestQiDoc(unittest.TestCase): def setUp(self): self.tmp = tempfile.mkdtemp(prefix="tmp-qidoc") self.in_dir = os.path.join(self.tmp, "in") self.out_dir = os.path.join(self.tmp, "out") this_dir = os.path.dirname(__file__) qibuild.sh.install(os.path.join(this_dir, "in"), self.in_dir, quiet=True) self.qidoc_builder = qidoc.core.QiDocBuilder(self.in_dir, self.out_dir) def tearDown(self): qibuild.sh.rm(self.tmp) def test_build(self): opts = dict() opts["version"] = 1.42 self.qidoc_builder.build(opts) submodule_zip = os.path.join(self.out_dir, "qibuild", "_downloads", "submodule.zip") self.assertTrue(os.path.exists(submodule_zip)) def test_cfg_parse(self): qidoc_cfg = self.qidoc_builder.config qibuild_sphinx = self.qidoc_builder.sphinxdocs["qibuild"] self.assertEqual(qibuild_sphinx.name, "qibuild") self.assertEqual(qibuild_sphinx.src , os.path.join(self.in_dir, "qibuild", "doc")) doc_sphinx = self.qidoc_builder.sphinxdocs["doc"] self.assertEqual(doc_sphinx.depends, ["qibuild"]) libalcommon = self.qidoc_builder.doxydocs["libalcommon"] libalvision = self.qidoc_builder.doxydocs["libalvision"] self.assertEqual(libalcommon.name, "libalcommon") self.assertEqual(libalvision.name, "libalvision") self.assertEqual(libalcommon.src , os.path.join(self.in_dir, "libnaoqi", "libalcommon")) self.assertEqual(libalvision.src , os.path.join(self.in_dir, "libnaoqi", "libalvision")) self.assertEqual(libalcommon.dest, os.path.join(self.out_dir, "ref", "libalcommon")) self.assertEqual(libalvision.dest, os.path.join(self.out_dir, "ref", "libalvision")) self.assertEqual(qidoc_cfg.templates.repo, "aldeb-templates") def test_sorting(self): docs = self.qidoc_builder.sort_sphinx() names = [d.name for d in docs] self.assertEqual(names, ['qibuild', 'doc']) docs = self.qidoc_builder.sort_doxygen() names = [d.name for d in docs] self.assertEqual(names, ['libqi', 'libalcommon', 'libalvision']) def test_intersphinx_mapping(self): mapping = self.qidoc_builder.get_intersphinx_mapping("doc") self.assertEqual(mapping, {"qibuild": (os.path.join(self.out_dir, "qibuild"), None)} ) def test_doxygen_mapping(self): mapping = self.qidoc_builder.get_doxygen_mapping("libalvision") expected = { os.path.join(self.out_dir, "doxytags", "libalcommon.tag"): "../libalcommon", os.path.join(self.out_dir, "doxytags", "libqi.tag"): "../libqi", } self.assertEqual(mapping, expected) if __name__ == "__main__": unittest.main()
Python
0.000225
@@ -1047,55 +1047,8 @@ f):%0A - qidoc_cfg = self.qidoc_builder.config%0A%0A @@ -2067,78 +2067,8 @@ ))%0A%0A - self.assertEqual(qidoc_cfg.templates.repo, %22aldeb-templates%22)%0A %0A%0A
3205e44282f2a5e6a266c99d5c1e29f3e5fc132c
fix tests
tests/py/test_pages.py
tests/py/test_pages.py
from __future__ import print_function, unicode_literals from aspen import Response from gratipay.security.user import SESSION from gratipay.testing import Harness from gratipay.wireup import find_files class TestPages(Harness): def browse(self, **kw): alice = self.make_participant('alice', claimed_time='now') alice.insert_into_communities(True, 'Wonderland', 'wonderland') i = len(self.client.www_root) for spt in find_files(self.client.www_root, '*.spt'): url = spt[i:-4].replace('/%username/', 'alice') \ .replace('/for/%slug/', '/for/wonderland/') \ .replace('/%platform/', '/github/') \ .replace('/%user_name/', '/Gratipay/') try: r = self.client.GET(url, **kw) except Response as r: pass assert r.code < 500 def test_anon_can_browse(self): self.browse() def test_new_participant_can_browse(self): self.browse(auth_as='alice') def test_profile(self): self.make_participant('cheese', claimed_time='now') expected = "I'm grateful for gifts" actual = self.client.GET('/cheese/').body.decode('utf8') # deal with cent sign assert expected in actual def test_widget(self): self.make_participant('cheese', claimed_time='now') expected = "javascript: window.open" actual = self.client.GET('/cheese/widget.html').body assert expected in actual def test_bank_account(self): expected = "add<br> or change your bank account" actual = self.client.GET('/bank-account.html').body assert expected in actual def test_credit_card(self): expected = "add<br> or change your credit card" actual = self.client.GET('/credit-card.html').body assert expected in actual def test_github_associate(self): assert self.client.GxT('/on/github/associate').code == 400 def test_twitter_associate(self): assert self.client.GxT('/on/twitter/associate').code == 400 def test_about(self): expected = "small weekly cash" actual = self.client.GET('/about/').body assert expected in actual def test_about_stats(self): expected = "have joined Gratipay" actual = self.client.GET('/about/stats.html').body assert expected in actual def test_about_charts(self): expected = "Money transferred" actual = self.client.GET('/about/charts.html').body assert expected in actual def test_about_faq(self): expected = "What is Gratipay?" 
actual = self.client.GET('/about/faq.html').body.decode('utf8') assert expected in actual def test_about_teams(self): expected = "About teams" actual = self.client.GET('/about/teams/').body.decode('utf8') assert expected in actual def test_404(self): response = self.client.GET('/about/four-oh-four.html', raise_immediately=False) assert "Page Not Found" in response.body assert "{%" not in response.body def test_for_contributors_redirects_to_inside_gratipay(self): loc = self.client.GxT('/for/contributors/').headers['Location'] assert loc == 'http://inside.gratipay.com/' def test_mission_statement_also_redirects(self): assert self.client.GxT('/for/contributors/mission-statement.html').code == 302 def test_bank_account_json(self): assert self.client.GxT('/bank-account.json').code == 404 def test_credit_card_json(self): assert self.client.GxT('/credit-card.json').code == 404 def test_anonymous_sign_out_redirects(self): response = self.client.PxST('/sign-out.html') assert response.code == 302 assert response.headers['Location'] == '/' def test_sign_out_overwrites_session_cookie(self): self.make_participant('alice') response = self.client.PxST('/sign-out.html', auth_as='alice') assert response.code == 302 assert response.headers.cookie[SESSION].value == '' def test_sign_out_doesnt_redirect_xhr(self): self.make_participant('alice') response = self.client.PxST('/sign-out.html', auth_as='alice', HTTP_X_REQUESTED_WITH=b'XMLHttpRequest') assert response.code == 200 def test_receipts_signed_in(self): self.make_participant('alice', claimed_time='now') self.db.run("INSERT INTO exchanges (id, participant, amount, fee) " "VALUES(100,'alice',1,0.1)") request = self.client.GET("/alice/receipts/100.html", auth_as="alice") assert request.code == 200 def test_account_page_available_balance(self): self.make_participant('alice', claimed_time='now') self.db.run("UPDATE participants SET balance = 123.00 WHERE username = 'alice'") actual = self.client.GET("/alice/account/", auth_as="alice").body expected = "123" assert expected in actual def test_giving_page(self): alice = self.make_participant('alice', claimed_time='now') bob = self.make_participant('bob', claimed_time='now') alice.set_tip_to(bob, "1.00") actual = self.client.GET("/alice/giving/", auth_as="alice").body expected = "bob" assert expected in actual def test_giving_page_shows_unclaimed(self): alice = self.make_participant('alice', claimed_time='now') emma = self.make_elsewhere('github', 58946, 'emma').participant alice.set_tip_to(emma, "1.00") actual = self.client.GET("/alice/giving/", auth_as="alice").body expected1 = "emma" expected2 = "goes unclaimed" assert expected1 in actual assert expected2 in actual def test_giving_page_shows_cancelled(self): alice = self.make_participant('alice', claimed_time='now') bob = self.make_participant('bob', claimed_time='now') alice.set_tip_to(bob, "1.00") alice.set_tip_to(bob, "0.00") actual = self.client.GET("/alice/giving/", auth_as="alice").body expected1 = "bob" expected2 = "cancelled 1 tip" assert expected1 in actual assert expected2 in actual def test_new_participant_can_edit_profile(self): self.make_participant('alice', claimed_time='now') body = self.client.GET("/alice/", auth_as="alice").body assert b'Edit' in body
Python
0.000001
@@ -2166,25 +2166,29 @@ = %22 -small weekly cash +give money every week %22%0A @@ -2836,15 +2836,9 @@ = %22 -About t +T eams @@ -3076,13 +3076,8 @@ rt %22 -Page Not @@ -5761,14 +5761,9 @@ = %22 -goes u +U ncla @@ -6178,88 +6178,20 @@ -expected1 = %22bob%22%0A expected2 = %22cancelled 1 tip%22%0A assert expected1 +assert %22bob%22 in @@ -6208,33 +6208,35 @@ assert -expected2 +%22Cancelled%22 in actual%0A%0A
b57c24b23fa9566178455da895ea63baf6e16ff4
Test cases to verify parsing of bitwise encoded PIDs
tests/scanner_tests.py
tests/scanner_tests.py
Python
0
@@ -0,0 +1,2007 @@ +from shadetree.obd.scanner import decode_bitwise_pids%0A%0ADURANGO_SUPPORTED_PIDS_RESPONSE = 'BE 3E B8 10 '%0AJETTA_DIESEL_SUPPORTED_PIDS_RESPONSE = '98 3B 80 19 '%0A%0A%0Adef test_decode_bitwise_pids_durango():%0A %22%22%22%0A Verify we correctly parse information about supported PIDs on a 1999 Dodge Durango%0A %22%22%22%0A supported_pids = decode_bitwise_pids(DURANGO_SUPPORTED_PIDS_RESPONSE)%0A assert supported_pids == %7B%0A '01': True,%0A '02': False,%0A '03': True,%0A '04': True,%0A '05': True,%0A '06': True,%0A '07': True,%0A '08': False,%0A '09': False,%0A '0A': False,%0A '0B': True,%0A '0C': True,%0A '0D': True,%0A '0E': True,%0A '0F': True,%0A '10': False,%0A '11': True,%0A '12': False,%0A '13': True,%0A '14': True,%0A '15': True,%0A '16': False,%0A '17': False,%0A '18': False,%0A '19': False,%0A '1A': False,%0A '1B': False,%0A '1C': True,%0A '1D': False,%0A '1E': False,%0A '1F': False,%0A '20': False%0A %7D%0A%0A%0Adef test_decode_bitwise_pids_jetta_diesel():%0A %22%22%22%0A Verify we correctly parse information about supported PIDs on a 2004 Jetta Diesel Wagon%0A %22%22%22%0A supported_pids = decode_bitwise_pids(JETTA_DIESEL_SUPPORTED_PIDS_RESPONSE)%0A assert supported_pids == %7B%0A '01': True,%0A '02': False,%0A '03': False,%0A '04': True,%0A '05': True,%0A '06': False,%0A '07': False,%0A '08': False,%0A '09': False,%0A '0A': False,%0A '0B': True,%0A '0C': True,%0A '0D': True,%0A '0E': False,%0A '0F': True,%0A '10': True,%0A '11': True,%0A '12': False,%0A '13': False,%0A '14': False,%0A '15': False,%0A '16': False,%0A '17': False,%0A '18': False,%0A '19': False,%0A '1A': False,%0A '1B': False,%0A '1C': True,%0A '1D': True,%0A '1E': False,%0A '1F': False,%0A '20': True%0A %7D
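The fixtures above pin down the contract for `decode_bitwise_pids` without showing it. One implementation consistent with both expected dictionaries — a hypothetical sketch, not necessarily shadetree's actual code — treats each space-separated hex byte as eight support flags, most significant bit first, covering PIDs 0x01 through 0x20:

```python
def decode_bitwise_pids(response):
    # 'BE 3E B8 10 ' -> {'01': True, '02': False, ..., '20': False}
    supported = {}
    pid = 1
    for token in response.split():
        value = int(token, 16)
        for bit in range(7, -1, -1):
            supported['%02X' % pid] = bool(value & (1 << bit))
            pid += 1
    return supported
```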
0eb28e89a5c5453a8337e031dd71a5019d828aab
Remove radmin credentials from create_heat_client
trove/common/remote.py
trove/common/remote.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from trove.common import cfg from trove.openstack.common.importutils import import_class from cinderclient.v2 import client as CinderClient from heatclient.v1 import client as HeatClient from novaclient.v1_1.client import Client from swiftclient.client import Connection CONF = cfg.CONF COMPUTE_URL = CONF.nova_compute_url PROXY_AUTH_URL = CONF.trove_auth_url VOLUME_URL = CONF.cinder_url OBJECT_STORE_URL = CONF.swift_url USE_SNET = CONF.backup_use_snet HEAT_URL = CONF.heat_url def dns_client(context): from trove.dns.manager import DnsManager return DnsManager() def guest_client(context, id): from trove.guestagent.api import API return API(context, id) def nova_client(context): client = Client(context.user, context.auth_token, project_id=context.tenant, auth_url=PROXY_AUTH_URL) client.client.auth_token = context.auth_token client.client.management_url = "%s/%s/" % (COMPUTE_URL, context.tenant) return client def create_admin_nova_client(context): """ Creates client that uses trove admin credentials :return: a client for nova for the trove admin """ client = create_nova_client(context) client.client.auth_token = None return client def cinder_client(context): client = CinderClient.Client(context.user, context.auth_token, project_id=context.tenant, auth_url=PROXY_AUTH_URL) client.client.auth_token = context.auth_token client.client.management_url = "%s/%s/" % (VOLUME_URL, context.tenant) return client def heat_client(context): endpoint = "%s/%s/" % (HEAT_URL, context.tenant) client = HeatClient.Client(username=context.user, password="radmin", token=context.auth_token, os_no_client_auth=True, endpoint=endpoint) return client def swift_client(context): client = Connection(preauthurl=OBJECT_STORE_URL + context.tenant, preauthtoken=context.auth_token, tenant_name=context.tenant, snet=USE_SNET) return client create_dns_client = import_class(CONF.remote_dns_client) create_guest_client = import_class(CONF.remote_guest_client) create_nova_client = import_class(CONF.remote_nova_client) create_swift_client = import_class(CONF.remote_swift_client) create_cinder_client = import_class(CONF.remote_cinder_client) create_heat_client = import_class(CONF.remote_heat_client)
Python
0.000006
@@ -2388,112 +2388,8 @@ ent( -username=context.user,%0A password=%22radmin%22,%0A toke
7a9bb7d412ccfa4921dc691232c1192bbb2789cd
Add rudimentary swarming service.
dashboard/dashboard/services/swarming_service.py
dashboard/dashboard/services/swarming_service.py
Python
0.000001
@@ -0,0 +1,1910 @@ +# Copyright 2016 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0A%22%22%22Functions for interfacing with the Chromium Swarming Server.%0A%0AThe Swarming Server is a task distribution service. It can be used to kick off%0Aa test run.%0A%0AAPI explorer: https://goo.gl/uxPUZo%0A%22%22%22%0A%0A# TODO(dtu): This module is very much a work in progress. It's not clear whether%0A# the parameters are the right ones to pass, whether it's the right way to pass%0A# the parameters (as opposed to having a data object, whether the functions%0A# should be encapsulated in the data object, or whether this is at the right%0A# abstraction level.%0A%0Afrom apiclient import discovery%0A%0Afrom dashboard import utils%0A%0A%0A_DISCOVERY_URL = ('https://chromium-swarm.appspot.com/_ah/api'%0A '/discovery/v1/apis/%7Bapi%7D/%7BapiVersion%7D/rest')%0A%0A%0Adef New(name, user, bot_id, isolated_hash, extra_args=None):%0A %22%22%22Create a new Swarming task.%22%22%22%0A if not extra_args:%0A extra_args = %5B%5D%0A%0A swarming = _DiscoverService()%0A request = swarming.tasks().new(body=%7B%0A 'name': name,%0A 'user': user,%0A 'priority': '100',%0A 'expiration_secs': '600',%0A 'properties': %7B%0A 'inputs_ref': %7B%0A 'isolated': isolated_hash,%0A %7D,%0A 'extra_args': extra_args,%0A 'dimensions': %5B%0A %7B'key': 'id', 'value': bot_id%7D,%0A %7B'key': 'pool', 'value': 'Chrome-perf'%7D,%0A %5D,%0A 'execution_timeout_secs': '3600',%0A 'io_timeout_secs': '3600',%0A %7D,%0A 'tags': %5B%0A 'id:%25s-b1' %25 bot_id,%0A 'pool:Chrome-perf',%0A %5D,%0A %7D)%0A return request.execute()%0A%0A%0Adef Get(task_id):%0A del task_id%0A raise NotImplementedError()%0A%0A%0Adef _DiscoverService():%0A return discovery.build('swarming', 'v1', discoveryServiceUrl=_DISCOVERY_URL,%0A http=utils.ServiceAccountHttp())%0A
1a3839a083293200862ea21283c9c4d82a836846
Add test for profiles.
tests/test_catalyst.py
tests/test_catalyst.py
Python
0
@@ -0,0 +1,809 @@ +%0Afrom vdm.catalyst import DisambiguationEngine%0A%0Adef pretty(raw):%0A %22%22%22%0A Pretty print xml.%0A %22%22%22%0A import xml.dom.minidom%0A xml = xml.dom.minidom.parseString(raw)%0A pretty = xml.toprettyxml()%0A return pretty%0A%0A%0Adef test_profile():%0A #Basic info about a person.%0A p = %5B%0A 'Josiah',%0A 'Carberry',%0A None,%0A '[email protected]',%0A %5B'null'%5D,%0A %5B'null'%5D%0A %5D%0A%0A disambig = DisambiguationEngine()%0A disambig.affiliation_strings = %5B'Sample University'%5D%0A%0A doc = disambig.build_doc(*p)%0A%0A #Basic verification that XML contains what we expect.%0A assert('%3CFirst%3EJosiah%3C/First%3E' in doc)%0A assert('%3CLast%3ECarberry%3C/Last%3E' in doc)%0A assert('%3Cemail%[email protected]%3C/email%3E' in doc)%0A assert('%3CAffiliation%3E%25Sample University%25%3C/Affiliation%3E' in doc)%0A
15b69945a209515c236d8ed788e824a895ef6859
Create uvcontinuum.py
xmps/color_selection/uvcontinuum.py
xmps/color_selection/uvcontinuum.py
Python
0.000006
@@ -0,0 +1 @@ +%0A
4826764c24fca8204322f88adfde75968b3985ee
add wrapper to start bucky from source tree
bucky.py
bucky.py
Python
0
@@ -0,0 +1,92 @@ +#!/usr/bin/env python%0A%0Aimport bucky.main%0A%0Aif __name__ == '__main__':%0A bucky.main.main()%0A%0A
c757c6ad714afb393c65c1b82bca31de357332fc
Add test coverage for utility module
python/util_test.py
python/util_test.py
Python
0
@@ -0,0 +1,1864 @@ +#%0A# (C) Copyright IBM Corp. 2017%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A#%0A%0Aimport os%0Aimport sys%0Aimport unittest%0Aimport tempfile%0Aimport util%0A%0Aclass TestUtils(unittest.TestCase):%0A%0A def setUp(self):%0A %22%22%22 capture stdout to a temp file %22%22%22%0A self.tempFile = tempfile.TemporaryFile()%0A os.dup2(self.tempFile.fileno(), sys.stdout.fileno())%0A%0A def tearDown(self):%0A %22%22%22 remove temp file %22%22%22%0A self.tempFile.close()%0A%0A def test_output_is_clean_when_debug_is_disabled(self):%0A util.isDebugging = False%0A util.debug_print('Debug Message')%0A self.assertEqual(self._readOutput(), '', 'Should not write messages when debug is disabled')%0A%0A def test_output_has_content_when_debug_is_enabled(self):%0A util.isDebugging = True%0A util.debug_print('Debug Message')%0A self.assertEqual(self._readOutput(), 'Debug Message', 'Should write messages when debug is enabled')%0A%0A def test_output_has_content_when_byte_array_message_is_passed(self):%0A util.isDebugging = True%0A util.debug_print(b'Binary Debug Message')%0A self.assertEqual(self._readOutput(), 'Binary Debug Message', 'Should write messages when debug is enabled')%0A%0A def _readOutput(self):%0A self.tempFile.seek(0)%0A return self.tempFile.read().decode().rstrip()%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A%0A
8ee2f2b4c3a0ac40c6b7582a2cf3724f30f41dae
Add data migration
workshops/migrations/0035_auto_20150107_1205.py
workshops/migrations/0035_auto_20150107_1205.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('workshops', '0034_auto_20150107_1200'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Project',
            new_name='Tag',
        ),
        migrations.RemoveField(
            model_name='event',
            name='project',
        ),
        migrations.AddField(
            model_name='event',
            name='tags',
            field=models.ManyToManyField(to='workshops.Tag'),
            preserve_default=True,
        ),
    ]
Python
0.999708
@@ -101,16 +101,300 @@ tions%0A%0A%0A +def copy_project_to_tags(apps, schema_editor):%0A Event = apps.get_model('workshops', 'Event')%0A for event in Event.objects.all().exclude(project=None):%0A tag = event.project%0A print('add %7B%7D to %7B%7D'.format(tag, event))%0A event.tags.add(tag)%0A event.save()%0A%0A%0A class Mi @@ -629,111 +629,8 @@ ),%0A - migrations.RemoveField(%0A model_name='event',%0A name='project',%0A ),%0A @@ -769,24 +769,24 @@ hops.Tag'),%0A - @@ -815,18 +815,173 @@ %0A ),%0A + migrations.RunPython(copy_project_to_tags),%0A migrations.RemoveField(%0A model_name='event',%0A name='project',%0A ),%0A %5D%0A
660d04ac87ea05032a0c19b293fd237bda15fad9
tempson main class
tempson/tempson.py
tempson/tempson.py
Python
0.999092
@@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*-%0A%0Adef render():%0A print %221234%22%0A
3c2c6002cf25dab301044f2dc4c2c3bbd99e121e
add script file
get-polymer-imports.py
get-polymer-imports.py
Python
0.000002
@@ -0,0 +1,1677 @@ +#!/usr/bin/env python%0A%0Aimport os%0Aimport sys%0A%0A#rootDir = %22bower_components%22%0A%0AnumArgs = len(sys.argv)%0A%0Aif numArgs %3C= 1:%0A print 'usage: get_all_imports.py %3Cbower_components directory%3E %5Bprefix (default %22..%22)%5D'%0A exit(1)%0A%0ArootDir = sys.argv%5B1%5D%0A%0Aif not (rootDir == %22bower_components%22 or rootDir == %22components%22):%0A print 'Cowardly refusing to search non bower directory %22' + rootDir + '%22'%0A exit(1)%0A%0AbowerPrefix = %22..%22%0Aif numArgs %3E= 3:%0A bowerPrefix = sys.argv%5B2%5D%0A%0Adef shouldInclude(f, path):%0A blacklisted = %5B'src', 'demo', 'test', 'polymer', 'web-animations'%5D%0A for blacklist in blacklisted:%0A if blacklist in path: return False%0A fileName, extension = os.path.splitext(f)%0A return extension == %22.html%22 and fileName != %22index%22%0A%0Adef getImports(dir):%0A imports = %5B%5D%0A for root, dirs, files in os.walk(dir):%0A path = root.split('/')%0A prefix = os.path.join(bowerPrefix, root)%0A# print (len(path) - 1) *'---' , os.path.basename(root)%0A for file in files:%0A if shouldInclude(file, prefix):%0A i = os.path.join(prefix, file)%0A# print %22adding import: %22, i%0A imports.append(i)%0A return imports%0A%0Adef tagify(i):%0A importTag = '%3Clink rel=%22import%22 href=%22' %0A importTerminator = '%22%3E'%0A return importTag + i + importTerminator%0A%0Adef htmlify(imports):%0A html = %5B%5D%0A for i in imports:%0A html.append(tagify(i))%0A return html%0A%0A# polymer is special%0Apolymer = os.path.join(bowerPrefix, rootDir, %22polymer/polymer.html%22)%0A%0Adef printHtml(html):%0A print tagify(polymer)%0A for tag in html:%0A print tag%0A%0Aimports = getImports(rootDir)%0Ahtml = htmlify(imports)%0A%0AprintHtml(html)%0A
30dcfef191666951a4084a4b9d9c135c9edb5de8
Create check.py
check.py
check.py
Python
0.000001
@@ -0,0 +1,1476 @@ +# -*- coding: utf-8 -*-%0A%0A__author__ = 'https://github.com/password123456/'%0A%0Aimport sys%0Areload(sys)%0Asys.setdefaultencoding('utf-8')%0A%0Aimport requests%0A%0Aclass bcolors:%0A HEADER = '%5C033%5B95m'%0A OKBLUE = '%5C033%5B94m'%0A OKGREEN = '%5C033%5B92m'%0A WARNING = '%5C033%5B93m'%0A FAIL = '%5C033%5B91m'%0A ENDC = '%5C033%5B0m'%0A BOLD = '%5C033%5B1m'%0A UNDERLINE = '%5C033%5B4m'%0A%0Adef DO_CHECK_CERTIFICATE(url):%0A try:%0A user_agent = %7B'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36', 'Connection':'keep-alive'%7D%0A r = requests.get(url, headers=user_agent, verify=True, timeout=05)%0A result = '%25s %25s %25s' %25 ( url, r.status_code, r.headers%5B'server'%5D)%0A print '%25s%5B-%5D OK::%25s %25s %25s' %25 (bcolors.OKGREEN, bcolors.OKBLUE, result, bcolors.ENDC)%0A except Exception as e:%0A error = '%25s' %25 e%0A if 'CERTIFICATE_VERIFY_FAILED' in error:%0A print '%25s%5B-%5D ERROR::%25s %25s CERTIFICATE_VERIFY_FAILED %25s' %25 (bcolors.WARNING, bcolors.FAIL, url, bcolors.ENDC)%0A else:%0A r.close()%0A%0Adef READ_URL():%0A f = open('url.txt', 'r')%0A%0A for line in f.readlines():%0A line = line.strip()%0A line = 'https://%25s' %25 line%0A DO_CHECK_CERTIFICATE(line)%0A%0Adef main():%0A READ_URL()%0A%0Aif __name__ == '__main__':%0A try:%0A main()%0A except KeyboardInterrupt:%0A sys.exit(0)%0A except Exception, e:%0A print '%25s%5B-%5D Exception::%25s%25s' %25 (bcolors.WARNING, e, bcolors.ENDC)%0A
c5ae855af4c999ab2cbf6d4b5c77a0f04a84c13a
Update design-search-autocomplete-system.py
Python/design-search-autocomplete-system.py
Python/design-search-autocomplete-system.py
# Time:  O(p^2), p is the length of the prefix
# Space: O(p * t + s), t is the number of nodes of trie
#                    , s is the size of the sentences

class TrieNode(object):

    def __init__(self):
        self.__TOP_COUNT = 3
        self.infos = []
        self.leaves = {}

    def insert(self, s, times):
        cur = self
        cur.add_info(s, times)
        for c in s:
            if c not in cur.leaves:
                cur.leaves[c] = TrieNode()
            cur = cur.leaves[c]
            cur.add_info(s, times)

    def add_info(self, s, times):
        for p in self.infos:
            if p[1] == s:
                p[0] = -times
                break
        else:
            self.infos.append([-times, s])
        self.infos.sort()
        if len(self.infos) > self.__TOP_COUNT:
            self.infos.pop()


class AutocompleteSystem(object):
    def __init__(self, sentences, times):
        """
        :type sentences: List[str]
        :type times: List[int]
        """
        self.__trie = TrieNode()
        self.__cur_node = self.__trie
        self.__search = []
        self.__sentence_to_count = collections.defaultdict(int)
        for sentence, count in zip(sentences, times):
            self.__sentence_to_count[sentence] = count
            self.__trie.insert(sentence, count)

    def input(self, c):
        """
        :type c: str
        :rtype: List[str]
        """
        result = []
        if c == '#':
            self.__sentence_to_count["".join(self.__search)] += 1
            self.__trie.insert("".join(self.__search),
                               self.__sentence_to_count["".join(self.__search)])
            self.__cur_node = self.__trie
            self.__search = []
        else:
            self.__search.append(c)
            if self.__cur_node:
                if c not in self.__cur_node.leaves:
                    self.__cur_node = None
                    return []
                self.__cur_node = self.__cur_node.leaves[c]
                result = [p[1] for p in self.__cur_node.infos]
        return result



# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c)
Python
0.000001
@@ -882,16 +882,17 @@ bject):%0A +%0A def @@ -2078,17 +2078,16 @@ %0A%0A -%0A # Your A
0b9810227b91b7ee7bb58cee2dccec992c752768
add xmpp plugin
gozerlib/plugs/xmpp.py
gozerlib/plugs/xmpp.py
Python
0
@@ -0,0 +1,695 @@ +# gozerlib/plugs/xmpp.py%0A#%0A#%0A%0A%22%22%22 xmpp related commands. %22%22%22%0A%0A## gozerlib imports%0A%0Afrom gozerlib.commands import cmnds%0Afrom gozerlib.examples import examples%0Afrom gozerlib.fleet import fleet%0A%0A## commands%0A%0Adef handle_xmppinvite(bot, event):%0A %22%22%22 invite (subscribe to) a different user. %22%22%22%0A if not event.rest:%0A event.missing(%22%3Clist of jids%3E%22)%0A return%0A bot = fleet.getfirstjabber()%0A if bot:%0A for jid in event.args:%0A bot.invite(jid)%0A event.done()%0A else:%0A event.reply(%22can't find jabber bot in fleet%22)%0A%0Acmnds.add(%22xmpp-invite%22, handle_xmppinvite, 'OPER')%0Aexamples.add(%22xmpp-invite%22, %22invite a user.%22, %22xmpp-invite [email protected]%22)%0A
e1fad0e5759908b3c1f6d3bafa2110cb4c26b7e1
Add get_jpp_env command...
km3pipe/shell.py
km3pipe/shell.py
Python
0
@@ -0,0 +1,957 @@ +# coding=utf-8%0A# cython: profile=True%0A# Filename: shell.py%0A# cython: embedsignature=True%0A# pylint: disable=C0103%0A%22%22%22%0ASome shell helpers%0A%0A%22%22%22%0Afrom __future__ import division, absolute_import, print_function%0A%0Aimport os%0A%0Afrom .logger import logging%0A%0A__author__ = %22Tamas Gal%22%0A__copyright__ = %22Copyright 2016, Tamas Gal and the KM3NeT collaboration.%22%0A__credits__ = %5B%5D%0A__license__ = %22MIT%22%0A__maintainer__ = %22Tamas Gal%22%0A__email__ = %[email protected]%22%0A__status__ = %22Development%22%0A%0Alog = logging.getLogger(__name__) # pylint: disable=C0103%0A%0A%0Adef get_jpp_env(jpp_dir):%0A %22%22%22Return the environment dict of a loaded Jpp env.%0A %0A The returned env can be passed to %60subprocess.Popen(%22J...%22, env=env)%60%0A to execute Jpp commands.%0A%0A %22%22%22%0A env = %7Bv%5B0%5D:''.join(v%5B1:%5D) for v in%0A %5Bl.split('=') for l in%0A os.popen(%22source %7B0%7D/setenv.sh %7B0%7D && env%22%0A .format(jpp_dir)).read().split('%5Cn')%0A if '=' in l%5D%7D%0A return env%0A
1360a7031d4389f2ecdef24ce3190a88e5f8f794
add trivial pjit tests
tests/pjit_test.py
tests/pjit_test.py
Python
0.000144
@@ -0,0 +1,2016 @@ +# Copyright 2018 Google LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# https://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport numpy as onp%0Afrom absl.testing import absltest%0Afrom absl.testing import parameterized%0A%0Aimport jax.numpy as np%0Afrom jax import test_util as jtu%0Afrom jax.api import pjit%0Afrom jax.interpreters.parallel import psum%0A%0Afrom jax.config import config%0Aconfig.parse_flags_with_absl()%0A%0A%0Aclass PmapTest(jtu.JaxTestCase):%0A%0A @jtu.skip_on_devices(%22gpu%22)%0A def testBasic(self):%0A f = lambda x: x - psum(x, 'i')%0A x = onp.arange(8., dtype=onp.float32).reshape(4, 2)%0A f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)%0A ans = f(x)%0A expected = x - x.sum(0)%0A self.assertAllClose(ans, expected, check_dtypes=False)%0A%0A @jtu.skip_on_devices(%22gpu%22)%0A def testTupleOutput(self):%0A f = lambda x: (x - psum(x, 'i'),)%0A x = onp.arange(8., dtype=onp.float32).reshape(4, 2)%0A f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)%0A ans = f(x)%0A expected = (x - x.sum(0),)%0A self.assertAllClose(ans, expected, check_dtypes=False)%0A%0A @jtu.skip_on_devices(%22gpu%22)%0A def testTupleInput(self):%0A f = lambda x: x%5B0%5D - psum(x%5B0%5D, 'i')%0A x = onp.arange(8., dtype=onp.float32).reshape(4, 2)%0A f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)%0A ans = f((x,))%0A expected = x - x.sum(0)%0A self.assertAllClose(ans, expected, check_dtypes=False)%0A%0A%0Aif __name__ == '__main__':%0A absltest.main()%0A
cce409a910a19d93a2b674be1899f49d95800d74
use logging.error() instead of print()
static_aid/DataExtractor_ArchivesSpace.py
static_aid/DataExtractor_ArchivesSpace.py
import json
import logging
import os

import requests
import sys

from static_aid import config
from static_aid.DataExtractor import DataExtractor


class DataExtractor_ArchivesSpace(DataExtractor):

    def _run(self):
        lastExport = self.lastExportTime()
        self.makeDestinations()
        headers = self.authenticate()
        self.findResources(lastExport, headers)
        self.findObjects(lastExport, headers)
        self.findAgents(lastExport, headers)
        self.findSubjects(lastExport, headers)

    # authenticates the session
    def authenticate(self):
        try:
            url = '%s/users/%s/login?password=%s&expiring=false' % (config.archivesSpace['baseurl'],
                                                                    config.archivesSpace['user'],
                                                                    config.archivesSpace['password'],
                                                                    )
            auth = requests.post(url).json()
            token = {'X-ArchivesSpace-Session':auth["session"]}
            return token
        except requests.exceptions.RequestException as e:
            print 'Authentication failed! Make sure the baseURL setting in %s is correct and that your ArchivesSpace instance is running.' % config.configFilePath
            print e
            sys.exit(1)
        except KeyError:
            print 'Authentication failed! It looks like you entered the wrong password. Please check the information in %s.' % config.configFilePath
            sys.exit(1)

    # logs out non-expiring session (not yet in AS core, so commented out)
    # def logout(headers):
    #     requests.post('{baseURL}/logout'.format(**archivesSpace), headers=headers)
    #     logging.info('You have been logged out of your session')

    # Looks for resources
    def findResources(self, lastExport, headers):
        if lastExport > 0:
            logging.info('*** Getting a list of resources modified since %d ***', lastExport)
        else:
            logging.info('*** Getting a list of all resources ***')
        url = '%s/resources?all_ids=true&modified_since=%d' % (config.archivesSpace['repository_url'], lastExport)
        resourceIds = requests.get(url, headers=headers)
        for resourceId in resourceIds.json():
            url = '%s/resources/%s' % (config.archivesSpace['repository_url'], str(resourceId))
            resource = (requests.get(url, headers=headers)).json()
            if resource["publish"]:
                if not "LI" in resource["id_0"]:
                    self.saveFile(resourceId, resource, config.destinations['collections'])
                    self.findTree(resourceId, headers)
                else:
                    pass
            else:
                self.removeFile(resourceId, config.destinations['collections'])
                self.removeFile(resourceId, config.destinations['trees'])

    # Looks for resource trees
    def findTree(self, identifier, headers):
        url = '%s/resources/%s/tree' % (config.archivesSpace['repository_url'], str(identifier))
        tree = (requests.get(url, headers=headers)).json()
        self.saveFile(identifier, tree, config.destinations['trees'])

    # Looks for archival objects
    def findObjects(self, lastExport, headers):
        if lastExport > 0:
            logging.info('*** Getting a list of objects modified since %d ***', lastExport)
        else:
            logging.info('*** Getting a list of all objects ***')
        url = '%s/archival_objects?all_ids=true&modified_since=%d' % (config.archivesSpace['repository_url'], lastExport)
        archival_objects = requests.get(url, headers=headers)
        for objectId in archival_objects.json():
            url = '%s/archival_objects/%s' % (config.archivesSpace['repository_url'], str(objectId))
            archival_object = requests.get(url, headers=headers).json()
            if archival_object["publish"]:
                self.saveFile(objectId, archival_object, config.destinations['objects'])
                # build breadcrumb trails for archival object pages
                url = '%s/archival_objects/%s' % (config.archivesSpace['breadcrumb_url'], str(objectId))
                response = requests.get(url, headers=headers)
                if response.status_code == 200:
                    published_tree = response.json()
                    breadcrumbs = json.loads(published_tree["tree_json"])
                    self.saveFile(objectId, breadcrumbs,
                                  config.destinations['breadcrumbs'])
            else:
                self.removeFile(objectId, config.destinations['objects'])

    # Looks for agents
    def findAgents(self, lastExport, headers):
        if lastExport > 0:
            logging.info('*** Getting a list of agents modified since %d ***', lastExport)
        else:
            logging.info('*** Getting a list of all agents ***')
        agent_types = ['corporate_entities', 'families', 'people', 'software']
        for agent_type in agent_types:
            url = '%s/agents/%s?all_ids=true&modified_since=%d' % (config.archivesSpace['baseurl'], agent_type, lastExport)
            agents = requests.get(url, headers=headers)
            for a in agents.json():
                url = '%s/agents/%s/%s' % (config.archivesSpace['baseurl'], agent_type, str(a))
                agent = requests.get(url, headers=headers).json()
                if agent["publish"]:
                    self.saveFile(a, agent, os.path.join(config.destinations['agents'], agent_type))
                else:
                    self.removeFile(a, os.path.join(config.destinations['agents'], agent_type))

    # Looks for subjects
    def findSubjects(self, lastExport, headers):
        if lastExport > 0:
            logging.info('*** Getting a list of subjects modified since %d ***', lastExport)
        else:
            logging.info('*** Getting a list of all subjects ***')
        url = '%s/subjects?all_ids=true&modified_since=%d' % (config.archivesSpace['baseurl'], lastExport)
        subjects = requests.get(url, headers=headers)
        for s in subjects.json():
            url = '%s/subjects/%s' % (config.archivesSpace['baseurl'], str(s))
            subject = requests.get(url, headers=headers).json()
            if subject["publish"]:
                self.saveFile(s, subject, config.destinations['subjects'])
            else:
                self.removeFile(s, config.destinations['subjects'])
Python
0.000027
@@ -1153,38 +1153,46 @@ e:%0A -print +logging.error( 'Authentication @@ -1309,42 +1309,54 @@ fig. -configFilePath%0A print e +CONFIG_FILE_PATH)%0A logging.error(e) %0A @@ -1417,14 +1417,22 @@ -print +logging.error( 'Aut @@ -1547,22 +1547,25 @@ fig. -configFilePath +CONFIG_FILE_PATH) %0A
7c1b0d4efd000fee8f065f2f5815075833811331
Change file location and rename
scripts/reporting/svn_report.py
scripts/reporting/svn_report.py
Python
0.000001
@@ -0,0 +1,2669 @@ +'''%0AThis file creates a .csv file containing the name of each laptop and its last changed date%0A'''%0Aimport argparse%0Aimport csv%0Afrom datetime import datetime, timezone%0Aimport os%0Aimport svn.local%0Aimport pandas as pd%0A%0A'''%0AConstants -- paths for reports, default save names, SLA, columns, and sites%0ATO-DO: Change SLA_DAYS to a parser arg?%0A'''%0AREPORTS_DIR = '/fs/storage/laptops/ncanda'%0ADEFAULT_CSV = '/tmp/chris/import_reports/'%0ASLA_DAYS = 30%0ADATA_COLUMNS = %5B'laptop', 'date_updated', 'time_diff', 'sla', 'sla_percentage'%5D%0ASITES = %5B'duke', 'sri', 'ohsu', 'upmc', 'ucsd'%5D%0A%0Adef parse_args(arg_input=None):%0A '''%0A Set up parser arguments%0A '''%0A parser = argparse.ArgumentParser(%0A description=%22Create a CSV file with all laptops and dates they were last modified%22)%0A parser.add_argument(%0A %22--file%22,%0A help=%22Path of file name to save as%22,%0A action=%22store%22,%0A default=DEFAULT_CSV)%0A %0A return parser.parse_args(arg_input)%0A%0Adef create_dataframe():%0A '''%0A Writes the names of each laptop and the date they were updated to a .csv file%0A '''%0A # Grab all directories and set up SVN client%0A directories = os.listdir(REPORTS_DIR)%0A r = svn.local.LocalClient(REPORTS_DIR)%0A df = pd.DataFrame(columns=DATA_COLUMNS)%0A %0A # Calculate time difference and appends to csv file%0A for directory in directories:%0A if (directory != %22.svn%22):%0A # Get commit date, time difference from today, and percentage of SLA%0A info = r.info(directory)%0A mod_time = info%5B'commit/date'%5D%0A time_diff = datetime.now(timezone.utc) - mod_time%0A sla_percentage = time_diff.total_seconds() / (SLA_DAYS * 24 * 60 * 60)%0A new_row = %7B%0A 'laptop': directory,%0A 'date_updated': mod_time,%0A 'time_diff': time_diff,%0A 'sla': SLA_DAYS,%0A 'sla_percentage': sla_percentage%0A %7D%0A df = df.append(new_row, ignore_index=True)%0A%0A # Sort by descending SLA percentage%0A df = df.sort_values(by=%5B'sla_percentage'%5D, ascending=False)%0A return df%0A%0Adef write_to_csv(df, path=None):%0A '''%0A Save data into a dataframe and save for each individual site%0A '''%0A df.to_csv(path + 'reports.csv', index=False)%0A for site in SITES:%0A site_df = df.loc%5Bdf%5B'laptop'%5D.str.contains(site, case=False)%5D%0A site_df.to_csv(path + site + '.csv', index=False)%0A %0A %0Adef main():%0A '''%0A Grabs necessary SVN data from folders and then calls to write to the csv%0A '''%0A args = parse_args()%0A df = create_dataframe()%0A write_to_csv(df, args.file)%0A%0Aif __name__ == %22__main__%22:%0A main()%0A