commit (string, 40) | subject (string, 1-3.25k) | old_file (string, 4-311) | new_file (string, 4-311) | old_contents (string, 0-26.3k) | lang (3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k)
---|---|---|---|---|---|---|---
3618ce5749517c7757a04f0c08a74275e8e82b69 | Create fasttext.py | fasttext.py | fasttext.py | Python | 0.000031 | @@ -0,0 +1,3188 @@
from __future__ import print_function
import numpy as np

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import GlobalAveragePooling1D
from keras.datasets import imdb

class FastText:

    '''
    Takes in pandas dataframe with at least two columns where one
    is the dependent variable, and one is text.

    EXAMPLE USE:

    FastText(data,var)

    If there is more than one possible depedent variable in df then
    there you can run the moddle for any of it.

    '''

    def __init__(self,data,var):

        self.data = data
        self.var = var

        self.null = self._configuration()
        self.null = self._get_cube()
        self.null = self._padding()
        self.model = self._build_model()


    def _configuration(self):

        self.max_features = 125000
        self.maxlen = 800
        self.batch_size = 16
        self.embedding_dims = 20
        self.epochs = 2

        return "NULL"


    def _get_cube(self):

        o = Cube(self.data,self.var)

        self.x_train = o.x_train
        self.y_train = o.y_train
        self.x_test = o.x_test
        self.y_test = o.y_test

        return 'NULL'


    def create_ngram_set(self,input_list, ngram_value=2):

        return set(zip(*[input_list[i:] for i in range(ngram_value)]))


    def add_ngram(self,sequences, token_indice, ngram_range=2):

        new_sequences = []
        for input_list in sequences:
            new_list = input_list[:]
            for i in range(len(new_list) - ngram_range + 1):
                for ngram_value in range(2, ngram_range + 1):
                    ngram = tuple(new_list[i:i + ngram_value])
                    if ngram in token_indice:
                        new_list.append(token_indice[ngram])
            new_sequences.append(new_list)

        return new_sequences


    def _padding(self):

        self.x_train = sequence.pad_sequences(self.x_train, maxlen=self.maxlen)
        self.x_test = sequence.pad_sequences(self.x_test, maxlen=self.maxlen)

        return 'NULL'


    def _build_model(self):

        model = Sequential()

        model.add(Embedding(self.max_features,   # efficient embedding layer which maps
                            self.embedding_dims, # vocab indices into embedding_dims dimensions
                            input_length=self.maxlen))

        model.add(GlobalAveragePooling1D()) # avg the embeddings of all words in the document

        model.add(Dense(1, activation='hard_sigmoid')) # project onto a single unit
                                                       # output layer, and squash it
        model.compile(loss='binary_crossentropy',
                      optimizer='adagrad',
                      metrics=['accuracy'])

        model.fit(self.x_train, self.y_train,
                  batch_size=self.batch_size,
                  epochs=self.epochs,
                  validation_data=(self.x_test, self.y_test))

        return model
|
|
7655e376696a04aa1c3596274861515953f592e8 | Add profiling script for savings code | openprescribing/frontend/price_per_unit/profile.py | openprescribing/frontend/price_per_unit/profile.py | Python | 0 | @@ -0,0 +1,1010 @@
"""
Basic profiling code for working out where we're spending our time

Invoke with:
./manage.py shell -c 'from frontend.price_per_unit.profile import profile; profile()'
"""
from cProfile import Profile
import datetime
import time

from .savings import get_all_savings_for_orgs


def test():
    get_all_savings_for_orgs("2019-11-01", "ccg", ["99C"])
    # get_all_savings_for_orgs("2019-11-01", "all_standard_practices", [None])


def profile():
    num_attempts = 5
    attempts = []
    for _ in range(num_attempts):
        profiler = Profile()
        start = time.time()
        profiler.runcall(test)
        duration = time.time() - start
        attempts.append((duration, profiler))
    attempts.sort()
    profile_file = "profile.{}.prof".format(
        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    )
    attempts[0][1].dump_stats(profile_file)
    print(
        "{}s (best of {}), profile saved as: {}".format(
            attempts[0][0], num_attempts, profile_file
        )
    )
|
|
e019ed140e31f4a25b843429b70c3e28d48a8628 | Add missing mock for cinder.default_quota_get | openstack_dashboard/dashboards/admin/info/tests.py | openstack_dashboard/dashboards/admin/info/tests.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:admin:info:index')
class SystemInfoViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.base: ('is_service_enabled',),
api.nova: ('default_quota_get', 'service_list'),
api.neutron: ('agent_list', 'is_extension_supported')})
def test_index(self):
services = self.services.list()
api.nova.service_list(IsA(http.HttpRequest)).AndReturn(services)
agents = self.agents.list()
api.neutron.agent_list(IsA(http.HttpRequest)).AndReturn(agents)
api.base.is_service_enabled(IsA(http.HttpRequest), IgnoreArg()) \
.MultipleTimes().AndReturn(True)
api.nova.default_quota_get(IsA(http.HttpRequest),
IgnoreArg()).AndReturn({})
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'security-group').AndReturn(True)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/info/index.html')
services_tab = res.context['tab_group'].get_tab('services')
self.assertQuerysetEqual(services_tab._tables['services'].data,
['<Service: compute>',
'<Service: volume>',
'<Service: image>',
'<Service: identity (native backend)>',
'<Service: object-store>',
'<Service: network>',
'<Service: ec2>',
'<Service: metering>',
'<Service: orchestration>',
'<Service: database>'])
network_agents_tab = res.context['tab_group'].get_tab('network_agents')
self.assertQuerysetEqual(
network_agents_tab._tables['network_agents'].data,
[agent.__repr__() for agent in self.agents.list()]
)
def test_default_quotas_index(self):
self._test_default_quotas_index(neutron_enabled=True)
def test_default_quotas_index_with_neutron_disabled(self):
self._test_default_quotas_index(neutron_enabled=False)
def test_default_quotas_index_with_neutron_sg_disabled(self):
self._test_default_quotas_index(neutron_enabled=True,
neutron_sg_enabled=False)
@test.create_stubs({api.base: ('is_service_enabled',),
api.nova: ('default_quota_get', 'service_list'),
api.cinder: ('default_quota_get',)})
def _test_default_quotas_index(self, neutron_enabled=True,
neutron_sg_enabled=True):
# Neutron does not have an API for getting default system
# quotas. When not using Neutron, the floating ips quotas
# should be in the list.
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(neutron_enabled)
api.nova.service_list(IsA(http.HttpRequest)).AndReturn([])
api.nova.default_quota_get(IsA(http.HttpRequest),
self.tenant.id).AndReturn(self.quotas.nova)
api.cinder.default_quota_get(IsA(http.HttpRequest), self.tenant.id)\
.AndReturn(self.cinder_quotas.first())
if neutron_enabled:
self.mox.StubOutWithMock(api.neutron, 'agent_list')
api.neutron.agent_list(IsA(http.HttpRequest)).AndReturn([])
self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'security-group').AndReturn(neutron_sg_enabled)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
quotas_tab = res.context['tab_group'].get_tab('quotas')
expected_tabs = ['<Quota: (injected_file_content_bytes, 1)>',
'<Quota: (metadata_items, 1)>',
'<Quota: (injected_files, 1)>',
'<Quota: (gigabytes, 1000)>',
'<Quota: (ram, 10000)>',
'<Quota: (instances, 10)>',
'<Quota: (snapshots, 1)>',
'<Quota: (volumes, 1)>',
'<Quota: (cores, 10)>',
'<Quota: (floating_ips, 1)>',
'<Quota: (fixed_ips, 10)>',
'<Quota: (security_groups, 10)>',
'<Quota: (security_group_rules, 20)>']
if neutron_enabled:
expected_tabs.remove('<Quota: (floating_ips, 1)>')
expected_tabs.remove('<Quota: (fixed_ips, 10)>')
if neutron_sg_enabled:
expected_tabs.remove('<Quota: (security_groups, 10)>')
expected_tabs.remove('<Quota: (security_group_rules, 20)>')
self.assertQuerysetEqual(quotas_tab._tables['quotas'].data,
expected_tabs,
ordered=False)
| Python | 0.999998 | @@ -1177,16 +1177,76 @@
 ported')
+,
+                        api.cinder: ('default_quota_get',)
 })
     d
@@ -1731,16 +1731,144 @@
 urn({})
+        api.cinder.default_quota_get(IsA(http.HttpRequest), self.tenant.id)\
+            .AndReturn(self.cinder_quotas.first())
|
40caa4c9b720388207e338ffde3cd7f2d85cdf0d | add a single script to perform formatting of base log files | base-format.py | base-format.py | Python | 0 | @@ -0,0 +1,2739 @@
#!/usr/bin/python

from __future__ import print_function

import sys
import re
import datetime
import ircformatlib as il

timeformat_format = '%H:%M:%S'
timeformat_formatlen = 8
timeformat_filler = ' ' * timeformat_formatlen

def timeformat(time):
    try:
        x = int(time)
        dt = datetime.datetime.fromtimestamp(round(x / 1000.0))
        return dt.strftime(timeformat_format)
    except:
        return timeformat_filler

def colorized_newstate():
    return { 'maxlen': 0, 'hits': {}, 'counts': {}, 'allocated': {},
             'textmatcher': {} }

def colorized_text(state, text, leadstr=''):
    state['maxlen'] = il.getmaxlen(leadstr + text, state['maxlen'])
    color = il.getcolor(text, state['allocated'], state['counts'],
                        state['hits'])
    il.uplogs(color, state['hits'])
    return (il.getmaxpad(leadstr + text, state['maxlen']) + leadstr +
            color + text + il.clearseq)

chanformat_state = colorized_newstate()

def chanformat(channel):
    if not channel:
        return ''
    return colorized_text(chanformat_state, channel)

nameformat_state = colorized_newstate()

def nameformat(name):
    leadstr = ''
    for lead in ('--- ', '* '):
        if name.startswith(lead):
            leadstr = lead
            name = name[len(lead):]
            break

    for perm in ('@', '+', '%', '*'):
        if name.startswith(perm):
            leadstr += perm
            name = name[len(perm):]
            break

    return colorized_text(nameformat_state, name, leadstr)

def textformat(text):
    return il.text_colorize(il.text_colorize(text,
                                             chanformat_state['textmatcher'],
                                             chanformat_state['allocated']),
                            nameformat_state['textmatcher'],
                            nameformat_state['allocated'])

def combine_parts(channel, time, name, text):
    tcsep = ''
    if time and channel:
        tcsep = ' '
    return time + tcsep + channel + ' ' + name + ' ' + text

def main():
    try:
        m = re.compile(r'(([^\t]+)\t)?([^\t]+)\t([^\t]+)\t([^\t]+)')

        line = sys.stdin.readline()

        while line:
            r = m.match(line)

            if r:
                line = combine_parts(chanformat(r.group(2)),
                                     timeformat(r.group(3)),
                                     nameformat(r.group(4)),
                                     textformat(r.group(5)))
            else:
                line = textformat(line)

            print(line, end='')
            sys.stdout.flush()
            line = sys.stdin.readline()

    except KeyboardInterrupt:
        pass

if __name__ == '__main__':
    main()
|
|
2766e8797515497e5569b31696416db68641c9b4 | Extend MediaRemovalMixin to move media files on updates | base/models.py | base/models.py | import os
from django.conf import settings
class MediaRemovalMixin(object):
"""
Removes all files associated with the model, as returned by the
get_media_files() method.
"""
# Models that use this mixin need to override this method
def get_media_files(self):
return
def delete(self):
for media_file in self.get_media_files():
path = settings.MEDIA_ROOT + media_file
if os.path.exists(path):
os.remove(path)
return super(MediaRemovalMixin, self).delete()
| Python | 0 | @@ -311,24 +311,41 @@
 delete(self
+, *args, **kwargs
 ):
     f
@@ -561,10 +561,760 @@
 .delete(
+*args, **kwargs)
+
+    def save(self, *args, **kwargs):
+        if self.pk:
+            # Primary key exists, object is being edited
+            old_object = self.__class__.objects.get(pk=self.pk)
+            path_pairs = zip(old_object.get_media_files(),
+                             self.get_media_files())
+
+            # Move each associated file to its new location
+            for (old_path, new_path) in path_pairs:
+                full_old_path = settings.MEDIA_ROOT + old_path
+                full_new_path = settings.MEDIA_ROOT + new_path
+
+                if old_path != new_path and os.path.exists(full_old_path):
+                    os.rename(full_old_path, full_new_path)
+
+        return super(MediaRemovalMixin, self).save(*args, **kwargs
 )
|
24c642063ffcb3313545b2e1ba3abbb62aa98437 | Add cuit validator to utils module | nbs/utils/validators.py | nbs/utils/validators.py | Python | 0 | @@ -0,0 +1,568 @@
# -*- coding: utf-8-*-


def validate_cuit(cuit):
    "from: http://python.org.ar/pyar/Recetario/ValidarCuit by Mariano Reingart"
    # validaciones minimas
    if len(cuit) != 13 or cuit[2] != "-" or cuit [11] != "-":
        return False

    base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]

    cuit = cuit.replace("-", "")

    # calculo digito verificador
    aux = 0
    for i in range(10):
        aux += int(cuit[i]*base[i])

    aux = 11 - (aux - (int(aux/11) * 11))

    if aux == 11:
        aux = 0
    if aux == 10:
        aux = 9

    return aux == int(cuit[10])
|
|
7274f9286bd267970c286954e9d21e601af30cb7 | Create messenger.py | messenger.py | messenger.py | Python | 0.000002 | @@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-
import requests
apiurl = '你的地址'
apiheaders = {'U-ApiKey': '你的key'}
code="动态码"
response = requests.get(apiurl, params={"media_id":'gh_3fc78df4c9d2',"auth_code":code, "scene":1,"device_no":1,"location":'jia'})
json = response.json()
print(json)
|
|
620ad7f4dc5ed9403f468f592b99a22a92d22072 | make python -m i3configger work | i3configger/__main__.py | i3configger/__main__.py | Python | 0.000012 | @@ -0,0 +1,81 @@
import i3configger.main


if __name__ == "__main__":
    i3configger.main.main()
|
|
ad2178a8973ce2de55611321c0b7b57b1488fc6b | move utilities in a private module | appengine_toolkit/management/commands/_utils.py | appengine_toolkit/management/commands/_utils.py | Python | 0 | @@ -0,0 +1,672 @@
import pkg_resources
import os


class RequirementNotFoundError(Exception):
    pass


def collect_dependency_paths(package_name):
    """
    TODO docstrings
    """
    deps = []
    try:
        dist = pkg_resources.get_distribution(package_name)
    except ValueError:
        message = "Distribution '{}' not found.".format(package_name)
        raise RequirementNotFoundError(message)

    if dist.has_metadata('top_level.txt'):
        for line in dist.get_metadata('top_level.txt').split():
            deps.append(os.path.join(dist.location, line))

    for req in dist.requires():
        deps.extend(collect_dependency_paths(req.project_name))

    return deps
|
|
79b99968d7c9e728efe05f8c962bdda5c9d56559 | Add LDAP authentication plugin | web/utils/auth.py | web/utils/auth.py | Python | 0 | @@ -0,0 +1,2461 @@
# http://www.djangosnippets.org/snippets/501/
from django.contrib.auth.models import User
from django.conf import settings
import ldap


class ActiveDirectoryBackend:

    supports_object_permissions = False
    supports_anonymous_user = False
    supports_inactive_user = False

    def authenticate(self, username=None, password=None):
        if username:
            username = username.lower()
        if not self.is_valid(username, password):
            return None
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            l = ldap.initialize(settings.AD_LDAP_URL)
            binddn = '%s@%s' % (username, settings.AD_NT4_DOMAIN)
            l.simple_bind_s(binddn, password)
            result = l.search_ext_s(settings.AD_SEARCH_DN, ldap.SCOPE_SUBTREE,
                'sAMAccountName=%s' % username, settings.AD_SEARCH_FIELDS)[0][1]
            l.unbind_s()

            # givenName == First Name
            if 'givenName' in result:
                first_name = result['givenName'][0]
            else:
                first_name = None

            # sn == Last Name (Surname)
            if 'sn' in result:
                last_name = result['sn'][0]
            else:
                last_name = None

            # mail == Email Address
            if 'mail' in result:
                email = result['mail'][0]
            else:
                email = None

            user = User(username=username, first_name=first_name, last_name=last_name, email=email)
            user.is_staff = False
            user.is_superuser = False
            user.set_password(password)
            user.save()
        return user

    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None

    def is_valid(self, username=None, password=None):
        # Disallowing null or blank string as password
        # as per comment: http://www.djangosnippets.org/snippets/501/#c868
        if password is None or password == '':
            return False
        if username:
            username = username.lower()
        binddn = '%s@%s' % (username, settings.AD_NT4_DOMAIN)
        try:
            l = ldap.initialize(settings.AD_LDAP_URL)
            l.simple_bind_s(binddn, password)
            l.unbind_s()
            return True
        except ldap.LDAPError:
            return False
|
|
8e1a3cc1a3d4e4d9bc63fb73a8787e5c627afb7d | add tests for service inspector | tests/test_service_inspector.py | tests/test_service_inspector.py | Python | 0.000001 | @@ -0,0 +1,1963 @@
from __future__ import absolute_import

import unittest

import servy.server


class Dummy(object):
    def fn(self):
        pass


class Service(servy.server.Service):
    def __call__(self):
        pass


class ServiceDetection(unittest.TestCase):
    def test_lambda(self):
        self.assertTrue(servy.server.ServiceInspector.is_service(lambda x: x))

    def test_method(self):
        self.assertTrue(servy.server.ServiceInspector.is_service(Dummy().fn))

    def test_callable_class_service(self):
        self.assertTrue(servy.server.ServiceInspector.is_service(Service()))

    def test_type(self):
        self.assertFalse(servy.server.ServiceInspector.is_service(dict))

    def test_int(self):
        self.assertFalse(servy.server.ServiceInspector.is_service(1))

    def test_string(self):
        self.assertFalse(servy.server.ServiceInspector.is_service("1"))

    def test_dummy_class(self):
        self.assertFalse(servy.server.ServiceInspector.is_service(Dummy))


class ContainerDetection(unittest.TestCase):
    def test_dict(self):
        self.assertTrue(servy.server.ServiceInspector.is_container({}))

    def test_service_class(self):
        self.assertTrue(servy.server.ServiceInspector.is_container(Service))

    def test_service_class_instance(self):
        self.assertTrue(servy.server.ServiceInspector.is_container(Service()))

    def test_dummy_class(self):
        self.assertFalse(servy.server.ServiceInspector.is_container(Dummy))


class PublicMethodsDetection(unittest.TestCase):
    def test_double_underscores(self):
        items = {
            '__private': None,
        }
        self.assertEqual(
            servy.server.ServiceInspector.get_public(items.items()),
            {},
        )

    def test_single_underscores(self):
        items = {
            '_private': None,
        }
        self.assertEqual(
            servy.server.ServiceInspector.get_public(items.items()),
            {},
        )
|
|
43de875bcb2dcf4213b881ff1de8f9e715fb2d30 | Add brute_force.py | brute_force.py | brute_force.py | Python | 0.99866 | @@ -0,0 +1,755 @@
from battingorder import *
from itertools import permutations

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Brute force.')
    parser.add_argument("filename", nargs='?', default='braves.data', help="file with necessary statistics")
    args = parser.parse_args()

    player_matrices = readdata(args.filename)
    run_matrix = createrunmatrix()

    start_order = range(9)

    samples = []
    for order in permutations(start_order):
        score = calculate(order, player_matrices, run_matrix)
        samples.append((score, order))

    samples.sort(reverse=True)
    best = samples[0]

    print("Final ordering: {}".format(best[1]))
    print("This lineup will score an average of {} runs per game.".format(best[0]))
|
|
b013f059a5d39acf05ba8e5ef9d6cb1d9e3f724c | add a script to exercise the example jsonrpc methods | tester.py | tester.py | Python | 0 | @@ -0,0 +1,728 @@
import zmq

class JRPC:
    def __init__(self):
        self.id = 0

    def make_req(self, method, params):
        req = {"jsonrpc":"2.0", "method":method, "params":params,
                "id":self.id}
        self.id += 1
        return req

zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")

jrpc = JRPC()

req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
print zsock.recv()

req = jrpc.make_req("subtract", {"minuend":10, "subtrahend":5})
zsock.send_json(req)
print zsock.recv()

req = jrpc.make_req("subtract", [10, 5])
zsock.send_json(req)
print zsock.recv()

req_array = []
for k in range(10):
    req = jrpc.make_req("sum", range(1+k))
    req_array.append(req)
zsock.send_json(req_array)
print zsock.recv()
|
|
819bb017a4d2a0c03d7f655cee452c5fa5f67a37 | Check DOB not in future | radar/radar/validation/recruit_patient.py | radar/radar/validation/recruit_patient.py | from radar.validation.core import Validation, Field, pass_call, pass_context, ValidationError, ListField
from radar.validation.validators import optional, required, not_in_future, in_, not_empty, upper
from radar.models.patients import GENDERS, ETHNICITIES, Patient
from radar.permissions import has_permission_for_group
from radar.groups import is_radar_group
from radar.validation.number_validators import NUMBER_VALIDATORS
from radar.roles import PERMISSION
from radar.models.groups import GROUP_TYPE
from radar.patient_search import filter_by_patient_number_at_group
from radar.database import db
from radar.exceptions import PermissionDenied
class RecruitPatientSearchValidation(Validation):
first_name = Field([not_empty(), upper()])
last_name = Field([not_empty(), upper()])
date_of_birth = Field([required()])
number = Field([not_empty()])
number_group = Field([required()])
def validate_number_group(self, number_group):
if not number_group.recruitment:
raise ValidationError('Patient number not suitable for recruitment.')
return number_group
@pass_call
def validate(self, call, obj):
number_group = obj['number_group']
number_validators = NUMBER_VALIDATORS.get((number_group.type, number_group.code))
if number_validators is not None:
call.validators_for_field(number_validators, obj, self.number)
first_name = obj['first_name'].upper()
last_name = obj['last_name'].upper()
date_of_birth = obj['date_of_birth']
number = obj['number']
number_filter = filter_by_patient_number_at_group(number, number_group)
patients = Patient.query.filter(number_filter).all()
for patient in patients:
# Check the supplied demographics match existing demographics
# Note: Users are able to check if a patient is on RaDaR by only supplying a patient number
# TODO this could do with being less strict
match = (
first_name in patient.first_names and
last_name in patient.last_names and
date_of_birth in patient.date_of_births
)
if not match:
raise ValidationError({'number': "Supplied demographics don't match existing demographics."})
return obj
class PatientNumberValidation(Validation):
number = Field([not_empty()])
number_group = Field([required()])
@pass_call
def validate(self, call, obj):
number_group = obj['number_group']
number_validators = NUMBER_VALIDATORS.get((number_group.type, number_group.code))
if number_validators is not None:
call.validators_for_field(number_validators, obj, self.number)
return obj
class PatientNumberListField(ListField):
def __init__(self, chain=None):
super(PatientNumberListField, self).__init__(PatientNumberValidation(), chain=chain)
def validate(self, obj):
groups = set()
for i, x in enumerate(obj):
group = x['number_group']
if group in groups:
raise ValidationError({i: {'number_group': 'Number already supplied for group.'}})
else:
groups.add(group)
return obj
class RecruitPatientValidation(Validation):
first_name = Field([not_empty(), upper()])
last_name = Field([not_empty(), upper()])
date_of_birth = Field([required(), not_in_future()])
gender = Field([required(), in_(GENDERS.keys())])
ethnicities = Field([optional(), in_(ETHNICITIES.keys())])
cohort_group = Field([required()])
hospital_group = Field([required()])
patient_numbers = PatientNumberListField([required()])
@classmethod
def get_patient(cls, obj):
patient = None
for i, x in enumerate(obj['patient_numbers']):
if is_radar_group(x['number_group']):
patient_id = int(x['number'])
patient = Patient.query.get(patient_id)
if patient is None:
raise ValidationError({'patient_numbers': {i: {'number': 'Patient not found.'}}})
break
return patient
@classmethod
def get_recruitment_group(cls, obj):
group = None
for i, x in enumerate(obj['patient_numbers']):
if x['number_group'].recruitment:
group = x['number_group']
break
return group
@classmethod
def patient_number_exists(cls, x):
number = x['number']
number_group = x['number_group']
number_filter = filter_by_patient_number_at_group(number, number_group)
q = Patient.query.filter(number_filter).exists()
return db.session.query(q).scalar()
def check_patient_numbers(cls, obj):
for i, x in enumerate(obj['patient_numbers']):
if cls.patient_number_exists(x):
raise ValidationError({'patient_numbers': {i: {'number': 'A patient already exists with this number.'}}})
@pass_context
def validate_cohort_group(self, ctx, cohort_group):
current_user = ctx['user']
if not has_permission_for_group(current_user, cohort_group, PERMISSION.RECRUIT_PATIENT):
raise PermissionDenied()
if cohort_group.type != GROUP_TYPE.COHORT:
raise ValidationError('Must be a cohort.')
return cohort_group
@pass_context
def validate_hospital_group(self, ctx, hospital_group):
current_user = ctx['user']
if not has_permission_for_group(current_user, hospital_group, PERMISSION.RECRUIT_PATIENT, explicit=True):
raise PermissionDenied()
if hospital_group.type != GROUP_TYPE.HOSPITAL:
raise ValidationError('Must be a hospital.')
return hospital_group
@pass_call
def validate(self, call, obj):
patient = self.get_patient(obj)
if patient:
first_name = obj['first_name'].upper()
last_name = obj['last_name'].upper()
date_of_birth = obj['date_of_birth']
# Check the supplied demographics match existing demographics
# This is to prevent a user recruiting a patient without knowing their demographics
match = (
first_name in patient.first_names and
last_name in patient.last_names and
date_of_birth in patient.date_of_births
)
if not match:
raise ValidationError("Supplied demographics don't match existing demographics.")
else:
call.validators_for_field([required()], obj, self.gender)
recruitment_group = self.get_recruitment_group(obj)
if recruitment_group is None:
raise ValidationError({'patient_numbers': {'_': 'Missing a patient number suitable for recruitment.'}})
self.check_patient_numbers(obj)
return obj
| Python | 0 | @@ -818,24 +818,41 @@
 ([required()
+, not_in_future()
 ])
     numbe
|
0dcf9178564b879a51b06ae06df58917f78adb6d | Fix linting | tensorflow_datasets/image/nyu_depth_v2.py | tensorflow_datasets/image/nyu_depth_v2.py | """NYU Depth V2 Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
_CITATION = """\
@inproceedings{Silberman:ECCV12,
author = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
title = {Indoor Segmentation and Support Inference from RGBD Images},
booktitle = {ECCV},
year = {2012}
}
@inproceedings{icra_2019_fastdepth,
author = {Wofk, Diana and Ma, Fangchang and Yang, Tien-Ju and Karaman, Sertac and Sze, Vivienne},
title = {FastDepth: Fast Monocular Depth Estimation on Embedded Systems},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2019}
}
"""
_DESCRIPTION = """\
The NYU-Depth V2 data set is comprised of video sequences from a variety of
indoor scenes as recorded by both the RGB and Depth cameras from the
Microsoft Kinect.
"""
_URL = 'http://datasets.lids.mit.edu/fastdepth/data/nyudepthv2.tar.gz'
class NyuDepthV2(tfds.core.GeneratorBasedBuilder):
"""NYU Depth V2 Dataset."""
VERSION = tfds.core.Version('0.0.1')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(shape=(480, 640, 3)),
'depth': tfds.features.Tensor(shape=(480, 640), dtype=tf.float16),
}),
supervised_keys=('image', 'depth'),
homepage='https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
base_path = dl_manager.download_and_extract(_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'root_dir': os.path.join(base_path, 'nyudepthv2', 'train')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'root_dir': os.path.join(base_path, 'nyudepthv2', 'val')
},
),
]
def _generate_examples(self, root_dir):
"""Yields examples."""
for dir in tf.io.gfile.listdir(root_dir):
for file_name in tf.io.gfile.listdir(os.path.join(root_dir, dir)):
with h5py.File(os.path.join(root_dir, dir, file_name), 'r') as file:
yield dir + '_' + file_name, {
'image': np.transpose(file["rgb"], (1, 2, 0)),
'depth': file['depth'][:].astype('float16')
}
| Python | 0.000004 | @@ -143,20 +143,19 @@
 ort 
-h5py
+os
 
 import 
 os
 i
@@ -150,18 +150,20 @@
 
 import 
-os
+h5py
 
 import 
@@ -2300,16 +2300,22 @@
 for dir
+ectory
 in tf.i
@@ -2410,16 +2410,22 @@
 dir, dir
+ectory
 )):
@@ -2469,16 +2469,22 @@
 dir, dir
+ectory
 , file_n
@@ -2498,19 +2498,16 @@
 r') as f
-ile
 :
@@ -2519,16 +2519,22 @@
 ield dir
+ectory
 + '_' +
@@ -2584,19 +2584,16 @@
 nspose(f
-ile
 ["rgb"],
@@ -2629,19 +2629,16 @@
 epth': f
-ile
 ['depth'
|
061dcecdd7b691cefd34c8a254037a399b251378 | add a new script to build a pypi 'simple' index from a dir containing wheels | build_index.py | build_index.py | Python | 0 | @@ -0,0 +1,1550 @@
import sys
import py

PACKAGES = [
    'netifaces',
]

class IndexBuilder(object):

    def __init__(self, wheeldir, outdir):
        self.wheeldir = py.path.local(wheeldir)
        self.outdir = py.path.local(outdir)
        self.packages = []

    def copy_wheels(self):
        for whl in self.wheeldir.visit('*.whl'):
            name, version = self.parse(whl)
            self.packages.append(name)
            d = self.outdir.join(name).ensure(dir=True)
            dst = d.join(whl.basename)
            if dst.check(file=False):
                whl.copy(d)

    def build_index(self):
        self._write_index(self.outdir, 'PyPy Wheel Index', self.packages)
        for pkg in self.packages:
            d = self.outdir.join(pkg)
            wheels = [whl.basename for whl in d.listdir('*.whl')]
            self._write_index(d, 'Links for %s' % pkg, wheels)

    def parse(self, f):
        name, version, _ = f.basename.split('-', 2)
        return name, version

    def _write_index(self, d, title, links):
        lines = [
            '<html><body><h1>{title}</h1>'.format(title=title)
        ]
        for name in links:
            line = '<a href="{name}">{name}</a>'.format(name=name)
            lines.append(line)
        lines.append('</body></html>')
        html = '\n'.join(lines)
        d.join('index.html').write(html)


def main():
    wheeldir = sys.argv[1]
    outdir = sys.argv[2]
    index = IndexBuilder(wheeldir, outdir)
    index.copy_wheels()
    index.build_index()

if __name__ == '__main__':
    main()
|
|
b22bf4e2431ac3598d9c8afee3f924d940e2297e | Create building_df.py | building_df.py | building_df.py | Python | 0.000002 | @@ -0,0 +1,866 @@
"""Utility functions"""

import os
import pandas as pd

def symbol_to_path(symbol, base_dir="data"):
    """Return CSV file path given ticker symbol."""
    return os.path.join(base_dir, "{}.csv".format(str(symbol)))


def get_data(symbols, dates):
    """Read stock data (adjusted close) for given symbols from CSV files."""
    df = pd.DataFrame(index=dates)
    if 'SPY' not in symbols:  # add SPY for reference, if absent
        symbols.insert(0, 'SPY')

    for symbol in symbols:
        #Read and join data for each symbol
        df.join(symbol,'inner')

    return df


def test_run():
    # Define a date range
    dates = pd.date_range('2010-01-22', '2010-01-26')

    # Choose stock symbols to read
    symbols = ['GOOG', 'IBM', 'GLD']

    # Get stock data
    df = get_data(symbols, dates)
    print df


if __name__ == "__main__":
    test_run()
|
|
7d84cf8c41105d9990b8cfdf176415f1bcb20e0f | Add tests for batch norm | thinc/tests/integration/test_batch_norm.py | thinc/tests/integration/test_batch_norm.py | Python | 0 | @@ -0,0 +1,2406 @@
import pytest
from mock import MagicMock
import numpy
import numpy.random
from numpy.testing import assert_allclose
from hypothesis import given, settings, strategies

from ...neural._classes.batchnorm import BatchNorm
from ...api import layerize, noop

from ...neural._classes.affine import Affine
from ..strategies import arrays_OI_O_BI
from ..util import get_model, get_shape


@pytest.fixture
def shape():
    return (10, 20)


@pytest.fixture
def layer(shape):
    dummy = layerize(noop())
    dummy.nO = shape[-1]
    return dummy


def test_batch_norm_init(layer):
    layer = BatchNorm(layer)


def test_batch_norm_weights_init_to_one(layer):
    layer = BatchNorm(layer)
    assert layer.G is not None
    assert all(weight == 1. for weight in layer.G.flatten())


def test_batch_norm_runs_child_hooks(layer):
    mock_hook = MagicMock()
    layer.on_data_hooks.append(mock_hook)
    layer = BatchNorm(layer)
    for hook in layer.on_data_hooks:
        hook(layer, None)
    mock_hook.assert_called()


def test_batch_norm_predict_maintains_shape(layer, shape):
    input_ = numpy.ones(shape)
    input1 = layer.predict(input_)
    assert_allclose(input1, input_)
    layer = BatchNorm(layer)
    output = layer.predict(input_)
    assert output.shape == input_.shape


@given(arrays_OI_O_BI(max_batch=8, max_out=8, max_in=8))
def test_begin_update_matches_predict(W_b_input):
    model = get_model(W_b_input)
    nr_batch, nr_out, nr_in = get_shape(W_b_input)
    W, b, input_ = W_b_input

    model = BatchNorm(model)

    fwd_via_begin_update, finish_update = model.begin_update(input_)
    fwd_via_predict_batch = model.predict(input_)
    assert_allclose(fwd_via_begin_update, fwd_via_predict_batch)


@given(arrays_OI_O_BI(max_batch=8, max_out=8, max_in=8))
def test_finish_update_calls_optimizer_with_weights(W_b_input):
    model = get_model(W_b_input)
    nr_batch, nr_out, nr_in = get_shape(W_b_input)
    W, b, input_ = W_b_input

    model = BatchNorm(model)

    output, finish_update = model.begin_update(input_)

    seen_keys = set()
    def sgd(data, gradient, key=None, **kwargs):
        seen_keys.add(key)
        assert data.shape == gradient.shape
        assert data.ndim == 1
        assert gradient.ndim == 1

    grad_BO = numpy.ones((nr_batch, nr_out))
    grad_BI = finish_update(grad_BO, sgd)
    assert seen_keys == {id(model._mem), id(model.child._mem)}
|
|
7473384155edbf85304cc541325d0a94a75d2cf4 | Add converting script | labs/12_i2c_oled_display/convert.py | labs/12_i2c_oled_display/convert.py | Python | 0 | @@ -0,0 +1,789 @@
import imageio
import sys
import os
import numpy as np

if (len(sys.argv) != 2):
    print("Format: python convert.py grayscale_image_name")
    sys.exit(1)

try:
    data = imageio.imread(sys.argv[1])
except:
    print("Wrong image name!")
    sys.exit(1)

if (len(data.shape) != 2):
    print("Image must be grayscale!")
    sys.exit(1)

output = open(os.path.splitext(sys.argv[1])[0] + ".c", "w")
output.write("const unsigned char my_pic[] = {\n")

image = data.flatten(order='C')

fimage = np.array_split(image, image.shape[0]//16)

for chunk in fimage:
    fstr = ', '.join(['0x%02x'%x for x in chunk])
    output.write("    " + fstr)
    output.write(",\n")

output.write("}")
output.close()

print("Done! The array is stored in " +\
      os.path.splitext(sys.argv[1])[0] + ".c")
|
|
2448f1d6835129bc08855a9ecc59fea347a14243 | add re.escape for match_with_format | onlinejudge/implementation/format_utils.py | onlinejudge/implementation/format_utils.py | # Python Version: 3.x
import onlinejudge
import onlinejudge.implementation.utils as utils
import onlinejudge.implementation.logging as log
import collections
import glob
import pathlib
import re
import sys
from typing import Dict, List, Match, Optional
def glob_with_format(directory: pathlib.Path, format: str) -> List[pathlib.Path]:
table = {}
table['s'] = '*'
table['e'] = '*'
pattern = str(directory / utils.percentformat(format, table))
paths = list(map(pathlib.Path, glob.glob(pattern)))
for path in paths:
log.debug('testcase globbed: %s', path)
return paths
def match_with_format(directory: pathlib.Path, format: str, path: pathlib.Path) -> Optional[Match[str]]:
table = {}
table['s'] = '(?P<name>.+)'
table['e'] = '(?P<ext>in|out)'
pattern = re.compile('^' + str(directory.resolve()) + '/' + utils.percentformat(format, table) + '$')
return pattern.match(str(path.resolve()))
def path_from_format(directory: pathlib.Path, format: str, name: str, ext: str) -> pathlib.Path:
table = {}
table['s'] = name
table['e'] = ext
return directory / utils.percentformat(format, table)
def is_backup_or_hidden_file(path: pathlib.Path) -> bool:
basename = path.stem
return basename.endswith('~') or (basename.startswith('#') and basename.endswith('#')) or basename.startswith('.')
def drop_backup_or_hidden_files(paths: List[pathlib.Path]) -> List[pathlib.Path]:
result = [] # type: List[pathlib.Path]
for path in paths:
if is_backup_or_hidden_file(path):
log.warning('ignore a backup file: %s', path)
else:
result += [ path ]
return result
def construct_relationship_of_files(paths: List[pathlib.Path], directory: pathlib.Path, format: str) -> Dict[str, Dict[str, pathlib.Path]]:
tests = collections.defaultdict(dict) # type: Dict[str, Dict[str, pathlib.Path]]
for path in paths:
m = match_with_format(directory, format, path.resolve())
if not m:
log.error('unrecognizable file found: %s', path)
sys.exit(1)
name = m.groupdict()['name']
ext = m.groupdict()['ext']
assert ext not in tests[name]
tests[name][ext] = path
for name in tests:
if 'in' not in tests[name]:
assert 'out' in tests[name]
log.error('dangling output case: %s', tests[name]['out'])
sys.exit(1)
if not tests:
log.error('no cases found')
sys.exit(1)
log.info('%d cases found', len(tests))
return tests
| Python | 0 | @@ -247,16 +247,17 @@
 tional
 
+
 def glob
@@ -598,16 +598,17 @@
 paths
 
+
 def matc
@@ -817,16 +817,26 @@
 e('^' +
+re.escape(
 str(dire
@@ -851,18 +851,44 @@
 solve())
+)
+ +
+
 '/' + u
@@ -898,38 +898,49 @@
 s.percentformat(
+re.escape(
 format
+)
 , table) + '$')
@@ -986,16 +986,17 @@
 ve()))
 
+
 def path
@@ -1201,16 +1201,17 @@
 table)
 
+
 def is_b
@@ -1405,16 +1405,17 @@
 h('.')
 
+
 def drop
@@ -1693,22 +1693,20 @@
 ult += [
- 
 path
- 
 ]
     re
@@ -1718,16 +1718,17 @@
 result
 
+
 def cons
@@ -1865,47 +1865,8 @@
 :
-    tests = collections.defaultdict(dict)
 #
@@ -1906,16 +1906,58 @@
 .Path]]
+    tests = collections.defaultdict(dict)
 for
@@ -2176,16 +2176,16 @@
 'name']
+
@@ -2188,17 +2188,16 @@
 ext
- 
 = m.grou
|
3db7c5502bcba0adbfbcf6649c0b4179b37cd74a | Create redis_board.py | simpleRaft/boards/redis_board.py | simpleRaft/boards/redis_board.py | Python | 0.000001 | @@ -0,0 +1,627 @@
import redis
from board import Board

class RedisBoard( Board ):
    """This will create a message board that is backed by Redis."""

    def __init__( self, *args, **kwargs ):
        """Creates the Redis connection."""
        self.redis = redis.Redis( *args, **kwargs )

    def set_owner( self, owner ):
        self.owner = owner

    def post_message( self, message ):
        """This will append the message to the list."""
        pass

    def get_message( self ):
        """This will pop a message off the list."""
        pass

    def _key( self ):
        if not self.key:
            self.key = "%s-queue" % self.owner

        return self.key
|
|
69fbab70f09f83e763f9af7ff02d028af62d8d89 | Create weighted_4_node_probability_convergence.py | weighted_4_node_probability_convergence.py | weighted_4_node_probability_convergence.py | Python | 0.000003 | @@ -0,0 +1,1935 @@
# statistics on convergence_weighted_4_node.txt
# output into a csv file
import re,sys, numpy as np, pandas as pd
from pandas import Series, DataFrame

def main(argv):
    author = ''
    play = ''

    sub = []

    play_subgraph=Series()
    l=''
    subgraph = ''
    subgraphs = []
    pro = 0.0
    pros = []

    f = open('./convergence_weighted_4_node.txt','r')
    fi = open('./convergence_weighted_4_node.csv','w')

    # first to get the full index of subgraphs
    for line in f:
        if '*:' in line or '-:' in line:
            continue
        l = re.split(':',line.strip())
        subgraph = l[0]
        if subgraph not in sub:
            sub.append(subgraph)

    df = DataFrame(index=sub)
    f.seek(0)
    for line in f:
        if '*:' in line:
            author = line[10:12]
        elif '-:' in line:
            if play!='':
                play_subgraph = Series(pros,index=subgraphs)
                #play_subgraph=Series(sub_pro,index=sub,dtype=float)

                play_subgraph.name=author+':'+play
                play_subgraph.index.name='probability'

                df[play_subgraph.name]=play_subgraph

                #if author=='Sh':
                #    print 'play_subgraph.name = '+play_subgraph.name
                #    print play_subgraph
                #    print 'df'
                #    print df[play_subgraph.name]
            play = re.split('-',line)[6]
            subgraphs = []
            pros = []
        else:
            l = re.split(':',line.strip())
            subgraph = l[0]
            pro = float(l[-1])
            subgraphs.append(subgraph)
            pros.append(pro)

            #sub_pro[subgraph] = pro

    print 'sub has '+str(len(sub))+' lines.'
    #df.fillna(0)
    #print df
    df.to_csv(fi)
    #print sub

if __name__ == '__main__':
    main(sys.argv)
|
|
944ec176f4d6db70f9486dddab9a6cf901d6d575 | Create MyUsefulExample.py | src/zhang/MyUsefulExample.py | src/zhang/MyUsefulExample.py | Python | 0 | @@ -0,0 +1,2283 @@
#JUST EXAMPLES
import pyspark.ml.recommendation
df = spark.createDataFrame(
...     [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
...     ["user", "item", "rating"])

als = ALS(rank=10, maxIter=5, seed=0)

model = als.fit(df)
model.rank
#10
model.userFactors.orderBy("id").collect()
#[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]

test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
predictions[0]
#Row(user=0, item=2, prediction=-0.13807615637779236)
predictions[1]
#Row(user=1, item=0, prediction=2.6258413791656494)
predictions[2]
#Row(user=2, item=0, prediction=-1.5018409490585327)
als_path = temp_path + "/als"
als.save(als_path)
als2 = ALS.load(als_path)
als.getMaxIter()
#5
model_path = temp_path + "/als_model"
model.save(model_path)
model2 = ALSModel.load(model_path)
model.rank == model2.rank
#True
sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
#True
sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
#True

# ---------------------------------------
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import Row

lines = spark.read.text("../zhang/proj/sample_movielens_ratings.txt").rdd
parts = lines.map(lambda row: row.value.split("::"))
ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
                                     rating=float(p[2]), timestamp=long(p[3])))
ratings = spark.createDataFrame(ratingsRDD)
(training, test) = ratings.randomSplit([0.8, 0.2])

# Build the recommendation model using ALS on the training data
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating")
model = als.fit(training)

# Evaluate the model by computing the RMSE on the test data
# prediction is a dataframe DataFrame[movieId: bigint, rating: double, timestamp: bigint, userId: bigint, prediction: float]
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
                                predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))
|
|
5ba36ca805b002af63c619e17dd00400650da14b | Add script to rewrite the agents used by scc. | agent_paths.py | agent_paths.py | Python | 0 | @@ -0,0 +1,1131 @@
#!/usr/bin/env python3
from argparse import ArgumentParser
import json
import os.path
import re
import sys

from generate_simplestreams import json_dump


def main():
    parser = ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('output')
    args = parser.parse_args()
    paths_hashes = {}
    with open(args.input) as input_file:
        stanzas = json.load(input_file)
    hashes = {}
    for stanza in stanzas:
        path = os.path.join('agent', os.path.basename(stanza['path']))
        path = re.sub('-win(2012(hv)?(r2)?|7|8|81)-', '-windows-', path)
        path_hash = stanza['sha256']
        paths_hashes.setdefault(path, stanza['sha256'])
        if paths_hashes[path] != path_hash:
            raise ValueError('Conflicting hash')
        stanza['path'] = path
        hashes[path] = path_hash
    ph_list = {}
    for path, path_hash in hashes.items():
        ph_list.setdefault(path_hash, set()).add(path)
    for path_hash, paths in ph_list.items():
        if len(paths) > 1:
            print(paths)
    json_dump(stanzas, args.output)

if __name__ == '__main__':
    sys.exit(main())
|
|
5cc627d0c0cb18e236a055ce7fceb05b63b45385 | Add flask backend file | woogle.py | woogle.py | Python | 0.000001 | @@ -0,0 +1,273 @@
""":mod:`woogle` --- Flask Backend for Woogle Calendar
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

"""

from flask import Flask
app = Flask(__name__)

@app.route("/")
def calendar():
    return "Hello World!"

if __name__ == "__main__":
    app.run()
|
|
f82ef484f6440c2b5b10eb144af09b770fa413c9 | Add python script for extracting server i18n msgs | .infrastructure/i18n/extract-server-msgs.py | .infrastructure/i18n/extract-server-msgs.py | Python | 0 | @@ -0,0 +1,535 @@
import os

# Keys indicating the fn symbols that pybabel should search for
# when finding translations.
keys = '-k format -k format_time -k format_date -k format_datetime'

# Extraction
os.system("pybabel extract -F babel.cfg {} -o messages.pot .".format(keys))
os.system("pybabel init -i messages.pot -d . -o './beavy-server.po' -l en")
os.system("./node_modules/.bin/po2json beavy-server.po var/server-messages/beavy-server.json -F -f mf --fallback-to-msgid")

# Clean up
os.system("rm messages.pot")
os.system("rm beavy-server.po")
|
|
5046ff8ba17899893a9aa30687a1ec58a6e95af2 | Add solution for Square Detector. | 2014/qualification-round/square-detector.py | 2014/qualification-round/square-detector.py | Python | 0 | @@ -0,0 +1,2110 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys

class QuizzesParser:
    def __init__(self, src):
        self.src = src
        with open(src) as f:
            self.raw = f.read().splitlines()
        self.amount = int(self.raw[0])
    def quizpool(self):
        cur_line = 1
        for i in range(self.amount):
            offset = int(self.raw[cur_line])
            prev_line = cur_line
            cur_line = prev_line + offset + 1
            yield self.raw[prev_line:cur_line]

class QuizSolver:
    def __init__(self, quiz):
        self.quiz = quiz
    def solve(self):
        N = int(self.quiz[0])
        started = False
        start_line = -1
        mask = list()
        length = 0
        for i in range(N):
            line = self.quiz[i]
            if not started and '#' not in line:
                continue
            if not started:
                if line.count('#') > N - i:
                    return 'NO'
                for j in range(len(line)):
                    if len(line) > 2 and j > 0 and j < len(line) - 1 \
                            and line[j] != '#' and '#' in line[:j] \
                            and '#' in line[j:]:
                        return 'NO'
                    mask.append(1 if line[j] == '#' else 0)
                start_line = i
                length = line.count('#')
                started = True
                continue
            if i - start_line >= length:
                if '#' in line:
                    return 'NO'
                else:
                    continue
            mask_pair = list()
            for j in range(len(line)):
                mask_pair.append(1 if line[j] == '#' else 0)
            if any(map(lambda x, y: x ^ y, mask, mask_pair)):
                return 'NO'
        return 'YES'

def main():
    qsparser = QuizzesParser(sys.argv[1])
    with open(sys.argv[2], 'w') as f:
        for i, quiz in enumerate(qsparser.quizpool()):
            qsolver = QuizSolver(quiz)
            f.write('Case #{num}: {ans}\n'.format(num=i+1, ans=qsolver.solve()))

if __name__ == '__main__':
    main()
|
|
3df4cc086bf6c85eebc12094cc3ca459bd2bcd3d | Add unit test for programmatic application and approval | project/members/tests/test_application.py | project/members/tests/test_application.py | Python | 0 | @@ -0,0 +1,477 @@
# -*- coding: utf-8 -*-
import pytest
from members.tests.fixtures.memberlikes import MembershipApplicationFactory
from members.tests.fixtures.types import MemberTypeFactory
from members.models import Member

@pytest.mark.django_db
def test_application_approve():
    mtypes = [MemberTypeFactory(label='Normal member')]
    application = MembershipApplicationFactory()
    email = application.email
    application.approve(set_mtypes=mtypes)
    Member.objects.get(email=email)
|
|
1a682405904dcc711d889881d6a216b3eff9e1dd | remove off method from status light | status_light.py | status_light.py | import time
import config
import RPi.GPIO as GPIO
class StatusLight(object):
"""available patterns for the status light"""
patterns = {
'on' : (.1, [True]),
'off' : (.1, [False]),
'blink_fast' : (.1, [False, True]),
'blink' : (.1, [False, False, False, True, True, True, True, True, True, True, True, True, True]),
'blink_pauze' : (.1, [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]),
}
"""placeholder for pattern to tenmporarily interrupt
status light with different pattern"""
interrupt_pattern = [0, []]
"""continue flashing, controlled by the stop"""
cont = True
pin_id = None
def __init__(self, pin_id):
self.pin_id = pin_id
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin_id, GPIO.OUT)
self.action = 'on'
def interrupt(self, action, repeat = 1):
"""Interupt the current status of the light with a names action
parameters: action the name of the action
repeat: the number of times to repeatthe interruption"""
self.interrupt_pattern[0] = self.patterns[action][0]
for i in range(0, repeat):
self.interrupt_pattern[1].extend(list(self.patterns[action][1][:]))
def start(self):
"""Perform a status light action"""
while True:
for state in self.patterns[self.action][1]:
# if the interrupt_pattern is not empty, prioritize it
while len(self.interrupt_pattern[1]):
time.sleep(self.interrupt_pattern[0])
self.set_state(state = self.interrupt_pattern[1].pop(0))
# peform the regular action when not interrupted
time.sleep(self.patterns[self.action][0])
self.set_state(state)
def off(self, state):
"""Turn off status light"""
self.cont = False
self.set_state(state)
def set_state(self, state):
"""Turn the light on or off"""
GPIO.output(self.pin_id, state)
def __del__(self):
GPIO.cleanup()
if __name__ == '__main__':
light = StatusLight(config.status_light_pin)
light.interrupt('blink_fast', 3)
light.start()
| Python | 0.000001 | @@ -1863,131 +1863,8 @@
 e)
 
-    
-    def off(self, state):
-        """Turn off status light"""
-        self.cont = False
-        self.set_state(state)
|
aa292c2f180ffcfdfc55114750f22b6c8790a69b | Add Jaro-Winkler distance based on code on RosettaCode | pygraphc/similarity/RosettaJaroWinkler.py | pygraphc/similarity/RosettaJaroWinkler.py | Python | 0.000004 | @@ -0,0 +1,1600 @@
from __future__ import division
from itertools import combinations
from time import time


def jaro(s, t):
    s_len = len(s)
    t_len = len(t)

    if s_len == 0 and t_len == 0:
        return 1

    match_distance = (max(s_len, t_len) // 2) - 1

    s_matches = [False] * s_len
    t_matches = [False] * t_len

    matches = 0
    transpositions = 0

    for i in range(s_len):
        start = max(0, i - match_distance)
        end = min(i + match_distance + 1, t_len)

        for j in range(start, end):
            if t_matches[j]:
                continue
            if s[i] != t[j]:
                continue
            s_matches[i] = True
            t_matches[j] = True
            matches += 1
            break

    if matches == 0:
        return 0

    k = 0
    for i in range(s_len):
        if not s_matches[i]:
            continue
        while not t_matches[k]:
            k += 1
        if s[i] != t[k]:
            transpositions += 1
        k += 1

    return ((matches / s_len) +
            (matches / t_len) +
            ((matches - transpositions / 2) / matches)) / 3


start = time()
log_file = '/home/hs32832011/Git/labeled-authlog/dataset/Hofstede2014/dataset1_perday/Dec 1.log'
with open(log_file, 'r') as f:
    lines = f.readlines()


log_length = len(lines)
for line1, line2 in combinations(xrange(log_length), 2):
    s = lines[line1]
    t = lines[line2]
    print("%.10f" % (jaro(s, t)))

# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
|
|
2674aa95c69c6e0fe0d8fd71d9116150cfab6507 | add xdawn decoding example | examples/decoding/plot_decoding_xdawn_meg.py | examples/decoding/plot_decoding_xdawn_meg.py | Python | 0.000001 | @@ -0,0 +1,2911 @@
"""
=============================
 XDAWN Decoding From MEG data
=============================

ERF decoding with Xdawn. For each event type, a set of spatial Xdawn filters
are trained and apply on the signal. Channels are concatenated and rescaled to
create features vectors that will be fed into a Logistic Regression.
"""
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)


import mne
from mne import io
from mne.datasets import sample
from mne.preprocessing.xdawn import Xdawn
from mne.decoding import ConcatenateChannels

from sklearn.cross_validation import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import matplotlib.pyplot as plt

print(__doc__)

data_path = sample.data_path()

###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)

# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20, method='iir')
events = mne.read_events(event_fname)

raw.info['bads'] = ['MEG 2443']  # set bad channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                       exclude='bads')

epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
                    picks=picks, baseline=None, preload=True,
                    add_eeg_ref=False, verbose=False)

# Create classification pipeline
clf = make_pipeline(Xdawn(3),
                    ConcatenateChannels(),
                    MinMaxScaler(),
                    LogisticRegression(penalty='l1'))

# Get the labels
labels = epochs.events[:, -1]

# Cross validator
cv = StratifiedKFold(labels, 10, shuffle=True, random_state=42)

# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv:
    clf.fit(epochs[train], labels[train])
    preds[test] = clf.predict(epochs[test])

# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)

# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

# Plot confusion matrix
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Normalized Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
|
|
34986c7bfd1d4634861a5c4b54cf90ef18090ff4 | test versions of required libs across different places | spacy/tests/test_requirements.py | spacy/tests/test_requirements.py | Python | 0 | @@ -0,0 +1,2407 @@
+import re%0Afrom pathlib import Path%0A%0A%0Adef test_build_dependencies(en_vocab):%0A libs_ignore_requirements = %5B%22pytest%22, %22pytest-timeout%22, %22mock%22, %22flake8%22, %22jsonschema%22%5D%0A libs_ignore_setup = %5B%22fugashi%22, %22natto-py%22, %22pythainlp%22%5D%0A%0A # check requirements.txt%0A root_dir = Path(__file__).parent.parent.parent%0A req_file = root_dir / %22requirements.txt%22%0A req_dict = %7B%7D%0A with req_file.open() as f:%0A lines = f.readlines()%0A for line in lines:%0A line = line.strip()%0A if not line.startswith(%22#%22):%0A lib, v = _parse_req(line)%0A if lib and lib not in libs_ignore_requirements:%0A req_dict%5Blib%5D = v%0A%0A # check setup.cfg and compare to requirements.txt%0A # also fails when there are missing or additional libs%0A setup_file = root_dir / %22setup.cfg%22%0A with setup_file.open() as f:%0A lines = f.readlines()%0A setup_keys = set()%0A for line in lines:%0A line = line.strip()%0A if not line.startswith(%22#%22):%0A lib, v = _parse_req(line)%0A if lib and not lib.startswith(%22cupy%22) and lib not in libs_ignore_setup:%0A req_v = req_dict.get(lib, None)%0A assert req_v is not None # if fail: setup.cfg contains a lib not in requirements.txt%0A assert (lib+v) == (lib+req_v) # if fail: setup.cfg & requirements.txt have conflicting versions%0A setup_keys.add(lib)%0A assert sorted(setup_keys) == sorted(req_dict.keys()) # if fail: requirements.txt contains a lib not in setup.cfg%0A%0A # check pyproject.toml and compare the versions of the libs to requirements.txt%0A # does not fail when there are missing or additional libs%0A toml_file = root_dir / %22pyproject.toml%22%0A with toml_file.open() as f:%0A lines = f.readlines()%0A toml_keys = set()%0A for line in lines:%0A line = line.strip()%0A line = line.strip(%22,%22)%0A line = line.strip(%22%5C%22%22)%0A if not line.startswith(%22#%22):%0A lib, v = _parse_req(line)%0A if lib:%0A req_v = req_dict.get(lib, None)%0A assert (lib+v) == (lib+req_v) # if fail: pyproject.toml & requirements.txt have conflicting versions%0A toml_keys.add(lib)%0A%0Adef _parse_req(line):%0A lib = re.match(r%22%5E%5Ba-z0-9%5C-%5D*%22, line).group(0)%0A v = line.replace(lib, %22%22).strip()%0A if not re.match(r%22%5E%5B%3C%3E=%5D%5B%3C%3E=%5D.*%22, v):%0A return None, None%0A return lib, v
|
|
13959dbce03b44f15c4c05ff0715b7d26ff6c0fa | Add a widget. | python/tkinter/python3/animation_print.py | python/tkinter/python3/animation_print.py | Python | 0 | @@ -0,0 +1,1593 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2016 J%C3%A9r%C3%A9mie DECOCK (http://www.jdhp.org)%0A%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A %0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A%0A# See also: http://effbot.org/tkinterbook/widget.htm%0A%0Aimport tkinter as tk%0A%0Adef main():%0A %22%22%22Main function%22%22%22%0A%0A root = tk.Tk()%0A%0A def increment_counter():%0A # Do something...%0A print(%22Hello%22)%0A%0A # Reschedule event in 3 seconds%0A root.after(3000, increment_counter)%0A%0A # Schedule event in 3 seconds%0A root.after(3000, increment_counter)%0A%0A root.mainloop()%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
1372a374b02d5e1d01b1569c71f84bdb71fb1296 | Update handler.py | tendrl/node_agent/message/handler.py | tendrl/node_agent/message/handler.py | import os
from io import BlockingIOError
import sys
import traceback
import gevent.event
import gevent.greenlet
from gevent.server import StreamServer
from gevent import socket
from gevent.socket import error as socket_error
from gevent.socket import timeout as socket_timeout
from tendrl.commons.message import Message
from tendrl.commons.logger import Logger
RECEIVE_DATA_SIZE = 4096
MESSAGE_SOCK_PATH = "/var/run/tendrl/message.sock"
class MessageHandler(gevent.greenlet.Greenlet):
def __init__(self):
super(MessageHandler, self).__init__()
self.server = StreamServer(
self.bind_unix_listener(),
self.read_socket
)
def read_socket(self, sock, *args):
try:
self.data = sock.recv(RECEIVE_DATA_SIZE)
message = Message.from_json(self.data)
Logger(message)
except (socket_error, socket_timeout):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
except (TypeError, ValueError, KeyError, AttributeError):
sys.stderr.write(
"Unable to log the message.%s\n" % self.data)
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
def _run(self):
try:
self.server.serve_forever()
except (TypeError, BlockingIOError, socket_error, ValueError):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
def stop(self):
pass
def bind_unix_listener(self):
# http://0pointer.de/blog/projects/systemd.html (search "file
# descriptor 3")
try:
socket_fd = 3
self.sock = socket.fromfd(socket_fd, socket.AF_UNIX,
socket.SOCK_STREAM)
self.sock.setblocking(0)
self.sock.listen(50)
return self.sock
except (TypeError, BlockingIOError, socket_error, ValueError):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb,
file=sys.stderr)
pass
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(MESSAGE_SOCK_PATH):
os.remove(MESSAGE_SOCK_PATH)
self.sock.setblocking(0)
self.sock.bind(MESSAGE_SOCK_PATH)
self.sock.listen(50)
return self.sock
except:
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb,
file=sys.stderr)
| Python | 0.000001 | @@ -45,16 +45,30 @@
ort sys%0A
+import struct%0A
import t
@@ -76,16 +76,16 @@
aceback%0A
-
%0A%0Aimport
@@ -378,33 +378,8 @@
r%0A%0A%0A
-RECEIVE_DATA_SIZE = 4096%0A
MESS
@@ -731,46 +731,153 @@
s
-elf.data = sock.recv(RECEIVE_DATA_SIZE
+ize = self._msgLength(sock)%0A data = self._read(sock, size)%0A frmt = %22=%25ds%22 %25 size%0A msg = struct.unpack(frmt, data
)%0A
@@ -914,25 +914,22 @@
om_json(
-self.data
+msg%5B0%5D
)%0A
@@ -1463,32 +1463,453 @@
ile=sys.stderr)%0A
+ %0A def _read(self, sock, size):%0A data = ''%0A while len(data) %3C size:%0A dataTmp = sock.recv(size-len(data))%0A data += dataTmp%0A if dataTmp == '':%0A raise RuntimeError(%22Message socket connection broken%22)%0A return data%0A %0A def _msgLength(self, sock):%0A d = self._read(sock, 4)%0A s = struct.unpack('=I', d)%0A return s%5B0%5D%0A
%0A def _run(se
|
bb8c257dd5ce845c7ac07742c739b719c71d8b3a | add support for ROBOT_SUPPRESS_NAME to remove exception name from error message | src/robot/utils/error.py | src/robot/utils/error.py | # Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import traceback
from robot.errors import RobotError
from .unic import unic
RERAISED_EXCEPTIONS = (KeyboardInterrupt, SystemExit, MemoryError)
if sys.platform.startswith('java'):
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
RERAISED_EXCEPTIONS += (OutOfMemoryError,)
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
This method handles also exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details():
"""Returns error message and details of the last occurred exception.
"""
details = ErrorDetails()
return details.message, details.traceback
def ErrorDetails():
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback)
class _ErrorDetails(object):
_generic_exceptions = ('AssertionError', 'AssertionFailedError', 'Exception',
'Error', 'RuntimeError', 'RuntimeException',
'DataError', 'TimeoutError', 'RemoteError')
def __init__(self, exc_type, exc_value, exc_traceback):
self.error = exc_value
self._exc_value = exc_value
self._exc_type = exc_type
self._exc_traceback = exc_traceback
self._message = None
self._traceback = None
@property
def message(self):
if self._message is None:
self._message = self._get_message()
return self._message
@property
def traceback(self):
if self._traceback is None:
self._traceback = self._get_details()
return self._traceback
def _get_name(self, exc_type):
try:
return exc_type.__name__
except AttributeError:
return unic(exc_type)
def _format_message(self, name, message):
message = unic(message or '')
message = self._clean_up_message(message, name)
name = name.split('.')[-1] # Use only last part of the name
if not message:
return name
if name in self._generic_exceptions:
return message
return '%s: %s' % (name, message)
def _clean_up_message(self, message, name):
return message
class PythonErrorDetails(_ErrorDetails):
def _get_message(self):
# If exception is a "string exception" without a message exc_value is None
if self._exc_value is None:
return unic(self._exc_type)
name = self._get_name(self._exc_type)
try:
msg = unicode(self._exc_value)
except UnicodeError: # Happens if message is Unicode and version < 2.6
msg = ' '.join(unic(a) for a in self._exc_value.args)
return self._format_message(name, msg)
def _get_details(self):
if isinstance(self._exc_value, RobotError):
return self._exc_value.details
return 'Traceback (most recent call last):\n' + self._get_traceback()
def _get_traceback(self):
tb = self._exc_traceback
while tb and self._is_excluded_traceback(tb):
tb = tb.tb_next
return ''.join(traceback.format_tb(tb)).rstrip() or ' None'
def _is_excluded_traceback(self, traceback):
module = traceback.tb_frame.f_globals.get('__name__')
return module and module.startswith('robot.')
class JavaErrorDetails(_ErrorDetails):
_java_trace_re = re.compile('^\s+at (\w.+)')
_ignored_java_trace = ('org.python.', 'robot.running.', 'robot$py.',
'sun.reflect.', 'java.lang.reflect.')
def _get_message(self):
exc_name = self._get_name(self._exc_type)
# OOME.getMessage and even toString seem to throw NullPointerException
if not self._is_out_of_memory_error(self._exc_type):
exc_msg = self._exc_value.getMessage()
else:
exc_msg = str(self._exc_value)
return self._format_message(exc_name, exc_msg)
def _is_out_of_memory_error(self, exc_type):
return exc_type is OutOfMemoryError
def _get_details(self):
# OOME.printStackTrace seems to throw NullPointerException
if self._is_out_of_memory_error(self._exc_type):
return ''
output = StringWriter()
self._exc_value.printStackTrace(PrintWriter(output))
details = '\n'.join(line for line in output.toString().splitlines()
if not self._is_ignored_stack_trace_line(line))
msg = unic(self._exc_value.getMessage() or '')
if msg:
details = details.replace(msg, '', 1)
return details
def _is_ignored_stack_trace_line(self, line):
if not line:
return True
res = self._java_trace_re.match(line)
if res is None:
return False
location = res.group(1)
for entry in self._ignored_java_trace:
if location.startswith(entry):
return True
return False
def _clean_up_message(self, msg, name):
msg = self._remove_stack_trace_lines(msg)
return self._remove_exception_name(msg, name).strip()
def _remove_stack_trace_lines(self, msg):
lines = msg.splitlines()
while lines:
if self._java_trace_re.match(lines[-1]):
lines.pop()
else:
break
return '\n'.join(lines)
def _remove_exception_name(self, msg, name):
tokens = msg.split(':', 1)
if len(tokens) == 2 and tokens[0] == name:
msg = tokens[1]
return msg
| Python | 0 | @@ -3361,16 +3361,87 @@
ceptions
+ or %5C%0A getattr(self.error, 'ROBOT_SUPPRESS_NAME', False)
:%0A
|
49253451d65511713cd97a86c7fe54e64b3e80a9 | Add a separate test of the runtest.py --qmtest option. | test/runtest/qmtest.py | test/runtest/qmtest.py | Python | 0 | @@ -0,0 +1,3111 @@
+#!/usr/bin/env python%0A#%0A# __COPYRIGHT__%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining%0A# a copy of this software and associated documentation files (the%0A# %22Software%22), to deal in the Software without restriction, including%0A# without limitation the rights to use, copy, modify, merge, publish,%0A# distribute, sublicense, and/or sell copies of the Software, and to%0A# permit persons to whom the Software is furnished to do so, subject to%0A# the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included%0A# in all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY%0A# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE%0A# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND%0A# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE%0A# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION%0A# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION%0A# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.%0A#%0A%0A__revision__ = %22__FILE__ __REVISION__ __DATE__ __DEVELOPER__%22%0A%0A%22%22%22%0ATest that the --qmtest option invokes tests directly via QMTest,%0Anot directly via Python.%0A%22%22%22%0A%0Aimport os.path%0Aimport string%0A%0Aimport TestRuntest%0A%0Atest = TestRuntest.TestRuntest()%0A%0Atest.subdir('test')%0A%0Atest_fail_py = os.path.join('test', 'fail.py')%0Atest_no_result_py = os.path.join('test', 'no_result.py')%0Atest_pass_py = os.path.join('test', 'pass.py')%0A%0Aworkpath_fail_py = test.workpath(test_fail_py)%0Aworkpath_no_result_py = test.workpath(test_no_result_py)%0Aworkpath_pass_py = test.workpath(test_pass_py)%0A%0Atest.write_failing_test(test_fail_py)%0Atest.write_no_result_test(test_no_result_py)%0Atest.write_passing_test(test_pass_py)%0A%0A# NOTE: the FAIL and PASS lines below have trailing spaces.%0A%0Aexpect_stdout = %22%22%22%5C%0Aqmtest run --output results.qmr --format none --result-stream=%22scons_tdb.AegisChangeStream%22 test/fail.py test/no_result.py test/pass.py%0A--- TEST RESULTS -------------------------------------------------------------%0A%0A test/fail.py : FAIL %0A%0A FAILING TEST STDOUT%0A%0A FAILING TEST STDERR%0A%0A test/no_result.py : NO_RESULT%0A%0A NO RESULT TEST STDOUT%0A%0A NO RESULT TEST STDERR%0A%0A test/pass.py : PASS %0A%0A--- TESTS THAT DID NOT PASS --------------------------------------------------%0A%0A test/fail.py : FAIL %0A%0A test/no_result.py : NO_RESULT%0A%0A%0A--- STATISTICS ---------------------------------------------------------------%0A%0A 3 tests total%0A%0A 1 ( 33%25) tests PASS%0A 1 ( 33%25) tests FAIL%0A 1 ( 33%25) tests NO_RESULT%0A%22%22%22%0A%0Atestlist = %5B%0A test_fail_py,%0A test_no_result_py,%0A test_pass_py,%0A%5D%0A%0Atest.run(arguments='--qmtest %25s' %25 string.join(testlist),%0A status=1,%0A stdout=expect_stdout)%0A%0Atest.pass_test()%0A%0A# Local Variables:%0A# tab-width:4%0A# indent-tabs-mode:nil%0A# End:%0A# vim: set expandtab tabstop=4 shiftwidth=4:%0A
|
|
0c2fb46c977d8d8ee03d295fee8ddf37cee8cc06 | Add script to calculate recalls of track zip files. | tools/stats/zip_track_recall.py | tools/stats/zip_track_recall.py | Python | 0 | @@ -0,0 +1,1677 @@
+#!/usr/bin/env python%0A%0Afrom vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame%0Afrom vdetlib.utils.common import iou%0Aimport argparse%0Aimport numpy as np%0Aimport glob%0Aimport cPickle%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser()%0A parser.add_argument('vid_file')%0A parser.add_argument('annot_file')%0A parser.add_argument('track_dir')%0A args = parser.parse_args()%0A%0A vid_proto = proto_load(args.vid_file)%0A annot_proto = proto_load(args.annot_file)%0A track_files = glob.glob(args.track_dir + %22/*.pkl%22)%0A tracks = %5B%5D%0A frames = %5B%5D%0A for track_file in track_files:%0A track = cPickle.loads(open(track_file, 'rb').read())%0A tracks.append(track%5B'bbox'%5D)%0A frames.append(track%5B'frame'%5D)%0A%0A gt_count = 0%0A recall_count = 0%0A for frame in vid_proto%5B'frames'%5D:%0A frame_id = frame%5B'frame'%5D%0A # annot boxes%0A annot_boxes = %5Btrack_box_at_frame(annot_track%5B'track'%5D, frame_id) %5C%0A for annot_track in annot_proto%5B'annotations'%5D%5D%0A annot_boxes = %5Bbox for box in annot_boxes if box is not None%5D%0A%0A if len(annot_boxes) == 0: continue%0A gt_count += len(annot_boxes)%0A%0A # track boxes%0A track_boxes = %5Btrack%5Bframe==frame_id,:%5D.flatten() for track, frame %5C%0A in zip(tracks, frames) if np.any(frame==frame_id)%5D%0A if len(track_boxes) == 0: continue%0A%0A overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))%0A max_overlaps = overlaps.max(axis=1)%0A recall_count += np.count_nonzero(max_overlaps %3E= 0.5)%0A%0A print %22%7B%7D %7B%7D %7B%7D %7B%7D%22.format(vid_proto%5B'video'%5D,%0A gt_count, recall_count, float(recall_count) / gt_count)%0A
|
|
3ee41b704e98e143d23eb0d714c6d79e8d6e6130 | Write test for RequestTypeError | tests/web/test_request_type_error.py | tests/web/test_request_type_error.py | Python | 0.000004 | @@ -0,0 +1,410 @@
+import unittest%0Afrom performance.web import RequestTypeError%0A%0A%0Aclass RequestTypeErrorTestCase(unittest.TestCase):%0A def test_init(self):%0A type = 'get'%0A error = RequestTypeError(type)%0A self.assertEqual(type, error.type)%0A%0A def test_to_string(self):%0A type = 'get'%0A error = RequestTypeError(type)%0A self.assertEqual('Invalid request type %22%25s%22' %25 type, error.__str__())
|
|
125a6714d1c4bda74a32c0b2fc67629ef2b45d7a | 6-2 lucky_number | 06/lucky_number.py | 06/lucky_number.py | Python | 0.998787 | @@ -0,0 +1,171 @@
+friend = %7B'dwq': '5', 'bql': '3','xx': '28', 'txo':'44', 'fw':'2'%7D%0A%0Aprint(friend%5B'dwq'%5D)%0Aprint(friend%5B'bql'%5D)%0Aprint(friend%5B'xx'%5D)%0Aprint(friend%5B'txo'%5D)%0Aprint(friend%5B'fw'%5D)%0A
|
|
00e75bc59dfec20bd6b96ffac7d17da5760f584c | Add Slack integration | hc/api/migrations/0012_auto_20150930_1922.py | hc/api/migrations/0012_auto_20150930_1922.py | Python | 0.000001 | @@ -0,0 +1,486 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('api', '0011_notification'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='channel',%0A name='kind',%0A field=models.CharField(choices=%5B('email', 'Email'), ('webhook', 'Webhook'), ('slack', 'Slack'), ('pd', 'PagerDuty')%5D, max_length=20),%0A ),%0A %5D%0A
|
|
f92d06346b3d28513c5f5b9833dbf5a4d48c3e46 | Create rot_alpha.py | rot_alpha.py | rot_alpha.py | Python | 0.000078 | @@ -0,0 +1,631 @@
+#!/usr/bin/env python%0A%0Afrom string import uppercase, lowercase, maketrans%0Aimport sys%0A%0Aclass ROTAlpha():%0A%0A def rot_alpha(self, data, rot):%0A%0A upper = ''.join(%5Buppercase%5B(i+rot)%2526%5D for i in xrange(26)%5D)%0A lower = ''.join(%5Blowercase%5B(i+rot)%2526%5D for i in xrange(26)%5D)%0A table = maketrans(uppercase + lowercase, upper + lower)%0A%0A print(data.translate(table))%0A%0A%0Aif __name__ == '__main__':%0A try:%0A%0A data = sys.argv%5B1%5D%0A rot = sys.argv%5B2%5D%0A rot = int(rot, 0)%0A table = ROTAlpha()%0A table.rot_alpha(data, rot)%0A%0A except IndexError:%0A%0A print('Usage: rot_alpha.py %3Calpha numeric data%3E %3Cint to rotate%3E')%0A sys.exit(1)%0A
|
|
d96acd58ecf5937da344942f387d845dc5b26871 | Add db tests | test/test_db.py | test/test_db.py | Python | 0 | @@ -0,0 +1,2186 @@
+from piper.db import DbCLI%0A%0Aimport mock%0Aimport pytest%0A%0A%0Aclass DbCLIBase(object):%0A def setup_method(self, method):%0A self.cli = DbCLI()%0A self.ns = mock.Mock()%0A self.config = mock.Mock()%0A%0A%0Aclass TestDbCLIRun(DbCLIBase):%0A def test_plain_run(self):%0A self.cli.init = mock.Mock()%0A ret = self.cli.run(self.ns, self.config)%0A%0A assert ret == 0%0A self.cli.init.assert_called_once_with(self.ns, self.config)%0A%0A%0Aclass TestDbCLIInit(DbCLIBase):%0A def test_no_db(self):%0A self.config.db.host = None%0A%0A with pytest.raises(AssertionError):%0A self.cli.init(self.ns, self.config)%0A%0A def test_calls(self):%0A self.cli.handle_sqlite = mock.Mock()%0A self.cli.create_tables = mock.Mock()%0A%0A self.cli.init(self.ns, self.config)%0A%0A self.cli.handle_sqlite.assert_called_once_with(self.config.db.host)%0A self.cli.create_tables.assert_called_once_with(%0A self.config.db.host,%0A echo=self.ns.verbose,%0A )%0A%0A%0Aclass TestDbCLIHandleSqlite(DbCLIBase):%0A @mock.patch('piper.utils.mkdir')%0A @mock.patch('os.path.dirname')%0A @mock.patch('os.path.exists')%0A def test_sqlite_handling_creates_dir(self, exists, dirname, mkdir):%0A self.config.db.host = 'sqlite:///amaranthine.db'%0A exists.return_value = False%0A%0A self.cli.handle_sqlite(self.ns.host)%0A mkdir.assert_called_once_with(dirname.return_value)%0A%0A%0Aclass TestDbCLICreateTables(DbCLIBase):%0A def setup_method(self, method):%0A super(TestDbCLICreateTables, self).setup_method(method)%0A self.cli.tables = (mock.Mock(), mock.Mock())%0A%0A for x, table in enumerate(self.cli.tables):%0A table.__tablename__ = x%0A%0A @mock.patch('piper.db.Session')%0A @mock.patch('piper.db.create_engine')%0A def test_creation(self, ce, se):%0A eng = ce.return_value%0A host = self.config.host%0A%0A self.cli.create_tables(host)%0A%0A ce.assert_called_once_with(host, echo=False)%0A se.configure.assert_called_once_with(bind=eng)%0A%0A for table in self.cli.tables:%0A assert table.metadata.bind is eng%0A table.metadata.create_all.assert_called_once_with()%0A
|
|
83afa054e3bee18aba212394973978fd49429afa | Create test_ratings.py | test_ratings.py | test_ratings.py | Python | 0.000015 | @@ -0,0 +1,1539 @@
+#!/usr/bin/env python3.5%0A%0Aimport sys%0Aimport re%0Aimport os%0Aimport csv%0A%0Afrom extract_toc import parseargs%0Afrom get_ratings import Ratings, Ratings2%0A%0Adef nvl(v1,v2):%0A if v1:%0A return v1%0A else:%0A return v2%0A%0Adef process_ratings_for_file(ratings, filename):%0A ratings.process_file(filename)%0A ratings.map_ratings()%0A improvement = 0%0A for k in ratings.all_available_ratings:%0A v = ratings.ratings_mapped.get(k)%0A if not v:%0A v = %5BNone%5D * 3%0A v_current = ratings.current_ratings_alt.get(k)%0A if v_current:%0A if (not v%5B0%5D or v%5B0%5D != v_current):%0A improvement += 1%0A elif (not v_current):%0A if (v%5B0%5D):%0A improvement -= 1%0A print(%22%25-30s %25-2s/%25-2s %25-2s %25-2s%22 %25 (k, nvl(v%5B0%5D, %22_%22), nvl(v_current, %22_%22), nvl(v%5B1%5D, %22_%22), nvl(v%5B2%5D, %22_%22)))%0A # print(ratings.current_ratings_alt)%0A print(%22%22)%0A print(%22Number of improvements using new methodology = %25d%22 %25 (improvement))%0A print(%22%22)%0A %0Adef main(args):%0A argsmap = parseargs(args)%0A %0A files = argsmap.get('files')%0A if (not files):%0A sys.exit(0)%0A%0A ratings_mapper_file = argsmap.get(%22rmap%22)%0A if ratings_mapper_file:%0A ratings_mapper_file = ratings_mapper_file%5B0%5D%0A if not ratings_mapper_file:%0A print(%22Ratings Mapper File file name must be entered using the --rmap option...%22)%0A sys.exit(1)%0A%0A ratings = Ratings(ratings_mapper_file)%0A %0A for filename in files:%0A print(%22Processing file: %22 + filename)%0A print(%22============================%22)%0A process_ratings_for_file(ratings, filename)%0A%0Aif __name__ == '__main__':%0A args = sys.argv%5B1:%5D%0A main(args)%0A
|
|
f804300765f036f375768e57e081b070a549a800 | Add test script with only a few packages | test-extract-dependencies.py | test-extract-dependencies.py | Python | 0 | @@ -0,0 +1,380 @@
+from dependencies import extract_package%0Aimport xmlrpc.client as xmlrpclib%0A%0Aimport random%0Aclient = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')%0Apackages = %5B'gala', 'scikit-learn', 'scipy', 'scikit-image', 'Flask'%5D%0Arandom.shuffle(packages)%0Afor i, package in enumerate(packages):%0A extract_package(package, to='test-pypi-deps.txt',%0A client=client, n=i)%0A
|
|
7de55b168a276b3d5cdea4d718680ede46edf4d8 | Create file to test thinc.extra.search | thinc/tests/unit/test_beam_search.py | thinc/tests/unit/test_beam_search.py | Python | 0 | @@ -0,0 +1,88 @@
+from ...extra.search import MaxViolation%0A%0Adef test_init_violn():%0A v = MaxViolation()%0A
|
|
38b839405f9976df2d63c08d3c16441af6cdebd1 | Add test | test/selenium/src/tests/test_risk_threats_page.py | test/selenium/src/tests/test_risk_threats_page.py | Python | 0.000005 | @@ -0,0 +1,1004 @@
+# Copyright (C) 2015 Google Inc., authors, and contributors %3Csee AUTHORS file%3E%0A# Licensed under http://www.apache.org/licenses/LICENSE-2.0 %3Csee LICENSE file%3E%0A# Created By: [email protected]%0A# Maintained By: [email protected]%0A%0A%22%22%22All smoke tests relevant to risks/threats page%22%22%22%0A%0Aimport pytest # pylint: disable=import-error%0Afrom lib import base%0Afrom lib.constants import url%0A%0A%0Aclass TestRiskThreatPage(base.Test):%0A %22%22%22Tests the threat/risk page, a part of smoke tests, section 8.%22%22%22%0A%0A @pytest.mark.smoke_tests%0A def test_app_redirects_to_new_risk_page(self, new_risk):%0A %22%22%22Tests if after saving and closing the lhn_modal the app redirects to%0A the object page.%0A%0A Generally we start at a random url. Here we verify that after saving%0A and closing the lhn_modal we're redirected to an url that contains an%0A object id.%0A %22%22%22%0A # pylint: disable=no-self-use%0A # pylint: disable=invalid-name%0A assert url.RISKS + %22/%22 + new_risk.object_id in %5C%0A new_risk.url%0A
|
|
51030039f68d0dc4243b6ba125fb9b7aca44638d | Add Pipeline tests | test/data/test_pipeline.py | test/data/test_pipeline.py | Python | 0.000001 | @@ -0,0 +1,1641 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0Aimport torchtext.data as data%0A%0Afrom ..common.torchtext_test_case import TorchtextTestCase%0A%0A%0Aclass TestPipeline(TorchtextTestCase):%0A @staticmethod%0A def repeat_n(x, n=3):%0A %22%22%22%0A Given a sequence, repeat it n times.%0A %22%22%22%0A return x * n%0A%0A def test_pipeline(self):%0A pipeline = data.Pipeline(str.lower)%0A assert pipeline(%22Test STring%22) == %22test string%22%0A assert pipeline(%22%E1%91%8C%E1%91%8EI%E1%91%95O%E1%97%AA%E1%95%AE_T%E1%95%AE%E1%99%ADT%22) == %22%E1%91%8C%E1%91%8Ei%E1%91%95o%E1%97%AA%E1%95%AE_t%E1%95%AE%E1%99%ADt%22%0A assert pipeline(%5B%221241%22, %22Some String%22%5D) == %5B%221241%22, %22some string%22%5D%0A%0A args_pipeline = data.Pipeline(TestPipeline.repeat_n)%0A assert args_pipeline(%22test%22, 5) == %22testtesttesttesttest%22%0A assert args_pipeline(%5B%22ele1%22, %22ele2%22%5D, 2) == %5B%22ele1ele1%22, %22ele2ele2%22%5D%0A%0A def test_composition(self):%0A pipeline = data.Pipeline(TestPipeline.repeat_n)%0A pipeline.add_before(str.lower)%0A pipeline.add_after(str.capitalize)%0A%0A other_pipeline = data.Pipeline(str.swapcase)%0A other_pipeline.add_before(pipeline)%0A%0A # Assert pipeline gives proper results after composition%0A # (test that we aren't modfifying pipes member)%0A assert pipeline(%22teST%22) == %22Testtesttest%22%0A assert pipeline(%5B%22ElE1%22, %22eLe2%22%5D) == %5B%22Ele1ele1ele1%22, %22Ele2ele2ele2%22%5D%0A%0A # Assert pipeline that we added to gives proper results%0A assert other_pipeline(%22teST%22) == %22tESTTESTTEST%22%0A assert other_pipeline(%5B%22ElE1%22, %22eLe2%22%5D) == %5B%22eLE1ELE1ELE1%22, %22eLE2ELE2ELE2%22%5D%0A%0A def test_exceptions(self):%0A with self.assertRaises(ValueError):%0A data.Pipeline(%22Not Callable%22)%0A
|
|
ca4f6e72c152f975c8bf01b920bcbdb3b611876b | add script to save_segment to disk | scripts/save_segment.py | scripts/save_segment.py | Python | 0 | @@ -0,0 +1,2073 @@
+'''%0AIDAPython script that saves the content of a segment to a file.%0APrompts the user for:%0A - segment name%0A - file path%0A%0AUseful for extracting data from memory dumps.%0A%0AAuthor: Willi Ballenthin %[email protected]%3E%0ALicence: Apache 2.0%0A'''%0Aimport logging%0Afrom collections import namedtuple%0A%0Aimport idaapi%0Aimport ida_bytes%0Aimport ida_segment%0A%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Aclass BadInputError(Exception):%0A pass%0A%0A%0ASegment = namedtuple('SegmentBuffer', %5B'path', 'name'%5D)%0A%0A%0Adef prompt_for_segment():%0A ''' :returns: a Segment instance, or raises BadInputError '''%0A class MyForm(idaapi.Form):%0A def __init__(self):%0A idaapi.Form.__init__(self, %22%22%22STARTITEM 0%0Aadd segment by buffer%0A%0A%3C##segment name:%7Bname%7D%3E%0A%3C##output path:%7Bpath%7D%3E%0A%22%22%22,%0A %7B%0A 'path': idaapi.Form.FileInput(save=True),%0A 'name': idaapi.Form.StringInput(),%0A %7D)%0A%0A def OnFormChange(self, fid):%0A return 1%0A%0A f = MyForm()%0A f.Compile()%0A f.path.value = %22%22%0A f.name.value = %22%22%0A ok = f.Execute()%0A if ok != 1:%0A raise BadInputError('user cancelled')%0A%0A path = f.path.value%0A if path == %22%22 or path is None:%0A raise BadInputError('bad path provided')%0A%0A name = f.name.value%0A if name == %22%22 or name is None:%0A raise BadInputError('bad name provided')%0A%0A f.Free()%0A return Segment(path, name)%0A%0A%0Adef main(argv=None):%0A if argv is None:%0A argv = sys.argv%5B:%5D%0A%0A try:%0A seg_spec = prompt_for_segment()%0A except BadInputError:%0A logger.error('bad input, exiting...')%0A return -1%0A%0A seg = ida_segment.get_segm_by_name(seg_spec.name)%0A if not seg:%0A logger.error(%22bad segment, exiting...%22)%0A%0A buf = ida_bytes.get_bytes(seg.start_ea, seg.end_ea - seg.start_ea)%0A with open(seg_spec.path, %22wb%22) as f:%0A f.write(buf)%0A%0A logger.info(%22wrote %25x bytes%22, len(buf))%0A%0A%0Aif __name__ == '__main__':%0A logging.basicConfig(level=logging.DEBUG)%0A main()%0A
|
|
369eed75c8a2fdc916885344fabb14e116bb60f9 | add datatype test | tests/test_datatype.py | tests/test_datatype.py | Python | 0.000002 | @@ -0,0 +1,676 @@
+# encoding: utf-8%0A%0Afrom unittest import TestCase%0Afrom statscraper import Datatype, NoSuchDatatype%0A%0A%0Aclass TestDatatype(TestCase):%0A%0A def test_datatype(self):%0A dt = Datatype(%22str%22)%0A self.assertTrue(str(dt) == %22str%22)%0A%0A def test_datatype_with_values(self):%0A dt = Datatype(%22region%22)%0A self.assertTrue(len(dt.allowed_values))%0A%0A def test_none_existing_datatype(self):%0A with self.assertRaises(NoSuchDatatype):%0A Datatype(%22donkey_power%22)%0A%0A def test_allowed_values(self):%0A dt = Datatype(%22region%22)%0A%0A self.assertTrue(u%22V%C3%A4xj%C3%B6 kommun%22 in dt.allowed_values)%0A%0A self.assertEqual(str(dt.allowed_values%5B%22eu%22%5D), %22eu%22)%0A%0A
|
|
9f7a8e01f7897e8979997b8845a9ace3f64d5412 | Add more tests | tests/test_generate.py | tests/test_generate.py | Python | 0 | @@ -0,0 +1,233 @@
+import pytest%0A%0Afrom nlppln.generate import to_bool%0A%0A%0Adef test_to_bool_correct():%0A assert to_bool('y') == True%0A assert to_bool('n') == False%0A%0A%0Adef test_to_bool_error():%0A with pytest.raises(ValueError):%0A to_bool('foo')%0A
|
|
ff2eac9f6b382e8ad30eed2b733740ce4a50f6e6 | Fix choosing languages function | searx/engines/gentoo.py | searx/engines/gentoo.py | # -*- coding: utf-8 -*-
"""
Gentoo Wiki
@website https://wiki.gentoo.org
@provide-api no (Mediawiki provides API, but Arch Wiki blocks access to it
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title
"""
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode, urljoin
# engine dependent config
categories = ['it']
language_support = True
paging = True
base_url = 'https://wiki.gentoo.org'
# xpath queries
xpath_results = '//ul[@class="mw-search-results"]/li'
xpath_link = './/div[@class="mw-search-result-heading"]/a'
# cut 'en' from 'en-US', 'de' from 'de-CH', and so on
def locale_to_lang_code(locale):
if locale.find('-') >= 0:
locale = locale.split('-')[0]
return locale
# wikis for some languages were moved off from the main site, we need to make
# requests to correct URLs to be able to get results in those languages
lang_urls = {
'en': {
'base': 'https://wiki.gentoo.org',
'search': '/index.php?title=Special:Search&offset={offset}&{query}&profile=translation&languagefilter={language}'
}
}
# get base & search URLs for selected language
def get_lang_urls(language):
if language in lang_urls:
return lang_urls[language]
return lang_urls['en']
# Language names to build search requests for
# those languages which are hosted on the main site.
main_langs = {
'ar': 'العربية',
'bg': 'Български',
'cs': 'Česky',
'da': 'Dansk',
'el': 'Ελληνικά',
'es': 'Español',
'he': 'עברית',
'hr': 'Hrvatski',
'hu': 'Magyar',
'it': 'Italiano',
'ko': '한국어',
'lt': 'Lietuviškai',
'nl': 'Nederlands',
'pl': 'Polski',
'pt': 'Português',
'ru': 'Русский',
'sl': 'Slovenský',
'th': 'ไทย',
'uk': 'Українська',
'zh': '简体中文'
}
supported_languages = dict(lang_urls, **main_langs)
# do search-request
def request(query, params):
# translate the locale (e.g. 'en-US') to language code ('en')
language = locale_to_lang_code(params['language'])
# if our language is hosted on the main site, we need to add its name
# to the query in order to narrow the results to that language
if language in main_langs:
query += b' (' + (main_langs[language]).encode('utf-8') + b')'
# prepare the request parameters
query = urlencode({'search': query})
offset = (params['pageno'] - 1) * 20
# get request URLs for our language of choice
urls = get_lang_urls(language)
search_url = urls['base'] + urls['search']
params['url'] = search_url.format(query=query, offset=offset, language=language)
return params
# get response from search-request
def response(resp):
# get the base URL for the language in which request was made
language = locale_to_lang_code(resp.search_params['language'])
base_url = get_lang_urls(language)['base']
results = []
dom = html.fromstring(resp.text)
# parse results
for result in dom.xpath(xpath_results):
link = result.xpath(xpath_link)[0]
href = urljoin(base_url, link.attrib.get('href'))
title = extract_text(link)
results.append({'url': href,
'title': title})
return results
| Python | 0 | @@ -94,69 +94,11 @@
pi
-no (Mediawiki provides API, but Arch Wiki blocks access to it
+yes
%0A @u
@@ -901,21 +901,20 @@
ang_urls
-
= %7B%0A
+
'en'
@@ -1034,16 +1034,176 @@
&%7Bquery%7D
+'%0A %7D,%0A 'others': %7B%0A 'base': 'https://wiki.gentoo.org',%0A 'search': '/index.php?title=Special:Search&offset=%7Boffset%7D&%7Bquery%7D%5C%0A
&profile
@@ -1348,20 +1348,15 @@
age
-in lang_urls
+!= 'en'
:%0A
@@ -1378,24 +1378,24 @@
ng_urls%5B
-language
+'others'
%5D%0A re
|
2cd1ab91ca48b8a8d34eabcc2a01b4014a97bcf6 | add unit tests | test/test_ncompress.py | test/test_ncompress.py | Python | 0.000001 | @@ -0,0 +1,2961 @@
+import shutil%0Aimport subprocess%0Afrom io import BytesIO%0A%0Aimport pytest%0Afrom ncompress import compress, decompress%0A%0A%[email protected]%0Adef sample_data():%0A chars = %5B%5D%0A for i in range(15):%0A chars += %5Bi * 16%5D * (i + 1)%0A chars += %5B0, 0, 0%5D%0A return bytes(chars)%0A%0A%[email protected]%0Adef sample_compressed(sample_data):%0A compress_cmd = shutil.which(%22compress%22)%0A if compress_cmd:%0A return subprocess.check_output(compress_cmd, input=sample_data)%0A return compress(sample_data)%0A%0A%0Adef test_string_string(sample_data, sample_compressed):%0A assert compress(sample_data) == sample_compressed%0A assert decompress(sample_compressed) == sample_data%0A%0A%0Adef test_string_stream(sample_data, sample_compressed):%0A out = BytesIO()%0A compress(sample_data, out)%0A out.seek(0)%0A assert out.read() == sample_compressed%0A%0A out = BytesIO()%0A decompress(sample_compressed, out)%0A out.seek(0)%0A assert out.read() == sample_data%0A%0A%0Adef test_stream_stream(sample_data, sample_compressed):%0A out = BytesIO()%0A compress(BytesIO(sample_data), out)%0A out.seek(0)%0A assert out.read() == sample_compressed%0A%0A out = BytesIO()%0A decompress(BytesIO(sample_compressed), out)%0A out.seek(0)%0A assert out.read() == sample_data%0A%0A%0Adef test_stream_string(sample_data, sample_compressed):%0A assert compress(BytesIO(sample_data)) == sample_compressed%0A assert decompress(BytesIO(sample_compressed)) == sample_data%0A%0A%0Adef test_empty_input(sample_data):%0A assert decompress(compress(b%22%22)) == b%22%22%0A with pytest.raises(ValueError):%0A decompress(b%22%22)%0A with pytest.raises(TypeError):%0A compress()%0A with pytest.raises(TypeError):%0A decompress()%0A%0A%0Adef test_corrupted_input(sample_compressed):%0A sample = sample_compressed%0A for x in %5B%0A b%22123%22,%0A sample%5B1:%5D,%0A sample%5B:1%5D,%0A b%22%5C0%22 * 3 + sample%5B:3%5D,%0A sample * 2,%0A b%22%5C0%22 + sample%0A %5D:%0A with pytest.raises(ValueError) as ex:%0A decompress(x)%0A assert (%22not in LZW-compressed format%22 in str(ex.value) or%0A %22corrupt input - %22 in str(ex.value))%0A%0A%0Adef test_str(sample_data, sample_compressed):%0A with pytest.raises(TypeError):%0A compress(sample_data.decode(%22latin1%22, errors=%22replace%22))%0A with pytest.raises(TypeError):%0A decompress(sample_compressed.decode(%22latin1%22, errors=%22replace%22))%0A%0A%0Adef test_closed_input(sample_data, sample_compressed):%0A expected = %22I/O operation on closed file.%22%0A with pytest.raises(ValueError) as ex:%0A stream = BytesIO(sample_data)%0A stream.close()%0A compress(stream)%0A assert expected in str(ex.value)%0A%0A with pytest.raises(ValueError) as ex:%0A stream = BytesIO(sample_compressed)%0A stream.close()%0A decompress(stream)%0A assert expected in str(ex.value)%0A%0A%0Adef test_file_input():%0A with open(__file__, %22rb%22) as f:%0A expected = f.read()%0A f.seek(0)%0A assert decompress(compress(f)) == expected%0A
|
|
c297de3964c53beffdf33922c0bffd022b376ae6 | Create __init__.py | crawl/__init__.py | crawl/__init__.py | Python | 0.000429 | @@ -0,0 +1 @@
+%0A
|
|
ccf21faf0110c9c5a4c28a843c36c53183d71550 | add missing file | pyexcel_xls/__init__.py | pyexcel_xls/__init__.py | Python | 0.000003 | @@ -0,0 +1,657 @@
+%22%22%22%0A pyexcel_xls%0A ~~~~~~~~~~~~~~~~~~~%0A%0A The lower level xls/xlsm file format handler using xlrd/xlwt%0A%0A :copyright: (c) 2015-2016 by Onni Software Ltd%0A :license: New BSD License%0A%22%22%22%0Afrom pyexcel_io.io import get_data as read_data, isstream, store_data as write_data%0A%0A%0Adef get_data(afile, file_type=None, **keywords):%0A if isstream(afile) and file_type is None:%0A file_type = 'xls'%0A return read_data(afile, file_type=file_type, **keywords)%0A%0A%0Adef save_data(afile, data, file_type=None, **keywords):%0A if isstream(afile) and file_type is None:%0A file_type = 'xls'%0A write_data(afile, data, file_type=file_type, **keywords)%0A%0A%0A
|
|
5c4ed354d1bfd5c4443cc031a29e6535b2063178 | add test-env | sikuli-script/src/test/python/test-env.py | sikuli-script/src/test/python/test-env.py | Python | 0 | @@ -0,0 +1,170 @@
+from __future__ import with_statement%0Afrom sikuli.Sikuli import *%0A%0Aprint Env.getOS(), Env.getOSVersion()%0Aprint %22MAC?%22, Env.getOS() == OS.MAC%0Aprint Env.getMouseLocation()%0A
|
|
1828f7bb8cb735e755dbcb3a894724dec28748cc | add sort file | sort/sort.py | sort/sort.py | Python | 0.000001 | @@ -0,0 +1,157 @@
+#!/usr/bin/env python%0A# -*- coding:utf-8 -*-%0Afrom __future__ import division%0Afrom __future__ import unicode_literals%0Afrom __future__ import print_function%0A%0A%0A
|
|
fbd6db138ce65825e56a8d39bf30ed8525b88503 | Add exception handler for db not found errors. | resources/middlewares/db_not_found_handler.py | resources/middlewares/db_not_found_handler.py | Python | 0 | @@ -0,0 +1,84 @@
+import falcon%0A%0A%0Adef handler(ex, req, resp, params):%0A raise falcon.HTTPNotFound()%0A
|
|
a1eaf66efa2041849e906010b7a4fb9412a9b781 | Add instance method unit tests | tests/test_instancemethod.py | tests/test_instancemethod.py | Python | 0.000001 | @@ -0,0 +1,473 @@
+# Imports%0Aimport random%0Aimport unittest%0Afrom funky import memoize, timed_memoize%0A%0A%0Aclass Dummy(object):%0A @memoize%0A def a(self):%0A return random.random()%0A%0A%0Aclass TestInstanceMethod(unittest.TestCase):%0A def test_dummy(self):%0A dummy = Dummy()%0A v1 = dummy.a()%0A v2 = dummy.a()%0A dummy.a.clear()%0A v3 = dummy.a()%0A self.assertEqual(v1, v2)%0A self.assertNotEqual(v1, v3)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
c3221d70f829dc2968ebfb1a47efd9538a1ef59f | test gaussian + derivatives | tests/vigra_compare.py | tests/vigra_compare.py | Python | 0.000002 | @@ -0,0 +1,590 @@
+import fastfilters as ff%0Aimport numpy as np%0Aimport sys%0A%0Atry:%0A%09import vigra%0Aexcept ImportError:%0A%09print(%22WARNING: vigra not available - skipping tests.%22)%0A%09with open(sys.argv%5B1%5D, 'w') as f:%0A%09%09f.write('')%0A%09exit()%0A%0Aa = np.random.randn(1000000).reshape(1000,1000)%0A%0A%0Afor order in %5B0,1,2%5D:%0A%09for sigma in %5B1.0, 5.0, 10.0%5D:%0A%09%09res_ff = ff.gaussian2d(a, order, sigma)%0A%09%09res_vigra = vigra.filters.gaussianDerivative(a, sigma, %5Border,order%5D)%0A%0A%09%09if not np.allclose(res_ff, res_vigra, atol=1e-6):%0A%09%09%09print(order, sigma, np.max(np.abs(res_ff - res_vigra)))%0A%09%09%09raise Exception()%0A%0A%0Anp.unique(ff.hog2d(a, 1.0))
|
|
8ec1d35fe79554729e52aec4e0aabd1d9f64a9c7 | Put main.py display functions in its own module so they can be used in other parts of the package | fire_rs/display.py | fire_rs/display.py | Python | 0 | @@ -0,0 +1,2228 @@
+from mpl_toolkits.mplot3d import Axes3D%0Aimport matplotlib%0Aimport matplotlib.pyplot as plt%0Afrom matplotlib.colors import LightSource%0Afrom matplotlib.ticker import FuncFormatter%0Afrom matplotlib import cm%0A%0A%0Adef get_default_figure_and_axis():%0A fire_fig = plt.figure()%0A fire_ax = fire_fig.gca(aspect='equal', xlabel=%22X position %5Bm%5D%22, ylabel=%22Y position %5Bm%5D%22)%0A%0A ax_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)%0A fire_ax.yaxis.set_major_formatter(ax_formatter)%0A fire_ax.xaxis.set_major_formatter(ax_formatter)%0A return fire_fig, fire_ax%0A%0A%0Adef plot_firefront_contour(ax, x, y, firefront, nfronts=20):%0A fronts = ax.contour(x, y, firefront, nfronts, cmap=cm.Set1)%0A labels = ax.clabel(fronts, inline=True, fontsize='smaller', inline_spacing=1, linewidth=2, fmt='%25.0f')%0A return fronts, labels%0A%0A%0A%0Adef plot_elevation_contour(ax, x, y, z):%0A contour = ax.contour(x, y, z, 15, cmap=cm.gist_earth)%0A labels = plt.clabel(contour, inline=1, fontsize=10)%0A return contour, labels%0A%0A%0Adef plot_elevation_shade(ax, x, y, z, dx=25, dy=25):%0A cbar_lim = (z.min(), z.max())%0A%0A image_scale = (x%5B0%5D%5B0%5D, x%5B0%5D%5Bx.shape%5B0%5D - 1%5D, y%5B0%5D%5B0%5D, y%5By.shape%5B0%5D - 1%5D%5B0%5D)%0A ls = LightSource(azdeg=315, altdeg=45)%0A ax.imshow(ls.hillshade(z, vert_exag=5, dx=dx, dy=dy), extent=image_scale, cmap='gray')%0A return ax.imshow(ls.shade(z, cmap=cm.terrain, blend_mode='overlay', vert_exag=1, dx=dx, dy=dy,%0A vmin=cbar_lim%5B0%5D, vmax=cbar_lim%5B1%5D),%0A extent=image_scale, vmin=cbar_lim%5B0%5D, vmax=cbar_lim%5B1%5D, cmap=cm.terrain)%0A%0A%0Adef plot_wind_flow(ax, x, y, wx, wy, wvel):%0A return ax.streamplot(x, y, wx, wy, density=1, linewidth=1, color='dimgrey')%0A%0A%0Adef plot_wind_arrows(ax, x, y, wx, wy):%0A return ax.quiver(x, y, wx, wy, pivot='middle', color='dimgrey')%0A%0A%0Adef plot3d_elevation_shade(ax, x, y, z, dx=25, dy=25):%0A ls = LightSource(azdeg=120, altdeg=45)%0A rgb = ls.shade(z, cmap=cm.terrain, vert_exag=0.1, blend_mode='overlay')%0A return ax.plot_surface(x, y, z, facecolors=rgb, rstride=5, cstride=5, linewidth=0, antialiased=True, shade=True)%0A%0Adef plot3d_wind_arrows(ax, x, y, z, wx, wy, wz):%0A return ax.quiver(x, y, z, wx, wy, wz, pivot='middle', cmap=cm.viridis)%0A
|
|
78f730b405c6e67988cdc9efab1aa5316c16849f | Add initial test for web response | tests/test_web_response.py | tests/test_web_response.py | Python | 0 | @@ -0,0 +1,853 @@
+import unittest%0Afrom unittest import mock%0Afrom aiohttp.web import Request, StreamResponse%0Afrom aiohttp.protocol import Request as RequestImpl%0A%0A%0Aclass TestStreamResponse(unittest.TestCase):%0A%0A def make_request(self, method, path, headers=()):%0A self.app = mock.Mock()%0A self.transport = mock.Mock()%0A message = RequestImpl(self.transport, method, path)%0A message.headers.extend(headers)%0A self.payload = mock.Mock()%0A self.protocol = mock.Mock()%0A req = Request(self.app, message, self.payload, self.protocol)%0A return req%0A%0A def test_ctor(self):%0A req = self.make_request('GET', '/')%0A resp = StreamResponse(req)%0A%0A self.assertEqual(req, resp._request)%0A self.assertIsNone(req._response)%0A self.assertEqual(200, resp.status_code)%0A self.assertTrue(resp.keep_alive)%0A
|
|
644a678d3829513361fdc099d759ca964100f2e6 | Add script to replace text | text-files/replace-text.py | text-files/replace-text.py | Python | 0.000003 | @@ -0,0 +1,1034 @@
+#!/usr/bin/env python3%0A# This Python 3 script replaces text in a file, in-place.%0A%0A# For Windows, use:%0A#!python%0A%0Aimport fileinput%0Aimport os%0Aimport sys%0A%0Adef isValidFile(filename):%0A return (filename.lower().endswith('.m3u') or%0A filename.lower().endswith('.m3u8'))%0A%0Adef processFile(filename):%0A '''Makes custom text modifications to a single file.%0A%0A Returns true if modified, false if not modified.%0A '''%0A%0A modified = False%0A%0A with fileinput.input(filename, inplace=True) as f:%0A for line in f:%0A%0A # Check any condition%0A if '%5C%5C' in line:%0A modified = True%0A%0A # Make the modifications%0A newline = line.replace('%5C%5C', '/')%0A sys.stdout.write(newline)%0A%0A return modified%0A%0A%0Aif __name__ == '__main__':%0A for filename in os.listdir(os.getcwd()):%0A if not isValidFile(filename):%0A continue%0A%0A modified = processFile(filename)%0A if modified:%0A print(filename)%0A%0A # Wait for user input to finish%0A input()
|
|
8d8f6b99357912fa9a29098b0744712eeb1d4c70 | Add coder/decoder skeleton | src/coder.py | src/coder.py | Python | 0.000004 | @@ -0,0 +1,1109 @@
+from bitarray import bitarray%0Afrom datetime import datetime, timedelta%0A%0Adef decode():%0A with open() as f:%0A timestamps = %5B%5D%0A start = %5B0, 0, 0%5D%0A end = %5B1, 1, 1%5D%0A delta = timedelta(seconds=1)%0A for line in f:%0A ts = line.split(%22 %22, 1)%5B0%5D%0A ts = datetime.strptime(ts, '%25H:%25M:%25S.%25f')%0A timestamps.append(ts)%0A bits = %5Bint(t2 - t1 %3E delta) for t2, t1 in zip(timestamps%5B1:%5D, timestamps%5B:-1%5D)%5D%0A bits = extract_message(bits, start, end)%0A print get_message(bits)%0A%0Adef find_index(list, sublist):%0A print('Find %7B%7D in %7B%7D'.format(sublist, list))%0A for i in range(len(list) - len(sublist) + 1):%0A if list%5Bi:i+len(sublist)%5D == sublist:%0A return i%0A return None%0A%0Adef extract_message(bits, start, end):%0A start_index = find_index(bits, start) + len(start)%0A end_index = find_index(bits%5Bstart_index:%5D, end)%0A return bits%5Bstart_index:start_index + end_index%5D%0A%0Adef get_message(bits):%0A return bitarray(bits).tostring()%0A%0Adef get_bits(msg):%0A ba = bitarray.bitarray()%0A ba.fromstring(msg)%0A return ba.tolist()%0A
|
|
3c18ace928b0339b0edf4763f4132d327936cbe8 | add utils | src/utils.py | src/utils.py | Python | 0.000004 | @@ -0,0 +1,776 @@
+def set_trace():%0A from IPython.core.debugger import Pdb%0A import sys%0A Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)%0A%0Adef plot_ROC(actual, predictions):%0A%09# plot the FPR vs TPR and AUC for a two class problem (0,1)%0A%09import matplotlib.pyplot as plt%0A%09from sklearn.metrics import roc_curve, auc%0A%0A%09false_positive_rate, true_positive_rate, thresholds = roc_curve(actual, predictions)%0A%09roc_auc = auc(false_positive_rate, true_positive_rate)%0A%09%0A%09plt.title('Receiver Operating Characteristic')%0A%09plt.plot(false_positive_rate, true_positive_rate, 'b',%0A%09label='AUC = %250.2f'%25 roc_auc)%0A%09plt.legend(loc='lower right')%0A%09plt.plot(%5B0,1%5D,%5B0,1%5D,'r--')%0A%09plt.xlim(%5B-0.1,1.2%5D)%0A%09plt.ylim(%5B-0.1,1.2%5D)%0A%09plt.ylabel('True Positive Rate')%0A%09plt.xlabel('False Positive Rate')%0A%09plt.show()
|
|
3f141c4e8f123e0ca8c7a8b8475bf1798c18cdb4 | Combine Importer/Loader | src/sentry/runner/importer.py | src/sentry/runner/importer.py | """
sentry.runner.importer
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import sys
def install(name, config_path, default_settings, callback):
sys.meta_path.append(Importer(name, config_path, default_settings, callback))
class ConfigurationError(ValueError):
pass
class Importer(object):
def __init__(self, name, config_path, default_settings=None, callback=None):
self.name = name
self.config_path = config_path
self.default_settings = default_settings
self.callback = callback
def __repr__(self):
return "<%s for '%s' (%s)>" % (type(self), self.name, self.config_path)
def find_module(self, fullname, path=None):
if fullname != self.name:
return
return Loader(
name=self.name,
config_path=self.config_path,
default_settings=self.default_settings,
callback=self.callback,
)
class Loader(object):
def __init__(self, name, config_path, default_settings=None, callback=None):
self.name = name
self.config_path = config_path
self.default_settings = default_settings
self.callback = callback
def load_module(self, fullname):
try:
return self._load_module(fullname)
except Exception as e:
from sentry.utils.settings import reraise_as
reraise_as(ConfigurationError(unicode(e)))
def _load_module(self, fullname):
# TODO: is this needed?
if fullname in sys.modules:
return sys.modules[fullname] # pragma: no cover
if self.default_settings:
from django.utils.importlib import import_module
default_settings_mod = import_module(self.default_settings)
else:
default_settings_mod = None
settings_mod = create_module(self.name)
# Django doesn't play too nice without the config file living as a real file, so let's fake it.
settings_mod.__file__ = self.config_path
# install the default settings for this app
load_settings(default_settings_mod, settings=settings_mod)
# install the custom settings for this app
load_settings(self.config_path, settings=settings_mod, silent=True)
if self.callback is not None:
self.callback(settings_mod)
return settings_mod
def create_module(name, install=True):
import imp
mod = imp.new_module(name)
if install:
sys.modules[name] = mod
return mod
def load_settings(mod_or_filename, settings, silent=False):
if isinstance(mod_or_filename, basestring):
conf = create_module('temp_config', install=False)
conf.__file__ = mod_or_filename
try:
execfile(mod_or_filename, conf.__dict__)
except IOError as e:
import errno
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return settings
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
else:
conf = mod_or_filename
add_settings(conf, settings=settings)
def add_settings(mod, settings):
"""
Adds all settings that are part of ``mod`` to the global settings object.
Special cases ``EXTRA_APPS`` to append the specified applications to the
list of ``INSTALLED_APPS``.
"""
for setting in dir(mod):
if not setting.isupper():
continue
setting_value = getattr(mod, setting)
if setting in ('INSTALLED_APPS', 'TEMPLATE_DIRS') and isinstance(setting_value, basestring):
setting_value = (setting_value,) # In case the user forgot the comma.
# Any setting that starts with EXTRA_ and matches a setting that is a list or tuple
# will automatically append the values to the current setting.
# It might make sense to make this less magical
if setting[:6] == 'EXTRA_':
base_setting = setting[6:]
if isinstance(getattr(settings, base_setting), (list, tuple)):
curval = getattr(settings, base_setting)
setattr(settings, base_setting, curval + type(curval)(setting_value))
continue
setattr(settings, setting, setting_value)
| Python | 0.000001 | @@ -884,17 +884,16 @@
return%0A
-%0A
@@ -903,434 +903,12 @@
urn
-Loader(%0A name=self.name,%0A config_path=self.config_path,%0A default_settings=self.default_settings,%0A callback=self.callback,%0A )%0A%0A%0Aclass Loader(object):%0A def __init__(self, name, config_path, default_settings=None, callback=None):%0A self.name = name%0A self.config_path = config_path%0A self.default_settings = default_settings%0A self.callback = callback
+self
%0A%0A
@@ -1198,29 +1198,80 @@
#
-TODO: is this needed?
+Check to make sure it's not already in sys.modules in case of a reload()
%0A
@@ -2034,24 +2034,112 @@
lent=True)%0A%0A
+ # Add into sys.modules explicitly%0A sys.modules%5Bfullname%5D = settings_mod%0A%0A
if s
|
0bca09339bb49e4540c5be8162e11ea3e8106200 | Create a PySide GUI window. | budget.py | budget.py | Python | 0 | @@ -0,0 +1,202 @@
+#!/usr/bin/env python%0A%0Aimport sys%0Afrom PySide import QtGui%0A%0Aapp = QtGui.QApplication(sys.argv)%0A%0Awid = QtGui.QWidget()%0Awid.resize(250, 150)%0Awid.setWindowTitle('Simple')%0Awid.show()%0A%0Asys.exit(app.exec_())%0A
|
|
19a23591b9b21cbe7dd34c8be7d2cb435c0f965a | generate XML works | umpa/extensions/XML.py | umpa/extensions/XML.py | Python | 0.999372 | @@ -0,0 +1,2268 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (C) 2008 Adriano Monteiro Marques.%0A#%0A# Author: Bartosz SKOWRON %3Cgetxsick at gmail dot com%3E%0A#%0A# This library is free software; you can redistribute it and/or modify %0A# it under the terms of the GNU Lesser General Public License as published %0A# by the Free Software Foundation; either version 2.1 of the License, or %0A# (at your option) any later version.%0A#%0A# This library is distributed in the hope that it will be useful, but %0A# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY%0A# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public %0A# License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public License %0A# along with this library; if not, write to the Free Software Foundation, %0A# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA %0A%0A%0Aimport xml.dom.minidom%0A%0Afrom umpa.protocols._fields import Flags%0A%0Adef write(filename, *packets):%0A # no packets? %0A if not packets:%0A return%0A%0A doc = xml.dom.minidom.Document()%0A root = doc.createElementNS(None, 'UMPA')%0A doc.appendChild(root)%0A%0A for i, packet in enumerate(packets):%0A pa = doc.createElementNS(None, 'packet')%0A pa.setAttributeNS(None, %22id%22, str(i))%0A root.appendChild(pa)%0A for proto in packet.protos:%0A pr = doc.createElementNS(None, 'protocol')%0A pr.setAttributeNS(None, %22type%22, proto.name)%0A pa.appendChild(pr)%0A%0A for field in proto.get_fields_keys():%0A f = doc.createElementNS(None, field)%0A pr.appendChild(f)%0A # if Flags...we need care about BitFlags objects%0A if isinstance(proto._get_field(field), Flags):%0A for flag in proto._get_field(field).get():%0A b = doc.createElementNS(None, flag)%0A f.appendChild(b)%0A b.appendChild(doc.createTextNode(%0A str(proto._get_field(field)._value%5Bflag%5D.get())))%0A else:%0A f.appendChild(doc.createTextNode(%0A str(proto._get_field(field).get())))%0A print doc.toprettyxml()%0A open(filename, %22w%22).write(doc.toprettyxml())%0A%0A%0A
|
|
4ff319033277bbaa04b1e226f9a90232ecadd49d | Trying out the potential new name, Spectra | cronenberg/config.py | cronenberg/config.py | DEBUG = True
DEFAULT_FROM_TIME = '-3h'
DEFAULT_THEME = 'light'
DASHBOARD_APPNAME = 'Cronenberg'
SQLALCHEMY_DATABASE_URI = 'sqlite:///cronenberg.db'
GRAPHITE_URL = 'http://graphite.prod.urbanairship.com'
SERVER_ADDRESS = '0.0.0.0'
SERVER_PORT = 5000
INTERACTIVE_CHARTS_DEFAULT = True
INTERACTIVE_CHARTS_RENDERER = 'nvd3'
DASHBOARD_RANGE_PICKER = [
('Past Hour', '-1h'),
('Past 3 Hrs', '-3h'),
('Past 12 Hrs', '-12h'),
('Past Day', '-1d'),
('Past Wk', '-1w'),
('Past 2 Wks', '-2w'),
]
| Python | 0.999999 | @@ -133,18 +133,15 @@
= '
-Cronenberg
+Spectra
'%0ASQ
|
bc28f6ab7ba5bb5e82bf38c544a4d091d89973ea | Use servoblaster to control servo | candycrush.py | candycrush.py | Python | 0.000001 | @@ -0,0 +1,817 @@
+#!/usr/bin/env python%0Aimport os.path%0Aimport subprocess%0Aimport time%0A%0Adef scaler(OldMin, OldMax, NewMin, NewMax):%0A def fn(OldValue):%0A return (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin%0A return fn%0A%0Adef setup_servod():%0A if not os.path.exists(%22/dev/servoblaster%22):%0A subprocess.call(%5B%22servod%22%5D)%0A %0A%0Adef set_servo(physical_pin, degrees):%0A servodegrees = scaler(0, 180, 53, 240)%0A with open(%22/dev/servoblaster%22, %22w%22) as f:%0A servovalue = int(servodegrees(degrees))%0A f.write(%22P1-%7B%7D=%7B%7D%22.format(physical_pin, servovalue))%0A%0Adef main():%0A set_servo(11, 0)%0A time.sleep(2)%0A set_servo(11, 180)%0A time.sleep(2)%0A set_servo(11, 90)%0A time.sleep(2)%0A set_servo(11, 45)%0A time.sleep(2)%0A set_servo(11, 30)%0A%0Aif __name__ =='__main__':%0A main()%0A
|
|
26595ad3dd7dcd9dfd16ae551345db9b7e58412a | Add updater | updater/openexchangerates.py | updater/openexchangerates.py | Python | 0 | @@ -0,0 +1,1021 @@
+#!env/bin/python%0Aimport urllib2%0Aimport simplejson%0Aimport datetime%0A%0AAPP_ID = %2240639356d56148f1ae26348d670e889f%22%0ATARGET_URL = %22http://taggy-api.bx23.net/api/v1/currency/%22%0A%0Adef main():%0A print 'Getting rates...'%0A request = urllib2.Request(%22http://openexchangerates.org/api/latest.json?app_id=%25s%22 %25 (APP_ID))%0A opener = urllib2.build_opener()%0A f = opener.open(request)%0A result = simplejson.load(f)%0A rates = result%5B'rates'%5D%0A date = datetime.datetime.fromtimestamp(int(result%5B'timestamp'%5D))%0A print 'Rates %5B%25s%5D size: %25s' %25 (date, len(rates))%0A%0A print 'Sending to API...'%0A update_j = %7B%22currency%22 : %5B%5D, %22timestamp%22 : result%5B'timestamp'%5D%7D%0A for name, value in rates.iteritems():%0A update_j%5B%22currency%22%5D.append(%7B%22name%22 : name, %22value%22 : value%7D)%0A%0A request = urllib2.Request(TARGET_URL, simplejson.dumps(update_j), %7B'Content-Type': 'application/json'%7D)%0A f = urllib2.urlopen(request)%0A response = f.read()%0A f.close()%0A print ' API: %25s' %25 (response)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
3ef4fdcc98a12111aee6f0d214af98ef68315773 | add reboot module | gozerlib/reboot.py | gozerlib/reboot.py | Python | 0.000001 | @@ -0,0 +1,1491 @@
+# gozerbot/utils/reboot.py%0A#%0A#%0A%0A%22%22%22%0A reboot code. %0A%0A%22%22%22%0A%0A## gozerlib imports%0A%0Afrom gozerlib.fleet import fleet%0Afrom gozerlib.config import cfg as config%0A%0A## basic imports%0A%0Afrom simplejson import dump%0Aimport os%0Aimport sys%0Aimport pickle%0Aimport tempfile%0A%0Adef reboot():%0A%0A %22%22%22%0A reboot the bot.%0A%0A .. literalinclude:: ../../gozerbot/reboot.py%0A :pyobject: reboot%0A%0A %22%22%22%0A%0A os.execl(sys.argv%5B0%5D, *sys.argv)%0A%0Adef reboot_stateful(bot, ievent, fleet, partyline):%0A %22%22%22%0A reboot the bot, but keep the connections.%0A%0A :param bot: bot on which the reboot command is given%0A :type bot: gozerbot.botbase.BotBase%09%0A :param ievent: event that triggered the reboot%0A :type ievent: gozerbot.eventbase. EventBase%0A :param fleet: the fleet of the bot%0A :type fleet: gozerbot.fleet.Fleet%0A :param partyline: partyline of the bot%0A :type partyline: gozerbot.partyline.PartyLine%0A%0A .. literalinclude:: ../../gozerbot/reboot.py%0A :pyobject: reboot_stateful%0A%0A %22%22%22%0A config.reload()%0A session = %7B'bots': %7B%7D, 'name': bot.name, 'channel': ievent.channel, 'partyline': %5B%5D%7D%0A%0A for i in fleet.bots:%0A session%5B'bots'%5D.update(i._resumedata())%0A%0A session%5B'partyline'%5D = partyline._resumedata()%0A sessionfile = tempfile.mkstemp('-session', 'gozerbot-')%5B1%5D%0A dump(session, open(sessionfile, 'w'))%0A fleet.save()%0A fleet.exit(jabber=True)%0A os.execl(sys.argv%5B0%5D, sys.argv%5B0%5D, '-r', sessionfile)%0A%0A
|
|
3c37f63f65a9d85c605dde55ae19c8d5d62ad777 | add missing file | rmake/plugins/plugin.py | rmake/plugins/plugin.py | Python | 0.000003 | @@ -0,0 +1,2653 @@
+#%0A# Copyright (c) 2006 rPath, Inc.%0A#%0A# This program is distributed under the terms of the Common Public License,%0A# version 1.0. A copy of this license should have been distributed with this%0A# source file in a file called LICENSE. If it is not present, the license%0A# is always available at http://www.opensource.org/licenses/cpl.php.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# without any warranty; without even the implied warranty of merchantability%0A# or fitness for a particular purpose. See the Common Public License for%0A# full details.%0A#%0A%22%22%22%0ADefinition of plugins available for rmake plugins.%0A%0APlugin writers should derive from one of these classes.%0A%0AThe plugin will be called with the hooks described here, if the%0Acorrect program is being run. For example, when running rmake-server,%0Athe server hooks will be run.%0A%22%22%22%0Afrom rmake.lib.pluginlib import Plugin%0A%0ATYPE_CLIENT = 0%0ATYPE_SERVER = 1%0ATYPE_SUBSCRIBER = 2%0A%0Aclass ClientPlugin(Plugin):%0A%0A types = %5BTYPE_CLIENT%5D%0A%0A def client_preInit(self, main):%0A %22%22%22%0A Called right after plugins have been loaded.%0A %22%22%22%0A pass%0A%0A def client_preCommand(self, main, client):%0A %22%22%22%0A Called after the command-line client has instantiated, %0A but before the command has been executed.%0A %22%22%22%0A pass%0A%0Aclass ServerPlugin(Plugin):%0A%0A types = %5BTYPE_SERVER%5D%0A%0A def server_preConfig(self, main):%0A %22%22%22%0A Called before the configuration file has been read in.%0A %22%22%22%0A pass%0A%0A def server_preInit(self, main, argv):%0A %22%22%22%0A Called before the server has been instantiated.%0A %22%22%22%0A pass%0A%0A def server_postInit(self, server):%0A %22%22%22%0A Called after the server has been instantiated but before%0A serving is done.%0A %22%22%22%0A pass%0A%0A def server_pidDied(self, pid, status):%0A %22%22%22%0A Called when the server collects a child process that has died.%0A %22%22%22%0A pass%0A%0A def server_loop(self, server):%0A %22%22%22%0A Called once per server loop, between requests.%0A %22%22%22%0A pass%0A%0A def server_builderInit(self, server, builder):%0A %22%22%22%0A Called when the server instantiates a builder for a job.%0A %22%22%22%0A pass%0A%0A def server_shutDown(self, server):%0A %22%22%22%0A Called when the server is halting.%0A %22%22%22%0A pass%0A%0Aclass SubscriberPlugin(Plugin):%0A%0A types = %5BTYPE_SUBSCRIBER%5D%0A protocol = None%0A%0A def subscriber_get(self, uri, name):%0A %22%22%22%0A Should return a child of the StatusSubscirber class.%0A %22%22%22%0A pass%0A
|
|
2100eb3e0a72395f23571c6be2bada9939739869 | add ex | checkDigit.py | checkDigit.py | Python | 0.00024 | @@ -0,0 +1,283 @@
+#-*-coding:UTF-8 -*-%0A#%0A# %E5%88%A4%E6%96%B7%E8%BC%B8%E5%85%A5%E6%98%AF%E5%90%A6%E7%82%BA%E6%95%B4%E6%95%B8(int)%0A%0Ainput_string = input('Please input n:')%0A#while input_string.isdigit() == False:%0Awhile not input_string.isdigit():%0A%09print(%22Error, %25s is not digit!%22 %25 input_string)%0A%09input_string = input('Please input n:')%09%0Aprint(%22%25s is digit!%22 %25 input_string)%0A
|
|
09592b081a68f912bf9bb73c5269af8398c36f64 | Add unit test for treating Ordering as a collection | tests/test_collection.py | tests/test_collection.py | Python | 0 | @@ -0,0 +1,822 @@
+from unittest import TestCase%0A%0Afrom ordering import Ordering%0A%0A%0Aclass TestOrderingAsCollection(TestCase):%0A def setUp(self) -%3E None:%0A self.ordering = Ordering%5Bint%5D()%0A self.ordering.insert_start(0)%0A for n in range(10):%0A self.ordering.insert_after(n, n + 1)%0A%0A def test_length(self) -%3E None:%0A self.assertEqual(len(self.ordering), 11)%0A%0A def test_iterates_over_correct_elements(self) -%3E None:%0A self.assertListEqual(%0A list(self.ordering),%0A list(range(11))%0A )%0A%0A def test_contains_correct_elements(self) -%3E None:%0A for n in range(11):%0A self.assertIn(n, self.ordering)%0A%0A for n in range(11, 20):%0A self.assertNotIn(n, self.ordering)%0A%0A for n in range(-10, 0):%0A self.assertNotIn(n, self.ordering)%0A
|
|
6f1ed2fcdd43a5237d0211b426a216fd25930734 | add test preprocess | tests/test_preprocess.py | tests/test_preprocess.py | Python | 0.000001 | @@ -0,0 +1,1142 @@
+# coding: utf-8%0A%0Acode = '''%0An = 10%0Afor i in range(0,n):%0A x = 2 * i%0A%0A y = x / 3%0A # a comment%0A if y %3E 1:%0A%0A print(y)%0A%0A for j in range(0, 3):%0A x = x * y%0A%0A y = x + 1%0A%0Aif x %3E 1:%0A print(x)%0A'''%0A%0Acode = '''%0A#$ header legendre(int)%0Adef legendre(p):%0A k = p + 1%0A x = zeros(k, double)%0A w = zeros(k, double)%0A if p == 1:%0A x%5B0%5D = -0.577350269189625765%0A x%5B1%5D = 0.577350269189625765%0A w%5B0%5D = 1.0%0A w%5B1%5D = 1.0%0A elif p == 2:%0A x%5B0%5D = -0.774596669241483377%0A x%5B1%5D = 0.0%0A x%5B2%5D = 0.774596669241483377%0A w%5B0%5D = 0.55555555555555556%0A w%5B1%5D = 0.888888888888888889%0A w%5B2%5D = 0.55555555555555556%0A elif p == 3:%0A x%5B0%5D = -0.861136311594052575%0A x%5B1%5D = -0.339981043584856265%0A x%5B2%5D = 0.339981043584856265%0A x%5B3%5D = 0.861136311594052575%0A w%5B0%5D = 0.347854845137453853%0A w%5B1%5D = 0.65214515486254615%0A w%5B2%5D = 0.65214515486254614%0A w%5B3%5D = 0.34785484513745386%0A return x,w%0A%0A#$%C2%A0comment%0Aif x %3E 1:%0A print(x)%0A'''%0A%0Afrom pyccel.codegen import preprocess_as_str%0Atxt = preprocess_as_str(code)%0Aprint txt%0A
|
|
175b36b0eb1e84378e350ddc31da3ef7fcae32c2 | Add test. | test/test.py | test/test.py | Python | 0.000001 | @@ -0,0 +1,1175 @@
+#!/usr/bin/env python%0A%0A# Test PyCharlockHolmes%0A#%0A%0Afrom charlockholmes import detect%0A%0ATEST_FILES = %7B%0A %22py%22: %5B%0A %22file/test.py%22,%0A %7B'confidence': 34, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'%7D%0A %5D,%0A %22txt%22: %5B%0A %22file/test.txt%22,%0A %7B'confidence': 16, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'%7D%0A %5D,%0A %22c%22: %5B%0A %22file/test.c%22,%0A %7B'confidence': 50, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'%7D%0A %5D,%0A %22sh%22: %5B%0A %22file/test.sh%22,%0A %7B'confidence': 21, 'type': 'text', 'language': 'en', 'encoding': 'ISO-8859-1'%7D%0A %5D,%0A %22elf%22: %5B%0A %22file/test%22,%0A %7B'confidence': 100, 'type': 'text'%7D%0A %5D,%0A %22bz2%22: %5B%0A %22file/test.tar.bz2%22,%0A %7B'confidence': 100, 'type': 'text'%7D%0A %5D,%0A %22gz%22: %5B%0A %22file/test.tar.gz%22,%0A %7B'confidence': 100, 'type': 'text'%7D%0A %5D,%0A%7D%0A%0Afor test in TEST_FILES:%0A file_path = TEST_FILES%5Btest%5D%5B0%5D%0A file_result = TEST_FILES%5Btest%5D%5B1%5D%0A content = open(file_path).read()%0A test_result = detect(content)%0A if test_result == file_result:%0A print file_path + %22: OK%22%0A else:%0A print file_path + %22: ERROR%22%0A
|
|
829defd825d5e311ad187569ba61381ecb40dd08 | Add q1 2019 | 2019/q1.py | 2019/q1.py | Python | 0.000002 | @@ -0,0 +1,2746 @@
+%22%22%22%0ABIO 2019 Q1: Palindromes%0A%0AThis ended up being surprisingly difficult, for whatever reason I found it surprisingly difficult%0Ato reason about.%0A%0AI found it easier to think about how, given a palindrome, I would calculate the following%0Apalindrome. There are ~2 cases:%0A%0AOdd number of digits: %5Bleft%5D%5Bmiddle%5D%5Bright = reversed(right)%5D%0AEven number of digits: %5Bleft%5D%5Bright = reversed(right)%5D%0A%0AIn the first case, we can (hopefully) obviously generate the next palindrome by adding one to the%0Amiddle digit, and carrying the one into the left hand side as if you were doing regular addition,%0Aand then reflecting the new value to produce a new palindrome.%0A%0AIn the second case, we can basically do the same thing, but without the middle digit.%0A%0AAnd then if we are still 'carrying' anything by the time we get to the end, this becomes a new%0Aleft-most digit, and the right most digit becomes the new middle digit.%0A%0A%0A%22%22%22%0Aclass Palindrome:%0A def __init__(self, left, middle):%0A assert middle is None or middle %3C 10 and middle %3E= 0%0A%0A self.left = list(int(x) for x in str(left))%0A self.middle = middle%0A%0A def add_one_left(self, carry):%0A for i in range(len(self.left)):%0A ix = -(i + 1)%0A%0A if self.left%5Bix%5D == 9:%0A self.left%5Bix%5D = 0%0A carry = True%0A else:%0A self.left%5Bix%5D += 1%0A carry = False%0A break%0A%0A if carry and self.middle is None:%0A self.middle = self.left%5B-1%5D%0A self.left = %5B1%5D + self.left%5B:-1%5D%0A%0A elif carry and self.middle is not None:%0A self.left = %5B1%5D + self.left%0A self.middle = None%0A%0A def next_palindrome(self):%0A if self.middle is not None:%0A if self.middle == 9:%0A self.middle = 0%0A self.add_one_left(carry = True)%0A else:%0A self.middle += 1%0A else:%0A self.add_one_left(carry = False)%0A%0A def as_int(self):%0A if self.middle is None:%0A l = self.left + list(reversed(self.left))%0A else:%0A l = self.left + %5Bself.middle%5D + list(reversed(self.left))%0A%0A return int(%22%22.join(str(x) for x in l))%0A %0A @staticmethod%0A def of_int(i):%0A s = str(i)%0A%0A if len(s) %25 2 == 0:%0A left = %5Bint(x) for x in s%5B:len(s) //2%5D%5D%0A middle = None%0A else:%0A left = %5Bint(x) for x in s%5B:len(s) //2%5D%5D%0A middle = int(s%5Blen(left)%5D)%0A%0A return Palindrome(%22%22.join(str(x) for x in left), middle)%0A%0A%0A def __str__(self):%0A return str(self.as_int())%0A%0A%0Ai = input()%0Ain_int = int(i)%0A%0Ap = Palindrome.of_int(i)%0A%0Ap_int = p.as_int()%0A%0Aif p_int %3E in_int:%0A print(p_int)%0Aelse:%0A p.next_palindrome()%0A print(p)%0A
|
|
80b395ce04248b8fb8b47187410a661a87931145 | remove main block | src/robot/libraries/ProcessLibrary.py | src/robot/libraries/ProcessLibrary.py | # Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import subprocess
import tempfile
from robot.utils import ConnectionCache
class ProcessData(object):
def __init__(self, stdout, stderr):
self.stdout = stdout
self.stderr = stderr
class ProcessLibrary(object):
ROBOT_LIBRARY_SCOPE='GLOBAL'
def __init__(self):
self._started_processes = ConnectionCache()
self._logs = dict()
self._tempdir = tempfile.mkdtemp(suffix="processlib")
def run_process(self, command, *args, **conf):
active_process_index = self._started_processes.current_index
try:
p = self.start_new_process(command, *args, **conf)
return self.wait_for_process(p)
finally:
self._started_processes.switch(active_process_index)
def start_new_process(self, command, *args, **conf):
cmd = [command]+[str(i) for i in args]
config = _NewProcessConfig(conf, self._tempdir)
stdout_stream = config.stdout_stream
stderr_stream = config.stderr_stream
pd = ProcessData(stdout_stream.name, stderr_stream.name)
use_shell = config.use_shell
if use_shell and args:
cmd = subprocess.list2cmdline(cmd)
p = subprocess.Popen(cmd, stdout=stdout_stream, stderr=stderr_stream,
shell=use_shell, cwd=config.cwd)
index = self._started_processes.register(p, alias=config.alias)
self._logs[index] = pd
return index
def process_is_alive(self, handle=None):
if handle:
self._started_processes.switch(handle)
return self._started_processes.current.poll() is None
def process_should_be_alive(self, handle=None):
if not self.process_is_alive(handle):
raise AssertionError('Process is not alive')
def process_should_be_dead(self, handle=None):
if self.process_is_alive(handle):
raise AssertionError('Process is alive')
def wait_for_process(self, handle=None):
if handle:
self._started_processes.switch(handle)
exit_code = self._started_processes.current.wait()
logs = self._logs[handle]
return ExecutionResult(logs.stdout, logs.stderr, exit_code)
def kill_process(self, handle=None):
if handle:
self._started_processes.switch(handle)
self._started_processes.current.kill()
def terminate_process(self, handle=None):
if handle:
self._started_processes.switch(handle)
self._started_processes.current.terminate()
def kill_all_processes(self):
for handle in range(len(self._started_processes._connections)):
if self.process_is_alive(handle):
self.kill_process(handle)
def get_process_id(self, handle=None):
if handle:
self._started_processes.switch(handle)
return self._started_processes.current.pid
def input_to_process(self, handle, msg):
if not msg:
return
alog = self._logs[handle]
self._started_processes.switch(handle)
self._started_processes.current.wait()
with open(alog.stdout,'a') as f:
f.write(msg.encode('UTF-8'))
def switch_active_process(self, handle):
self._started_processes.switch(handle)
class ExecutionResult(object):
_stdout = _stderr = None
def __init__(self, stdout_name, stderr_name, exit_code=None):
self._stdout_name = stdout_name
self._stderr_name = stderr_name
self.exit_code = exit_code
@property
def stdout(self):
if self._stdout is None:
with open(self._stdout_name,'r') as f:
self._stdout = f.read()
return self._stdout
@property
def stderr(self):
if self._stderr is None:
with open(self._stderr_name,'r') as f:
self._stderr = f.read()
return self._stderr
if __name__ == '__main__':
r = ProcessLibrary().run_process('python', '-c', "print \'hello\'")
print repr(r.stdout)
class _NewProcessConfig(object):
def __init__(self, conf, tempdir):
self._tempdir = tempdir
self._conf = conf
self.stdout_stream = open(conf['stdout'], 'w') if 'stdout' in conf else self._get_temp_file("stdout")
self.stderr_stream = open(conf['stderr'], 'w') if 'stderr' in conf else self._get_temp_file("stderr")
self.use_shell = (conf.get('shell', 'False') != 'False')
self.cwd = conf.get('cwd', None)
self.alias = conf.get('alias', None)
def _get_temp_file(self, suffix):
return tempfile.NamedTemporaryFile(delete=False,
prefix='tmp_logfile_',
suffix="_%s" % suffix,
dir=self._tempdir)
| Python | 0.000096 | @@ -4550,132 +4550,8 @@
rr%0A%0A
-if __name__ == '__main__':%0A r = ProcessLibrary().run_process('python', '-c', %22print %5C'hello%5C'%22)%0A print repr(r.stdout)%0A
%0Acla
|
5a1518bc2bd8b509bc5c00850ba1da59989147f8 | Add basic tests | test_main.py | test_main.py | Python | 0.000004 | @@ -0,0 +1,1098 @@
+#!/usr/bin/env python%0Aimport sys%0Afrom io import StringIO%0Afrom jproperties import Properties%0A%0A%0Adef _test_deserialize(*data):%0A%09for s, items in data:%0A%09%09props = Properties()%0A%09%09props.load(StringIO(s))%0A%09%09assert list(props.items()) == items%0A%0A%0Adef test_eq_separator():%0A%09_test_deserialize(%0A%09%09(%22a=b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a= b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a = b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a =b%22, %5B(%22a%22, %22b%22)%5D),%0A%09)%0A%0Adef test_colon_separator():%0A%09_test_deserialize(%0A%09%09(%22a:b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a: b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a : b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a :b%22, %5B(%22a%22, %22b%22)%5D),%0A%09)%0A%0A%0Adef test_space_separator():%0A%09_test_deserialize(%0A%09%09(%22a b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a b%22, %5B(%22a%22, %22b%22)%5D),%0A%09%09(%22a b%22, %5B(%22a%22, %22b%22)%5D),%0A%09)%0A%0A%0Adef test_space_in_key():%0A%09_test_deserialize(%0A%09%09(%22key%5C with%5C spaces = b%22, %5B(%22key with spaces%22, %22b%22)%5D),%0A%09%09(%22key%5C with%5C spaces b%22, %5B(%22key with spaces%22, %22b%22)%5D),%0A%09%09(%22key%5C with%5C spaces : b%22, %5B(%22key with spaces%22, %22b%22)%5D),%0A%09%09(%22key%5C with%5C spaces%5C : b%22, %5B(%22key with spaces %22, %22b%22)%5D),%0A%09)%0A%0A%0Adef main():%0A%09for name, f in globals().items():%0A%09%09if name.startswith(%22test_%22) and callable(f):%0A%09%09%09f()%0A%0A%0Aif __name__ == %22__main__%22:%0A%09main()%0A
|
|
4249c6456ca21ad6bbec0eccdf66aef629deb511 | Add basic tag testing script | test_tags.py | test_tags.py | Python | 0.000006 | @@ -0,0 +1,344 @@
+import sys%0Aimport requests%0A%0Afrom wikibugs import Wikibugs2%0Afrom channelfilter import ChannelFilter%0Aimport configfetcher%0A%0Aconf = configfetcher.ConfigFetcher()%0Aw = Wikibugs2(conf)%0Ac = ChannelFilter()%0A%0A%0Aprint(%22%5Cn%5Cn%5Cn%5Cn%5Cn%5Cn%5Cn%5Cn%22)%0Apage = requests.get(sys.argv%5B1%5D).text%0Atags = w.get_tags(page)%0A%0Afor tag in tags:%0A print(tag, c.channels_for(%5Btag%5D))%0A
|
|
377f44ea05d8fc550be5916a1ca6c085df8f8cdc | add mysql database backup script | backupmysql.py | backupmysql.py | Python | 0.000001 | @@ -0,0 +1,2389 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0A#Author: Andrew McDonald [email protected] http://mcdee.com.au%0A %0A# Example: config file%0A#%5Bclient%5D%0A#host = localhost%0A#user = root%0A#password = root-pass%0A %0Afrom datetime import datetime%0Aimport sys, os, subprocess, tarfile%0Aimport zipfile, glob, logging%0A%0Adate_format = %5B%0A %22%25A %25d.%25m.%25Y%22,%0A %22%25Y%25m%25d%22%0A%5D%0A%0Adefault_date_format = 1%0A%0A %0Adef print_usage(script):%0A print 'Usage:', script, '--cnf %3Cconfig file%3E', '--todir %3Cdirectory%3E'%0A sys.exit(1)%0A %0Adef usage(args):%0A if not len(args) == 5:%0A print_usage(args%5B0%5D)%0A else:%0A req_args = %5B'--cnf', '--todir'%5D%0A for a in req_args:%0A if not a in req_args:%0A print_usage()%0A if not os.path.exists(args%5Bargs.index(a)+1%5D):%0A print 'Error: Path not found:', args%5Bargs.index(a)+1%5D%0A print_usage()%0A cnf = args%5Bargs.index('--cnf')+1%5D%0A dir = args%5Bargs.index('--todir')+1%5D%0A return cnf, dir%0A %0Adef mysql_dblist(cnf):%0A no_backup = %5B'Database', 'information_schema', 'performance_schema', 'test'%5D%0A cmd = %5B'mysql', '--defaults-extra-file='+cnf, '-e', 'show databases'%5D%0A p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)%0A stdout, stderr = p.communicate()%0A if p.returncode %3E 0:%0A print 'MySQL Error:'%0A print stderr%0A sys.exit(1)%0A dblist = stdout.strip().split('%5Cn')%0A for item in no_backup:%0A try:%0A dblist.remove(item)%0A except ValueError:%0A continue%0A if len(dblist) == 1:%0A print %22Doesn't appear to be any user databases found%22%0A return dblist%0A %0Adef mysql_backup(dblist, dir, cnf):%0A for db in dblist:%0A bdate = datetime.now().strftime('%25Y%25m%25d%25H%25M')%0A bfile = db+'_'+bdate+'.sql'%0A dumpfile = open(os.path.join(dir, bfile), 'w')%0A if db == 'mysql':%0A cmd = %5B'mysqldump', '--defaults-extra-file='+cnf, '--events', db%5D%0A else:%0A cmd = %5B'mysqldump', '--defaults-extra-file='+cnf, db%5D%0A p = subprocess.Popen(cmd, stdout=dumpfile)%0A retcode = p.wait()%0A dumpfile.close()%0A if retcode %3E 0:%0A print 'Error:', db, 'backup error'%0A backup_compress(dir, bfile)%0A %0Adef backup_compress(dir, bfile):%0A tar = tarfile.open(os.path.join(dir, bfile)+'.tar.gz', 'w:gz')%0A tar.add(os.path.join(dir, bfile), arcname=bfile)%0A tar.close()%0A os.remove(os.path.join(dir, bfile))%0A %0Adef main():%0A cnf, dir = usage(sys.argv)%0A dblist = mysql_dblist(cnf)%0A mysql_backup(dblist, dir, cnf)%0A %0Aif __name__ == '__main__':%0A main()%0A
|
|
07c5ed48d107c7ec88a990698647a70187d277a1 | Update cms_helper.py | cms_helper.py | cms_helper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from tempfile import mkdtemp
gettext = lambda s: s
HELPER_SETTINGS = {
'NOSE_ARGS': [
'-s',
],
'ROOT_URLCONF': 'tests.test_utils.urls',
'INSTALLED_APPS': [
'admin_enhancer',
'filer',
'parler',
'taggit',
'meta',
'meta_mixin',
'easy_thumbnails',
'djangocms_text_ckeditor',
'cmsplugin_filer_image',
'taggit_autosuggest',
],
'LANGUAGE_CODE': 'en',
'LANGUAGES': (
('en', gettext('English')),
('fr', gettext('French')),
('it', gettext('Italiano')),
),
'CMS_LANGUAGES': {
1: [
{
'code': 'en',
'name': gettext('English'),
'public': True,
},
{
'code': 'it',
'name': gettext('Italiano'),
'public': True,
},
{
'code': 'fr',
'name': gettext('French'),
'public': True,
},
],
2: [
{
'code': 'en',
'name': gettext('English'),
'public': True,
},
],
'default': {
'hide_untranslated': False,
},
},
'PARLER_LANGUAGES': {
1: (
{'code': 'en'},
{'code': 'it'},
{'code': 'fr'},
),
2: (
{'code': 'en'},
),
'default': {
'fallback': 'en',
'hide_untranslated': False,
}
},
'MIGRATION_MODULES': {
'cmsplugin_filer_image': 'cmsplugin_filer_image.migrations_django',
},
'META_SITE_PROTOCOL': 'http',
'META_SITE_DOMAIN': 'example.com',
'META_USE_OG_PROPERTIES': True,
'META_USE_TWITTER_PROPERTIES': True,
'META_USE_GOOGLEPLUS_PROPERTIES': True,
'THUMBNAIL_PROCESSORS': (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
),
'FILE_UPLOAD_TEMP_DIR': mkdtemp(),
'SITE_ID': 1
}
if 'test' in sys.argv:
HELPER_SETTINGS['INSTALLED_APPS'].append('django_nose')
def run():
from djangocms_helper import runner
sys.argv.append('--nose-runner')
runner.cms('djangocms_blog')
if __name__ == "__main__":
run() | Python | 0.000001 | @@ -39,17 +39,16 @@
f-8 -*-%0A
-%0A
import s
@@ -79,16 +79,17 @@
mkdtemp%0A
+%0A
gettext
@@ -125,15 +125,18 @@
S =
-%7B
+dict(
%0A
-'
NOSE
@@ -140,19 +140,17 @@
OSE_ARGS
-':
+=
%5B%0A
@@ -168,17 +168,16 @@
%5D,%0A
-'
ROOT_URL
@@ -180,19 +180,17 @@
_URLCONF
-':
+=
'tests.t
@@ -210,17 +210,16 @@
s',%0A
-'
INSTALLE
@@ -224,19 +224,17 @@
LED_APPS
-':
+=
%5B%0A
@@ -476,25 +476,24 @@
%0A %5D,%0A
-'
LANGUAGE_COD
@@ -493,19 +493,17 @@
AGE_CODE
-':
+=
'en',%0A
@@ -504,17 +504,16 @@
n',%0A
-'
LANGUAGE
@@ -513,19 +513,17 @@
ANGUAGES
-':
+=
(%0A
@@ -635,17 +635,16 @@
),%0A
-'
CMS_LANG
@@ -640,35 +640,33 @@
CMS_LANGUAGES
-':
+=
%7B%0A 1: %5B%0A
@@ -1322,17 +1322,16 @@
%7D,%0A
-'
PARLER_L
@@ -1338,19 +1338,17 @@
ANGUAGES
-':
+=
%7B%0A
@@ -1613,17 +1613,16 @@
%7D,%0A
-'
MIGRATIO
@@ -1630,19 +1630,17 @@
_MODULES
-':
+=
%7B%0A
@@ -1716,25 +1716,24 @@
%0A %7D,%0A
-'
META_SITE_PR
@@ -1738,19 +1738,17 @@
PROTOCOL
-':
+=
'http',%0A
@@ -1747,25 +1747,24 @@
'http',%0A
-'
META_SITE_DO
@@ -1767,19 +1767,17 @@
E_DOMAIN
-':
+=
'example
@@ -1783,25 +1783,24 @@
e.com',%0A
-'
META_USE_OG_
@@ -1801,35 +1801,33 @@
SE_OG_PROPERTIES
-':
+=
True,%0A 'META_
@@ -1812,33 +1812,32 @@
RTIES=True,%0A
-'
META_USE_TWITTER
@@ -1839,35 +1839,33 @@
ITTER_PROPERTIES
-':
+=
True,%0A 'META_
@@ -1858,17 +1858,16 @@
ue,%0A
-'
META_USE
@@ -1888,19 +1888,17 @@
OPERTIES
-':
+=
True,%0A
@@ -1899,17 +1899,16 @@
ue,%0A
-'
THUMBNAI
@@ -1919,19 +1919,17 @@
OCESSORS
-':
+=
(%0A
@@ -2150,17 +2150,16 @@
),%0A
-'
FILE_UPL
@@ -2170,19 +2170,17 @@
TEMP_DIR
-':
+=
mkdtemp(
@@ -2190,22 +2190,19 @@
-'
SITE_ID
-': 1%0A%7D
+=1%0A)
%0Aif
@@ -2219,16 +2219,38 @@
sys.argv
+ or len(sys.argv) == 1
:%0A HE
@@ -2356,16 +2356,69 @@
runner%0A
+ if 'test' in sys.argv or len(sys.argv) == 1:%0A
sys.
@@ -2512,12 +2512,13 @@
%22:%0A run()
+%0A
|
a3a2f645d3154334e8ae6af93fe56a3f2368c4c7 | Add multiprocessing pool example | multiprocessing_pool.py | multiprocessing_pool.py | Python | 0 | @@ -0,0 +1,1001 @@
+from multiprocessing.pool import ThreadPool as Pool%0Afrom multiprocessing import Queue as PQueue%0Aimport Queue%0A%0Amy_dict = %7B%0A 'url1': 'url2',%0A 'url3': 'url4',%0A%7D%0A%0Amy_q = PQueue()%0A%0A%0Adef test_p(uq):%0A q, url = uq%5B0%5D, uq%5B1%5D%0A q.put(url, False)%0A%0A%0Adef main():%0A global my_dict%0A global my_q%0A print %22Going to process (%25d)%22 %25 len(my_dict.keys() + my_dict.values())%0A p = Pool(processes=8)%0A print p.map(test_p, %5B(my_q, url) for url in my_dict.keys() + my_dict.values()%5D)%0A%0A its = %5B%5D%0A while True:%0A%0A # If we go more than 30 seconds without something, die%0A try:%0A print %22Waiting for item from queue for up to 5 seconds%22%0A i = my_q.get(True, 5)%0A print %22found %25s from the queue !!%22 %25 i%0A its.append(i)%0A except Queue.Empty:%0A print %22Caught queue empty exception, done%22%0A break%0A print %22processed %25d items, completion successful%22 %25 len(its)%0A%0A p.close()%0A p.join()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
b117fbc82de4fb6acd8a044651c95e2425d9e71c | Create preprocess_MS_dataset_utils_test.py | preprocess_MS_dataset_utils_test.py | preprocess_MS_dataset_utils_test.py | Python | 0.000004 | @@ -0,0 +1,2293 @@
+# Copyright 2020 Google LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# https://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%22%22%22 Testing for preprocess_MS_dataset_utils.py.%22%22%22%0A%0Aimport unittest%0Afrom unittest import TestCase%0A%0Afrom preprocess_MS_dataset_utils import process_row%0A%0A%0Aclass PreprocessMSDatasetUtilsTest(TestCase): %0A def test_process_row_without_excluded_sample(self):%0A row = %5B%22PlaceHolder %7C%7C%7C PlaceHolder %7C%7C%7C OriginalSentence %7C%7C%7C %22 %0A %22Summary1 %7C%7C%7C 6 %7C%7C%7C 6 %7C%7C%7C 6 %7C%7C%7C Most important meaning Flawless language %22%0A %22%7C%7C%7C Summary2 %7C%7C%7C 7 %7C%7C%7C 7 %7C%7C%7C 7 %7C%7C%7C Most important meaning Minor errors%22%5D%0A output_original_sentence, output_shortened_sentences_list, %5C%0A output_shortened_ratings_list, count_excluded = process_row(row)%0A %0A self.assertEqual(output_original_sentence, 'OriginalSentence')%0A self.assertEqual(output_shortened_sentences_list, %5B'Summary1', 'Summary2'%5D)%0A self.assertEqual(output_shortened_ratings_list, %5B%5B'6'%5D, %5B'7'%5D%5D)%0A self.assertEqual(count_excluded, 0)%0A %0A %0A def test_process_row_with_excluded_sample(self):%0A row = %5B%22PlaceHolder %7C%7C%7C PlaceHolder %7C%7C%7C OriginalSentence %7C%7C%7C %22 %0A %22Summary1 %7C%7C%7C 7 %7C%7C%7C 7 %7C%7C%7C 7 %7C%7C%7C Most important meaning Minor errors %22%0A %22%7C%7C%7C Summary2 %7C%7C%7C 9 %7C%7C%7C 9 %7C%7C%7C 9 %7C%7C%7C Most important meaning Disfluent or incomprehensible%22%5D%0A output_original_sentence, output_shortened_sentences_list, %5C%0A output_shortened_ratings_list, count_excluded = process_row(row)%0A %0A self.assertEqual(output_original_sentence, 'OriginalSentence')%0A self.assertEqual(output_shortened_sentences_list, %5B'Summary1'%5D)%0A self.assertEqual(output_shortened_ratings_list, %5B%5B'7'%5D%5D)%0A self.assertEqual(count_excluded, 1)%0A%0A %0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
6bf43087967dee2bfb9f31a5de61c91ed0664586 | update get ids and columns in pecanstreet, much faster | proto/pylearn2/create_ev_dataset.py | proto/pylearn2/create_ev_dataset.py | Python | 0.000001 | @@ -0,0 +1,546 @@
+import sys%0Aimport os.path%0Asys.path.append(os.path.join(os.pardir,os.pardir))%0Aimport disaggregator as da%0Aimport disaggregator.PecanStreetDatasetAdapter as psda%0A%0Adb_url = %22postgresql://USERNAME:[email protected]:5432/postgres%22%0Apsda.set_url(db_url)%0Aschema = 'shared'%0A%0A%0Atable_names = psda.get_table_names(schema)%0A%0Aall_ids = %5B%5D%0Aall_columns = %5B%5D%0Afor table_name in table_names:%0A ids,columns = psda.get_table_dataids_and_column_names(schema,table_name)%0A all_ids.append(ids)%0A all_columns.append(columns)%0Aprint all_ids%0Aprint all_columns%0A
|
|
10524dd2c42ef499d36b3f64e31150885d45e51b | Add slot_usage command for checking cluster balance | streamparse/cli/slot_usage.py | streamparse/cli/slot_usage.py | Python | 0 | @@ -0,0 +1,3028 @@
+%22%22%22%0ADisplay slots used by every topology on the cluster%0A%22%22%22%0A%0Afrom __future__ import absolute_import, print_function%0A%0Afrom collections import Counter, defaultdict%0A%0Afrom pkg_resources import parse_version%0Afrom prettytable import PrettyTable%0Afrom six import iteritems%0A%0Afrom .common import add_environment%0Afrom ..util import get_ui_json, storm_lib_version%0A%0A%0Adef subparser_hook(subparsers):%0A %22%22%22 Hook to add subparser for this command. %22%22%22%0A subparser = subparsers.add_parser('slot_usage',%0A description=__doc__,%0A help=main.__doc__)%0A subparser.set_defaults(func=main)%0A add_environment(subparser)%0A%0A%0Adef display_slot_usage(env_name):%0A print('Querying Storm UI REST service for slot usage stats (this can take a while)...')%0A topology_summary = '/api/v1/topology/summary'%0A topology_detail = '/api/v1/topology/%7Btopology%7D'%0A component = '/api/v1/topology/%7Btopology%7D/component/%7Bcomponent%7D'%0A topo_summary_json = get_ui_json(env_name, topology_summary)%0A topology_ids = %5Bx%5B'id'%5D for x in topo_summary_json%5B'topologies'%5D%5D%0A # Keep track of the number of workers used by each topology on each machine%0A topology_worker_ports = defaultdict(lambda: defaultdict(set))%0A topology_executor_counts = defaultdict(Counter)%0A topology_names = set()%0A%0A for topology in topology_ids:%0A topology_detail_json = get_ui_json(env_name,%0A topology_detail.format(topology=topology))%0A spouts = %5Bx%5B'spoutId'%5D for x in topology_detail_json%5B'spouts'%5D%5D%0A bolts = %5Bx%5B'boltId'%5D for x in topology_detail_json%5B'bolts'%5D%5D%0A for comp in spouts + bolts:%0A comp_detail = get_ui_json(env_name,%0A component.format(topology=topology,%0A component=comp))%0A for worker in comp_detail%5B'executorStats'%5D:%0A topology_worker_ports%5Bworker%5B'host'%5D%5D%5Btopology_detail_json%5B'name'%5D%5D.add(worker%5B'port'%5D)%0A topology_executor_counts%5Bworker%5B'host'%5D%5D%5Btopology_detail_json%5B'name'%5D%5D += 1%0A topology_names.add(topology_detail_json%5B'name'%5D)%0A%0A print(%22# Slot (and Executor) Counts by Topology%22)%0A topology_names = sorted(topology_names)%0A table = PrettyTable(%5B%22Host%22%5D + topology_names)%0A table.align = 'l'%0A for host, host_dict in sorted(iteritems(topology_worker_ports)):%0A row = %5Bhost%5D + %5B'%7B%7D (%7B%7D)'.format(len(host_dict.get(topology, set())),%0A topology_executor_counts%5Bhost%5D%5Btopology%5D)%0A for topology in topology_names%5D%0A table.add_row(row)%0A print(table)%0A print()%0A%0A%0Adef main(args):%0A %22%22%22 Display uptime for Storm workers. %22%22%22%0A storm_version = storm_lib_version()%0A if storm_version %3E= parse_version('0.9.2-incubating'):%0A display_slot_usage(args.environment)%0A else:%0A print(%22ERROR: Storm %7B0%7D does not support this command.%22%0A .format(storm_version))%0A
|
|
19636701b429341bd5d2ac69d0540f550e38bb76 | Fix RCWA example | examples/ex_RCWA.py | examples/ex_RCWA.py | """Rigorous Coupled Wave Analysis example."""
import numpy
import pylab
import EMpy
from EMpy.materials import IsotropicMaterial, AnisotropicMaterial, RefractiveIndex, EpsilonTensor
alpha = 0.
delta = 0.
# psi = EMpy.utils.deg2rad(0.) # TM
# psi = EMpy.utils.deg2rad(90.) # TE
psi = EMpy.utils.deg2rad(70.) # hybrid
phi = EMpy.utils.deg2rad(90.)
LAMBDA = 1016e-9 # grating periodicity
n = 2 # orders of diffraction
UV6 = IsotropicMaterial(
'UV6',
n0=RefractiveIndex(n0_const=1.560))
SiN = AnisotropicMaterial(
'SiN',
epsilon_tensor=EpsilonTensor(
epsilon_tensor_const=EMpy.constants.eps0 * EMpy.utils.euler_rotate(
numpy.diag(numpy.asarray([1.8550, 1.8750, 1.9130]) ** 2),
EMpy.utils.deg2rad(14),
EMpy.utils.deg2rad(25),
EMpy.utils.deg2rad(32))))
BPTEOS = IsotropicMaterial(
'BPTEOS',
n0=RefractiveIndex(n0_const=1.448))
ARC1 = IsotropicMaterial(
'ARC1', n0=RefractiveIndex(n0_const=1.448))
EFF = IsotropicMaterial(
'EFF', n0=RefractiveIndex(n0_const=1.6))
multilayer1 = EMpy.utils.Multilayer([
EMpy.utils.Layer(EMpy.materials.Air, numpy.inf),
EMpy.utils.Layer(SiN, 226e-9),
EMpy.utils.Layer(BPTEOS, 226e-9),
EMpy.utils.BinaryGrating(SiN, BPTEOS, .659, LAMBDA, 123e-9),
EMpy.utils.Layer(SiN, 219e-9),
EMpy.utils.Layer(EMpy.materials.SiO2, 2188e-9),
EMpy.utils.Layer(EMpy.materials.Si, numpy.inf),
])
multilayer2 = EMpy.utils.Multilayer([
EMpy.utils.Layer(EMpy.materials.Air, numpy.inf),
EMpy.utils.Layer(SiN, 226e-9),
EMpy.utils.Layer(BPTEOS, 226e-9),
EMpy.utils.Layer(IsotropicMaterial(n0=RefractiveIndex(n0_const=1.6)), 123e-9),
EMpy.utils.Layer(SiN, 219e-9),
EMpy.utils.Layer(EMpy.materials.SiO2, 2188e-9),
EMpy.utils.Layer(EMpy.materials.Si, numpy.inf),
])
wls = numpy.linspace(1.45e-6, 1.75e-6, 301)
solution1 = EMpy.RCWA.AnisotropicRCWA(multilayer1, alpha, delta, psi, phi, n).solve(wls)
solution2 = EMpy.RCWA.AnisotropicRCWA(multilayer2, alpha, delta, psi, phi, n).solve(wls)
pylab.plot(wls, solution1.DEO1[n, :], 'k.-',
wls, solution1.DEO3[n, :], 'r.-',
wls, solution1.DEE1[n, :], 'b.-',
wls, solution1.DEE3[n, :], 'g.-',
wls, solution2.DEO1[n, :], 'k--',
wls, solution2.DEO3[n, :], 'r--',
wls, solution2.DEE1[n, :], 'b--',
wls, solution2.DEE3[n, :], 'g--',
)
pylab.xlabel('wavelength')
pylab.ylabel('diffraction efficiency')
pylab.legend(('DEO1', 'DEO3', 'DEE1', 'DEE3'))
pylab.axis('tight')
pylab.ylim([0, 1])
pylab.show()
| Python | 0.000002 | @@ -106,16 +106,22 @@
import
+(%0A
Isotropi
@@ -182,16 +182,17 @@
onTensor
+)
%0A%0A%0Aalpha
@@ -1615,16 +1615,25 @@
s.Layer(
+%0A
Isotropi
@@ -1904,32 +1904,37 @@
AnisotropicRCWA(
+%0A
multilayer1, alp
@@ -2006,16 +2006,21 @@
picRCWA(
+%0A
multilay
@@ -2063,16 +2063,26 @@
e(wls)%0A%0A
+um = 1e-6%0A
pylab.pl
@@ -2084,19 +2084,31 @@
ab.plot(
-wls
+%0A # wls / um
, soluti
@@ -2130,34 +2130,34 @@
'k.-',%0A
-
- wls
+# wls / um
, solution1.
@@ -2175,34 +2175,32 @@
'r.-',%0A
- wls
+wls / um
, solution1.
@@ -2218,34 +2218,32 @@
'b.-',%0A
- wls
+wls / um
, solution1.
@@ -2261,34 +2261,34 @@
'g.-',%0A
- wls
+# wls / um
, solution2.
@@ -2306,34 +2306,34 @@
'k--',%0A
- wls
+# wls / um
, solution2.
@@ -2347,38 +2347,36 @@
:%5D, 'r--',%0A
-
wls
+ / um
, solution2.
@@ -2402,18 +2402,16 @@
- wls
+wls / um
, so
@@ -2437,27 +2437,16 @@
'g--',%0A
-
)%0Apylab.
@@ -2463,16 +2463,21 @@
velength
+ %5Bum%5D
')%0Apylab
@@ -2592,17 +2592,20 @@
lim(%5B0,
-1
+0.15
%5D)%0Apylab
|
b69cc15467456a070333ff00f886f27ca391b85b | Add script for appending entries to .gitignore. | webrtc/build/extra_gitignore.py | webrtc/build/extra_gitignore.py | Python | 0.000005 | @@ -0,0 +1,1333 @@
+#!/usr/bin/env python%0A# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.%0A#%0A# Use of this source code is governed by a BSD-style license%0A# that can be found in the LICENSE file in the root of the source%0A# tree. An additional intellectual property rights grant can be found%0A# in the file PATENTS. All contributing project authors may%0A# be found in the AUTHORS file in the root of the source tree.%0A%0A%22%22%22 Adds extra patterns to the root .gitignore file.%0A%0AReads the contents of the filename given as the first argument and appends%0Athem to the root .gitignore file. The new entires are intended to be additional%0Aignoring patterns, or negating patterns to override existing entries (man%0Agitignore for more details).%0A%22%22%22%0A%0Aimport os%0Aimport sys%0A%0AMODIFY_STRING = '# The following added by %25s%5Cn'%0A%0Adef main(argv):%0A if not argv%5B1%5D:%0A # Special case; do nothing.%0A return 0%0A%0A modify_string = (MODIFY_STRING %25 argv%5B0%5D)%0A gitignore_file = os.path.dirname(argv%5B0%5D) + '/../.gitignore'%0A lines = open(gitignore_file, 'r').readlines()%0A for i, line in enumerate(lines):%0A if line == modify_string:%0A lines = lines%5B:i%5D%0A break%0A lines.append(modify_string)%0A%0A f = open(gitignore_file, 'w')%0A f.write(''.join(lines))%0A f.write(open(argv%5B1%5D, 'r').read())%0A f.close()%0A%0Aif __name__ == '__main__':%0A sys.exit(main(sys.argv))%0A
|
|
5e07a21cce64e1845832641b6de1951182d41ea0 | add back module changed mixin | core/mixins.py | core/mixins.py | Python | 0 | @@ -0,0 +1,1993 @@
+%22%22%22%0Acore.mixins - Mixins available to use with models%0A%22%22%22%0Afrom django.db.models.signals import post_save%0A%0A%0Adef on_changed(sender, **kwargs):%0A %22%22%22%0A Calls the %60model_changed%60 method and then resets the state.%0A %22%22%22%0A instance = kwargs.get(%22instance%22)%0A is_new = kwargs.get(%22created%22)%0A dirty_fields = instance.get_dirty_fields()%0A instance.model_changed(instance.original_state, dirty_fields, is_new)%0A instance.original_state = instance.to_dict()%0A%0A%0Aclass ModelChangedMixin(object):%0A %22%22%22%0A Mixin for detecting changes to a model%0A %22%22%22%0A def __init__(self, *args, **kwargs):%0A super(ModelChangedMixin, self).__init__(*args, **kwargs)%0A self.original_state = self.to_dict()%0A identifier = %22%7B0%7D_model_changed%22.format(self.__class__.__name__)%0A post_save.connect(%0A on_changed, sender=self.__class__, dispatch_uid=identifier)%0A%0A def to_dict(self):%0A %22%22%22%0A Returns the model as a dict%0A %22%22%22%0A # Get all the field names that are not relations%0A keys = (f.name for f in self._meta.local_fields if not f.rel)%0A return %7Bfield: getattr(self, field) for field in keys%7D%0A%0A def get_dirty_fields(self):%0A %22%22%22%0A Returns the fields dirty on the model%0A %22%22%22%0A dirty_fields = %7B%7D%0A current_state = self.to_dict()%0A%0A for key, value in current_state.items():%0A if self.original_state%5Bkey%5D != value:%0A dirty_fields%5Bkey%5D = value%0A%0A return dirty_fields%0A%0A def is_dirty(self):%0A %22%22%22%0A Return whether the model is dirty%0A%0A An unsaved model is dirty when it has no primary key%0A or has at least one dirty field.%0A %22%22%22%0A if not self.pk:%0A return True%0A%0A return %7B%7D != self.get_dirty_fields()%0A%0A def model_changed(self, old_fields, new_fields, is_new):%0A %22%22%22%0A Post-hook for all fields that have been changed.%0A %22%22%22%0A raise NotImplementedError(%22Missing method %60model_changed%60%22)%0A
|
|
87413a50fa61761f8e669eda641635a0ab7bede3 | Create migration for message | API/chat/migrations/0005_auto_20160511_1921.py | API/chat/migrations/0005_auto_20160511_1921.py | Python | 0 | @@ -0,0 +1,619 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('chat', '0004_auto_20150905_1700'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='message',%0A old_name='text',%0A new_name='message_content',%0A ),%0A migrations.AddField(%0A model_name='message',%0A name='message_type',%0A field=models.CharField(default=b'text', max_length=10, choices=%5B(b'text', b'text'), (b'image', b'image')%5D),%0A ),%0A %5D%0A
|
|
b38527cccf970e069f55c531a4490cdb6eb7042b | Add a widget. | python/pyqt/pyqt5/hello_as_class.py | python/pyqt/pyqt5/hello_as_class.py | Python | 0 | @@ -0,0 +1,1809 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2015 J%C3%A9r%C3%A9mie DECOCK (http://www.jdhp.org)%0A%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A %0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A%0Aimport sys%0Afrom PyQt5.QtWidgets import QApplication, QMainWindow%0A%0Aclass Window(QMainWindow):%0A def __init__(self):%0A super().__init__()%0A %0A self.resize(250, 150)%0A self.setWindowTitle('Hello')%0A self.show()%0A%0Aapp = QApplication(sys.argv)%0A%0Awindow = Window()%0A%0A# The mainloop of the application. The event handling starts from this point.%0A# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead. %0Aexit_code = app.exec_()%0A %0A# The sys.exit() method ensures a clean exit.%0A# The environment will be informed, how the application ended.%0Asys.exit(exit_code)%0A
|
|
b41b2edde5ac7c786b5ce23adec116fe8311d5d7 | Add tests for createaccount command | tests/test_account_service_account.py | tests/test_account_service_account.py | Python | 0.000001 | @@ -0,0 +1,2006 @@
+from unittest.mock import ANY, Mock%0A%0Aimport requests%0Afrom django.core.management import call_command%0A%0Afrom saleor.account.models import ServiceAccount%0Afrom saleor.core.permissions import get_permissions%0A%0A%0Adef test_createaccount_command_creates_service_account():%0A name = %22SA name%22%0A permissions = %5B%22account.manage_users%22, %22order.manage_orders%22%5D%0A call_command(%22createserviceaccount%22, name, permission=permissions)%0A%0A sa_accounts = ServiceAccount.objects.filter(name=name)%0A assert len(sa_accounts) == 1%0A%0A sa_account = sa_accounts%5B0%5D%0A tokens = sa_account.tokens.all()%0A assert len(tokens) == 1%0A%0A%0Adef test_createaccount_command_service_account_has_all_required_permissions():%0A name = %22SA name%22%0A permission_list = %5B%22account.manage_users%22, %22order.manage_orders%22%5D%0A expected_permission = get_permissions(permission_list)%0A call_command(%22createserviceaccount%22, name, permission=permission_list)%0A%0A sa_accounts = ServiceAccount.objects.filter(name=name)%0A assert len(sa_accounts) == 1%0A sa_account = sa_accounts%5B0%5D%0A assert set(sa_account.permissions.all()) == set(expected_permission)%0A%0A%0Adef test_createaccount_command_sends_data_to_target_url(monkeypatch):%0A mocked_response = Mock()%0A mocked_response.status_code = 200%0A mocked_post = Mock(return_value=mocked_response)%0A%0A monkeypatch.setattr(requests, %22post%22, mocked_post)%0A%0A name = %22SA name%22%0A target_url = %22https://ss.shop.com/register%22%0A permissions = %5B%0A %22account.manage_users%22,%0A %5D%0A%0A call_command(%0A %22createserviceaccount%22, name, permission=permissions, target_url=target_url%0A )%0A%0A service_account = ServiceAccount.objects.filter(name=name)%5B0%5D%0A token = service_account.tokens.all()%5B0%5D.auth_token%0A mocked_post.assert_called_once_with(%0A target_url,%0A headers=%7B%22x-saleor-domain%22: %22mirumee.com%22%7D,%0A json=%7B%0A %22auth_token%22: token,%0A %22name%22: %22SA name%22,%0A %22permissions%22: %5B%22account.manage_users%22%5D,%0A %7D,%0A timeout=ANY,%0A )%0A
|
|
1390de93f8f9703416dc465fc546a8883e96bada | add a header generator | EMControllerManagerHeaderGenerator.py | EMControllerManagerHeaderGenerator.py | Python | 0 | @@ -0,0 +1,1186 @@
+#!/usr/bin/env python%0A#coding:utf8%0Aimport getopt%0Aimport json%0Aimport sys%0A%0Adef generate_definition(input_file, output_path, prefix):%0A%0A%09with open(input_file, 'r') as json_file:%0A%09%09json_string = json_file.read()%0A%09%09config_dict = json.loads(json_string)%0A%09%09if not isinstance(config_dict,dict):%0A%09%09%09sys.stderr.write('configuration file is not failed')%0A%09%09%09exit(-1)%0A%0A%09%09with open(output_path, 'w') as o:%0A%09%09%09o.write('/* Generated by EMControllerManagerHeaderGenerator, do not edit it manually. */%5Cn%5Cn%5Cn')%0A%09%09%09for controller_name in config_dict:%0A%09%09%09%09if prefix is None:%0A%09%09%09%09%09def_name = controller_name%0A%09%09%09%09else:%0A%09%09%09%09%09def_name = %22%25s_%25s%22 %25 (prefix, controller_name)%0A%09%09%09%09o.write('#define %25s @%22%25s%22%5Cn' %25 (def_name, controller_name))%0A%09%0A%0Adef main():%0A%09try:%0A%09%09options, args = getopt.getopt(sys.argv%5B1:%5D,'i:o:p:')%0A%09except Exception, e:%0A%09%09print str(e)%0A%09%09raise e%0A%0A%09input_file = None%0A%09output_path = None%0A%09prefix = None%0A%09for o, a in options:%0A%09%09if o == '-i':%0A%09%09%09input_file = a%0A%09%09elif o == '-o':%0A%09%09%09output_path = a%0A%09%09elif o == '-p':%0A%09%09%09prefix = a%0A%0A%09if input_file is None or output_path is None:%0A%09%09print %22input error%22%0A%09%09exit(-1)%0A%09generate_definition (input_file, output_path, prefix)%0A%0Aif __name__ == '__main__':%0A%09main()%0A
|
|
10b8043463b6bcc89d4ce559548fa113f3d26190 | drop tables no longer needed by application | gem/migrations/0044_remove_deprecated_tables.py | gem/migrations/0044_remove_deprecated_tables.py | Python | 0 | @@ -0,0 +1,1610 @@
+# Generated by Django 2.2.15 on 2020-08-14 11:23%0A%0Afrom django.db import migrations%0A%0ATABLES = %5B%0A 'surveys_articletagrule',%0A 'surveys_combinationrule',%0A 'surveys_groupmembershiprule',%0A 'surveys_molosurveyformfield',%0A 'surveys_molosurveypage',%0A 'surveys_molosurveypage_translated_pages',%0A 'surveys_molosurveypageview',%0A 'surveys_molosurveysubmission',%0A 'surveys_personalisablesurvey',%0A 'surveys_personalisablesurveyformfield',%0A 'surveys_segmentusergroup',%0A 'surveys_segmentusergroup_users',%0A 'surveys_surveyresponserule',%0A 'surveys_surveysindexpage',%0A 'surveys_surveysubmissiondatarule',%0A 'surveys_surveytermsconditions',%0A 'surveys_termsandconditionsindexpage',%0A 'surveys_termsandconditionsindexpage_translated_pages',%0A%0A 'yourwords_termsandconditions',%0A 'yourwords_thankyou',%0A 'yourwords_yourwordscompetition',%0A 'yourwords_yourwordscompetition_translated_pages',%0A 'yourwords_yourwordscompetitionentry',%0A 'yourwords_yourwordscompetitionindexpage',%0A%0A 'polls_choice',%0A 'polls_choice_choice_votes',%0A 'polls_choice_translated_pages',%0A 'polls_choicevote',%0A 'polls_choicevote_choice',%0A 'polls_freetextquestion',%0A 'polls_freetextvote',%0A 'polls_pollsindexpage',%0A 'polls_question',%0A 'polls_question_translated_pages',%0A%5D%0A%0A%0Adef remove_tables(apps, schema_editor):%0A migrations.RunSQL('DROP TABLE IF EXISTS %7B%7D CASCADE;'.format(','.join(TABLES)))%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('gem', '0043_invite_site'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(remove_tables)%0A %5D%0A
|
|
9a678f5e856a5fcba82a1a9017dfbc841a660686 | Create ompotdar.py | Python/ompotdar.py | Python/ompotdar.py | Python | 0.000002 | @@ -0,0 +1,22 @@
+print(%22Hello World!%22)%0A
|
|
0f23004da949b974a071a788ff084c2cb685b95d | use a similar `repair_wheel.py` script as cmake | scripts/repair_wheel.py | scripts/repair_wheel.py | Python | 0.000002 | @@ -0,0 +1,2039 @@
+import argparse%0Aimport shutil%0Aimport subprocess%0Aimport sys%0Aimport tempfile%0Afrom pathlib import Path%0A%0Afrom convert_to_generic_platform_wheel import convert_to_generic_platform_wheel%0A%0A%0Adef main():%0A if sys.platform.startswith(%22linux%22):%0A os_ = %22linux%22%0A elif sys.platform == %22darwin%22:%0A os_ = %22macos%22%0A elif sys.platform == %22win32%22:%0A os_ = %22windows%22%0A else:%0A raise NotImplementedError(f%22sys.platform '%7Bsys.platform%7D' is not supported yet.%22)%0A%0A p = argparse.ArgumentParser(description=%22Convert wheel to be independent of python implementation and ABI%22)%0A p.set_defaults(prog=Path(sys.argv%5B0%5D).name)%0A p.add_argument(%22WHEEL_FILE%22, help=%22Path to wheel file.%22)%0A p.add_argument(%0A %22-w%22,%0A %22--wheel-dir%22,%0A dest=%22WHEEL_DIR%22,%0A help=('Directory to store delocated wheels (default: %22wheelhouse/%22)'),%0A default=%22wheelhouse/%22,%0A )%0A%0A args = p.parse_args()%0A%0A file = Path(args.WHEEL_FILE).resolve(strict=True)%0A wheelhouse = Path(args.WHEEL_DIR).resolve()%0A wheelhouse.mkdir(parents=True, exist_ok=True)%0A%0A with tempfile.TemporaryDirectory() as tmpdir_:%0A tmpdir = Path(tmpdir_)%0A # use the platform specific repair tool first%0A if os_ == %22linux%22:%0A subprocess.run(%5B%22auditwheel%22, %22repair%22, %22-w%22, str(tmpdir), str(file)%5D, check=True, stdout=subprocess.PIPE)%0A elif os_ == %22macos%22:%0A subprocess.run(%0A %5B%22delocate-wheel%22, %22--require-archs%22, %22x86_64%22, %22-w%22, str(tmpdir), str(file)%5D,%0A check=True,%0A stdout=subprocess.PIPE,%0A )%0A elif os_ == %22windows%22:%0A # no specific tool, just copy%0A shutil.copyfile(file, tmpdir / file.name)%0A files = list(tmpdir.glob(%22*.whl%22))%0A assert len(files) == 1, files%0A file = files%5B0%5D%0A%0A # make this a py2.py3 wheel%0A convert_to_generic_platform_wheel(%0A str(file),%0A out_dir=str(wheelhouse),%0A py2_py3=True,%0A )%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
cafb83befb2cee459d44a1332e5fc7e57edf81a6 | Add script to update cvsanaly databases | updateGit.py | updateGit.py | Python | 0 | @@ -0,0 +1,855 @@
+from jiradb import *%0A%0Aif __name__ == %22__main__%22:%0A log.setLevel(logging.DEBUG)%0A # Add console log handler%0A ch = logging.StreamHandler()%0A ch.setLevel(logging.INFO)%0A ch.setFormatter(logging.Formatter('%25(message)s'))%0A log.addHandler(ch)%0A # Add file log handler%0A fh = logging.FileHandler('updateGit.log')%0A fh.setLevel(logging.DEBUG)%0A fh.setFormatter(logging.Formatter('%5B%25(levelname)s @ %25(asctime)s%5D: %25(message)s'))%0A log.addHandler(fh)%0A # Add error file log handler%0A efh = logging.FileHandler('updateGitErrors.log')%0A efh.setLevel(logging.ERROR)%0A efh.setFormatter(logging.Formatter('%5B%25(levelname)s @ %25(asctime)s%5D: %25(message)s'))%0A log.addHandler(efh)%0A%0A args = getArguments()%0A jiradb = JIRADB(**args)%0A projectList = args%5B'projects'%5D%0A for project in projectList:%0A jiradb.getGitDB(project).update()%0A
|
|
23c65cc59f1cdf595090a7f25e80c03828aaba68 | add `examples/references` | src/openbandparams/examples/references.py | src/openbandparams/examples/references.py | Python | 0.000001 | @@ -0,0 +1,1175 @@
+#%0A# Copyright (c) 2013-2015, Scott J Maddox%0A#%0A# This file is part of openbandparams.%0A#%0A# openbandparams is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published%0A# by the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# openbandparams is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with openbandparams. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A#############################################################################%0A# Make sure we import the local openbandparams version%0Aimport os%0Aimport sys%0Asys.path.insert(0,%0A os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))%0Afrom openbandparams import *%0A%0A# Print all references used to calculate a parameter%0Aparameter = InAsSb.Eg%0Afor ref in parameter.get_references():%0A print ref
|
|
574659044cb501a2ac61006ddc1c389622172207 | add script to calculate cv from energy intervals | compute_cv.py | compute_cv.py | Python | 0 | @@ -0,0 +1,1484 @@
+import argparse%0Aimport numpy as np%0A%0A%0Adef compute_Z(energies, T, K):%0A beta = 1./T%0A N = len(energies)%0A Z = 0.%0A U = 0.%0A U2 = 0.%0A Cv = 0.%0A Emin = energies%5B-1%5D%0A Ediff = energies - Emin%0A for n in xrange(1, len(energies)-2):%0A# Z += (np.exp(-float(n-1) / K) - np.exp(-float(n+1) / K)) * np.exp(-beta * energies%5Bn%5D)%0A E = Ediff%5Bn%5D%0A Zpref = np.exp(-float(n-1) / K - beta * E) * (1. - np.exp(-2. / K))%0A %0A %0A Z += Zpref%0A U += Zpref * (E + Emin)%0A U2 += Zpref * (E + Emin)**2%0A %0A U /= Z%0A U2 /= Z%0A Cv = (U2 - U**2) * beta**2%0A %0A return Z, Cv, U, U2%0A%0A%0Aif __name__ == %22__main__%22:%0A parser = argparse.ArgumentParser(description=%22load energy intervals and compute cv%22)%0A# parser.add_argument(%22--db%22, type=str, nargs=1, help=%22database filename%22,%0A# default=%22otp.db%22)%0A parser.add_argument(%22K%22, type=int, help=%22number of replicas%22)%0A parser.add_argument(%22fname%22, type=str, help=%22filenames with energies%22)%0A args = parser.parse_args()%0A print args.fname%0A%0A energies = np.genfromtxt(args.fname)%0A %0A Tmin = .02%0A Tmax = .5%0A nT = 100%0A dT = (Tmax-Tmin) / nT%0A %0A T = np.array(%5BTmin + dT*i for i in range(nT)%5D)%0A Z, Cv, U, U2 = compute_Z(energies, T, args.K)%0A print Z, Cv%0A %0A with open(%22cv%22, %22w%22) as fout:%0A fout.write(%22#T Cv %3CE%3E %3CE**2%3E%5Cn%22)%0A for vals in zip(T, Cv, U, U2):%0A fout.write(%22%25g %25g %25g %25g%5Cn%22 %25 vals)%0A %0A %0A
|
|
998acbd4b490ef3807d79c245c27700d3e44d5da | Add a dummy pavement file. | tools/win32/build_scripts/pavement.py | tools/win32/build_scripts/pavement.py | Python | 0.999859 | @@ -0,0 +1,137 @@
+options(%0A setup=Bunch(%0A name = %22scipy-superpack%22,%0A )%0A)%0A%0A@task%0Adef setup():%0A print %22Setting up package %25s%22 %25 options.name%0A
|