Dataset schema (eight fields per record, one field per line, in this order):

    column        type           lengths / values
    commit        stringlengths  40 to 40
    subject       stringlengths  1 to 3.25k
    old_file      stringlengths  4 to 311
    new_file      stringlengths  4 to 311
    old_contents  stringlengths  0 to 26.3k
    lang          stringclasses  3 values
    proba         float64        0 to 1
    diff          stringlengths  0 to 7.82k
d1b8ab844d153a240c3f71965c0258b91613ea0f
Move test for adding devices to cache of nonexistent pool
tests/whitebox/integration/pool/test_init_cache.py
tests/whitebox/integration/pool/test_init_cache.py
# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test 'init-cache'. """ # isort: LOCAL from stratis_cli import StratisCliErrorCodes from stratis_cli._errors import StratisCliEngineError, StratisCliPartialChangeError from .._misc import RUNNER, SimTestCase, device_name_list _DEVICE_STRATEGY = device_name_list(2) _ERROR = StratisCliErrorCodes.ERROR class InitCacheFailTestCase(SimTestCase): """ Test 'init-cache' with two different lists of devices. 'init-cache' should always fail if the cache is initialized twice with different devices. """ _MENU = ["--propagate", "pool", "init-cache"] _POOLNAME = "deadpool" def setUp(self): """ Start stratisd and set up a pool. """ super().setUp() command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY() RUNNER(command_line) def test_init_cache(self): """ Test two initializations of the cache with two different device lists. Should fail. """ command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY() RUNNER(command_line) command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY() self.check_error(StratisCliEngineError, command_line, _ERROR) class InitCacheFail2TestCase(SimTestCase): """ Test 'init-cache' the same list of devices twice. 'init-cache' should always fail if the cache is initialized twice with the same devices. """ _MENU = ["--propagate", "pool", "init-cache"] _POOLNAME = "deadpool" def setUp(self): """ Start stratisd and set up a pool. """ super().setUp() command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY() RUNNER(command_line) def test_init_cache(self): """ Test two initializations of the cache with the same device list. Should fail. """ devices = _DEVICE_STRATEGY() command_line = self._MENU + [self._POOLNAME] + devices RUNNER(command_line) self.check_error(StratisCliPartialChangeError, command_line, _ERROR) class InitCacheSuccessTestCase(SimTestCase): """ Test 'init-cache' once. 'init-cache' should succeed. """ _MENU = ["--propagate", "pool", "init-cache"] _POOLNAME = "deadpool" def setUp(self): """ Start stratisd and set up a pool. """ super().setUp() command_line = ["pool", "create", self._POOLNAME] + _DEVICE_STRATEGY() RUNNER(command_line) def test_init_cache(self): """ Test an initialization of the cache with a device list. Should succeed. """ command_line = self._MENU + [self._POOLNAME] + _DEVICE_STRATEGY() RUNNER(command_line)
Python
0
@@ -598,16 +598,93 @@ '.%0A%22%22%22%0A%0A +# isort: FIRSTPARTY%0Afrom dbus_client_gen import DbusClientUniqueResultError%0A%0A # isort: @@ -2737,32 +2737,495 @@ line, _ERROR)%0A%0A%0A +class InitCacheFail3TestCase(SimTestCase):%0A %22%22%22%0A Test 'init-cache' for a non-existant pool.%0A %22%22%22%0A%0A _MENU = %5B%22--propagate%22, %22pool%22, %22init-cache%22%5D%0A _POOLNAME = %22deadpool%22%0A%0A def test_init_cache(self):%0A %22%22%22%0A Intializing the cache must fail since the pool does not exist.%0A %22%22%22%0A command_line = self._MENU + %5Bself._POOLNAME%5D + _DEVICE_STRATEGY()%0A self.check_error(DbusClientUniqueResultError, command_line, _ERROR)%0A%0A%0A class InitCacheS
332be5e6959a3254aaa269a8a4c6d26a2a18c492
Remove redundant tests
tests/whack_cli_test.py
tests/whack_cli_test.py
import os import subprocess import contextlib from nose.tools import istest, assert_equal import mock import testing from whack.tempdir import create_temporary_dir from whack.cli import main import whack.config @istest def application_is_installed_by_running_build_then_install_scripts(): with create_temporary_dir() as repo_dir, create_temporary_dir() as install_dir: testing.create_test_builder( repo_dir, testing.HelloWorld.BUILD, testing.HelloWorld.INSTALL ) subprocess.check_call(["whack", "install", "hello", install_dir, "--add-builder-repo", repo_dir]) output = subprocess.check_output([os.path.join(install_dir, "hello")]) assert_equal(testing.HelloWorld.EXPECTED_OUTPUT, output) @istest def no_builder_repos_are_used_if_add_builder_repo_is_not_set(): argv = ["whack", "install", "hello=1", "apps/hello"] operations = mock.Mock() main(argv, operations) calls = operations.install.mock_calls assert_equal(1, len(calls)) assert_equal([], calls[0].builder_uris) @istest def all_values_passed_to_add_builder_repo_are_combined_into_builder_uris_arg(): argv = [ "whack", "install", "hello=1", "apps/hello", "--add-builder-repo", "http://example.com/repo1", "--add-builder-repo", "http://example.com/repo2" ] expected_builder_uris = ["http://example.com/repo1", "http://example.com/repo2"] _test_install_arg_parse(argv, builder_uris=expected_builder_uris) @istest def params_are_passed_to_install_command_as_dict(): argv = [ "whack", "install", "hello=1", "apps/hello", "-p", "version=1.2.4", "-p", "pcre_version=8.32" ] expected_params = {"version": "1.2.4", "pcre_version": "8.32"} _test_install_arg_parse(argv, params=expected_params) @istest def param_values_can_contain_equals_sign(): argv = [ "whack", "install", "hello=1", "apps/hello", "-p", "version_range===1.2.4" ] expected_params = {"version_range": "==1.2.4"} _test_install_arg_parse(argv, params=expected_params) @istest def param_without_equal_sign_has_value_of_empty_string(): argv = [ "whack", "install", "hello=1", "apps/hello", "-p", "verbose" ] expected_params = {"verbose": ""} _test_install_arg_parse(argv, params=expected_params) @istest def http_cache_url_is_none_if_not_explicitly_set(): argv = ["whack", "install", "hello", "apps/hello"] caching_config = whack.config.caching_config(enabled=True, http_cache_url=None) _test_install_arg_parse(argv, caching=caching_config) @istest def http_cache_url_is_passed_along(): argv = ["whack", "install", "hello", "apps/hello", "--http-cache-url=http://localhost:1234/"] caching_config = whack.config.caching_config(enabled=True, http_cache_url="http://localhost:1234/") _test_install_arg_parse(argv, caching=caching_config) @istest def http_cache_url_uses_value_from_environment_if_not_explicitly_set(): argv = ["whack", "install", "hello", "apps/hello"] env = {"WHACK_HTTP_CACHE_URL": "http://localhost:1234/"} caching_config = whack.config.caching_config(enabled=True, http_cache_url="http://localhost:1234/") _test_install_arg_parse(argv, env, caching=caching_config) @istest def http_cache_url_ignores_value_from_environment_if_explicitly_set(): argv = ["whack", "install", "hello", "apps/hello", "--http-cache-url=http://localhost:1234/"] env = {"WHACK_HTTP_CACHE_URL": "http://localhost:5678/"} caching_config = whack.config.caching_config(enabled=True, http_cache_url="http://localhost:1234/") _test_install_arg_parse(argv, env, caching=caching_config) def _test_install_arg_parse(argv, env={}, **expected_kwargs): operations = mock.Mock() with _updated_env(env): main(argv, operations) assert_equal(1, len(operations.install.mock_calls)) args, kwargs = operations.install.call_args for key, value in expected_kwargs.iteritems(): assert_equal(value, kwargs[key]) @contextlib.contextmanager def _updated_env(env): original_env = os.environ.copy() for key, value in env.iteritems(): os.environ[key] = value yield for key in env: if key in original_env: os.environ[key] = original_env[value] else: del os.environ[key]
Python
0.003433
@@ -2402,270 +2402,8 @@ est%0A -def http_cache_url_is_none_if_not_explicitly_set():%0A argv = %5B%22whack%22, %22install%22, %22hello%22, %22apps/hello%22%5D%0A caching_config = whack.config.caching_config(enabled=True, http_cache_url=None)%0A _test_install_arg_parse(argv, caching=caching_config)%0A %0A@istest%0A def @@ -2705,782 +2705,8 @@ %0A -@istest%0Adef http_cache_url_uses_value_from_environment_if_not_explicitly_set():%0A argv = %5B%22whack%22, %22install%22, %22hello%22, %22apps/hello%22%5D%0A env = %7B%22WHACK_HTTP_CACHE_URL%22: %22http://localhost:1234/%22%7D%0A caching_config = whack.config.caching_config(enabled=True, http_cache_url=%22http://localhost:1234/%22)%0A _test_install_arg_parse(argv, env, caching=caching_config)%0A %0A@istest%0Adef http_cache_url_ignores_value_from_environment_if_explicitly_set():%0A argv = %5B%22whack%22, %22install%22, %22hello%22, %22apps/hello%22, %22--http-cache-url=http://localhost:1234/%22%5D%0A env = %7B%22WHACK_HTTP_CACHE_URL%22: %22http://localhost:5678/%22%7D%0A caching_config = whack.config.caching_config(enabled=True, http_cache_url=%22http://localhost:1234/%22)%0A _test_install_arg_parse(argv, env, caching=caching_config)%0A%0A def
12cc5e752f9aa4700b57e3647c3676aba70bb996
use valid exception for Python 2.7
tests/whitelist_test.py
tests/whitelist_test.py
# -*- coding: utf-8 -*- import pytest from riprova import ErrorWhitelist, NotRetriableError def test_error_whitelist(): whitelist = ErrorWhitelist() assert type(ErrorWhitelist.WHITELIST) is set assert len(whitelist._whitelist) > 4 assert type(whitelist._whitelist) is set assert whitelist._whitelist is not ErrorWhitelist.WHITELIST # Test setter whitelist.errors = (Exception, RuntimeError) # Test getter assert whitelist.errors == set([Exception, RuntimeError]) # Test add() whitelist.add(TimeoutError, SystemExit) assert whitelist.errors == set([Exception, RuntimeError, TimeoutError, SystemExit]) def test_error_whitelist_invalid(): whitelist = ErrorWhitelist() with pytest.raises(TypeError): whitelist.errors = dict() with pytest.raises(TypeError): whitelist.errors = None with pytest.raises(TypeError): whitelist.add(None) with pytest.raises(TypeError): whitelist.add(dict()) class NoRetryError(NotRetriableError): pass class RetryError(NotRetriableError): __retry__ = True @pytest.mark.parametrize("error,expected", [ (SystemExit(), True), (ImportError(), True), (ReferenceError(), True), (SyntaxError(), True), (KeyboardInterrupt(), True), (NotRetriableError(), True), (NoRetryError(), True), (RetryError(), False), (ReferenceError(), True), (Exception(), False), (RuntimeError(), False), (TypeError(), False), (ValueError(), False), ]) def test_error_whitelist_iswhitedlisted(error, expected): assert ErrorWhitelist().iswhitelisted(error) is expected
Python
0.000122
@@ -533,28 +533,29 @@ ist.add( -TimeoutError +BaseException , System @@ -661,20 +661,21 @@ -TimeoutError +BaseException , Sy
f000504c624e3b07a0df4c823a2f422dc1294ed9
fix test case
testss/test_training.py
testss/test_training.py
import os from unittest import TestCase from mlimages.model import LabelFile, ImageProperty import testss.env as env class TestLabel(TestCase): def test_make_mean(self): lf = self.get_label_file() mean_image_file = os.path.join(os.path.dirname(lf.path), "mean_image.png") imp = ImageProperty(32) td = lf.to_training_data(imp) td.make_mean_image(mean_image_file) self.assertTrue(os.path.isfile(mean_image_file)) lines = list(lf.fetch()) generated = list(td.generate()) self.assertEqual(len(lines), len(generated)) self.assertNotEqual(lf.path, td.label_file.path) os.remove(mean_image_file) os.remove(td.label_file.path) def test_batch(self): lf = self.get_label_file() mean_image_file = os.path.join(os.path.dirname(lf.path), "mean_image.png") imp = ImageProperty(32) # prepare td = lf.to_training_data(imp) td.make_mean_image(mean_image_file) # make batch data td.shuffle() count = 0 for x, y in td.generate_batches(1): self.assertEqual((1, 3, 32, 32), x.shape) self.assertEqual((1,), y.shape) count += 1 self.assertEqual(env.LABEL_FILE_COUNT, count) os.remove(mean_image_file) os.remove(td.label_file.path) def get_label_file(self): p = env.get_label_file_path() img_root = os.path.dirname(p) lf = LabelFile(p, img_root=img_root) return lf
Python
0.000022
@@ -63,19 +63,8 @@ port - LabelFile, Ima @@ -74,16 +74,59 @@ roperty%0A +from mlimages.training import TrainingData%0A import t @@ -202,34 +202,34 @@ (self):%0A -lf +td = self.get_labe @@ -220,34 +220,32 @@ = self.get_ -label_file +testdata ()%0A m @@ -282,34 +282,45 @@ os.path.dirname( -lf +td.label_file .path), %22mean_im @@ -325,32 +325,33 @@ image.png%22)%0A +%0A imp = ImageP @@ -342,70 +342,85 @@ -imp = ImageProperty(32)%0A%0A td = lf.to_training_data(imp) +pre_fetch = list(td.label_file.fetch())%0A pre_path = td.label_file.path %0A @@ -523,41 +523,8 @@ ))%0A%0A - lines = list(lf.fetch())%0A @@ -592,13 +592,17 @@ len( -lines +pre_fetch ), l @@ -644,19 +644,20 @@ otEqual( -lf. +pre_ path, td @@ -775,24 +775,25 @@ (self):%0A +%0A lf = sel @@ -784,18 +784,36 @@ -lf +# prepare%0A td = self. @@ -816,26 +816,24 @@ elf.get_ -label_file +testdata ()%0A @@ -882,18 +882,29 @@ dirname( -lf +td.label_file .path), @@ -925,97 +925,8 @@ g%22)%0A - imp = ImageProperty(32)%0A%0A # prepare%0A td = lf.to_training_data(imp)%0A @@ -1338,26 +1338,24 @@ def get_ -label_file +testdata (self):%0A @@ -1442,43 +1442,100 @@ -lf = LabelFile(p, img_root=img_root +prop = ImageProperty(32)%0A td = TrainingData(p, img_root=img_root, image_property=prop )%0A%0A @@ -1548,11 +1548,11 @@ return -lf +td %0A
69ba0847bde12b4da61502076f633eee856ec728
Improve get_user and get_netmask method
netadmin/shortcuts.py
netadmin/shortcuts.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2011 Adriano Monteiro Marques # # Author: Piotrek Wasilewski <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from datetime import timedelta from events.models import Event, EventType from networks.models import Host, Network from users.models import UserProfile from django.contrib.auth.models import User def get_events(time_from=None, time_to=None, source_hosts=[], event_types=[]): """ get_events(...) -> QuerySet Returns events, optionally filtering them by timestamp or source hosts. """ events = Event.objects.all() if source_hosts: pks = [host.pk for host in source_hosts] events = events.filter(source_host__pk__in=pks) if event_types: pks = [et.pk for et in event_types] events = events.filter(event_type__pk__in=pks) if time_from: events = events.filter(timestamp__gte=time_from) if time_to: events = events.filter(timestamp__lt=time_to) return events def get_eventtypes(user=None, alert=0): """ get_eventtypes(...) -> QuerySet Returns events' types, filtering them by user and/or alert level if specified. """ eventtypes = EventType.objects.all() if user: eventtypes = eventtypes.filter(user=user) if alert: eventtypes = eventtypes.filter(alert_level__gte=alert) return eventtypes def get_user_events(user): """Returns events reported to the specified user """ event_types = get_eventtypes(user) return get_events(event_types=event_types) def get_alerts(user=None): ets = [et.pk for et in get_eventtypes(user, 1)] return Event.objects.filter(event_type__pk__in=ets, checked=False) def _get_network_objects(subclass, user=None): objects = subclass.objects.all() if user: objects = objects.filter(user=user) return objects def get_host(id): return Host.objects.get(pk=id) def get_hosts(user=None): return _get_network_objects(Host, user) def get_network(id): return Network.objects.get(pk=id) def get_networks(user=None): return _get_network_objects(Network, user) def get_timezone(user=None): user = User.objects.get(username = user) id_user = user.id obj = UserProfile.objects.get(id = id_user) timezone = obj.timezone return timezone def get_netmask(user=None): obj = Host.objects.filter(user=user) ipv4_value = obj.values('ipv4_sub_net').distinct('ipv4_sub_net') ipv6_value = obj.values('ipv6_sub_net').distinct('ipv6_sub_net') return ipv4_value, ipv6_value
Python
0.000001
@@ -102,46 +102,26 @@ or: -Piotrek Wasilewski %3Cwasilewski.piotrek +Amit Pal %3Camix.pal @gma @@ -2862,33 +2862,19 @@ -id_ user - = user.id%0A obj +_object = U @@ -2905,56 +2905,40 @@ d = -id_ user +.id )%0A -timezone = obj.timezone%0A return +return user_object. time @@ -2979,39 +2979,49 @@ -obj = Host.objects.filter(user= +host_object = _get_network_objects(Host, user @@ -3025,25 +3025,25 @@ ser)%0A ipv -4 +6 _value = obj @@ -3031,35 +3031,43 @@ ipv6_value = -obj +host_object .values('ipv4_su @@ -3058,25 +3058,25 @@ .values('ipv -4 +6 _sub_net').d @@ -3087,17 +3087,17 @@ nct('ipv -4 +6 _sub_net @@ -3098,33 +3098,33 @@ ub_net')%0A ipv -6 +4 _value = obj.val @@ -3116,19 +3116,27 @@ value = -obj +host_object .values( @@ -3131,33 +3131,33 @@ ject.values('ipv -6 +4 _sub_net').disti @@ -3156,33 +3156,33 @@ ').distinct('ipv -6 +4 _sub_net')%0A r
4b2ec461badcfcc7f25ec66856b8cfc4fa064106
Fix BrowserTest on content shell.
tools/telemetry/telemetry/core/browser_unittest.py
tools/telemetry/telemetry/core/browser_unittest.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os import unittest from telemetry.core import browser_finder from telemetry.test import options_for_unittests class BrowserTest(unittest.TestCase): def setUp(self): self._browser = None def CreateBrowser(self, extra_browser_args=None, profile_type=None): assert not self._browser options = options_for_unittests.GetCopy() if profile_type: # TODO(jeremy): crbug.com/243912 profiles are only implemented on # Desktop. is_running_on_desktop = not ( options.browser_type.startswith('android') or options.browser_type.startswith('cros')) if not is_running_on_desktop: logging.warn("Desktop-only test, skipping.") return None options.profile_type = profile_type if extra_browser_args: options.extra_browser_args.extend(extra_browser_args) browser_to_create = browser_finder.FindBrowser(options) if not browser_to_create: raise Exception('No browser found, cannot continue test.') self._browser = browser_to_create.Create() unittest_data_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'unittest_data') self._browser.SetHTTPServerDirectories(unittest_data_dir) return self._browser def tearDown(self): if self._browser: self._browser.Close() def testBrowserCreation(self): b = self.CreateBrowser() self.assertEquals(1, len(b.tabs)) # Different browsers boot up to different things. assert b.tabs[0].url def testCommandLineOverriding(self): # This test starts the browser with --user-agent=telemetry. This tests # whether the user agent is then set. flag1 = '--user-agent=telemetry' b = self.CreateBrowser(extra_browser_args=[flag1]) t = b.tabs[0] t.Navigate(b.http_server.UrlOf('blank.html')) t.WaitForDocumentReadyStateToBeInteractiveOrBetter() self.assertEquals(t.EvaluateJavaScript('navigator.userAgent'), 'telemetry') def testVersionDetection(self): b = self.CreateBrowser() v = b._browser_backend._inspector_protocol_version # pylint: disable=W0212 self.assertTrue(v > 0) v = b._browser_backend._chrome_branch_number > 0 # pylint: disable=W0212 self.assertTrue(v > 0) def testNewCloseTab(self): b = self.CreateBrowser() existing_tab = b.tabs[0] self.assertEquals(1, len(b.tabs)) existing_tab_url = existing_tab.url new_tab = b.tabs.New() self.assertEquals(2, len(b.tabs)) self.assertEquals(existing_tab.url, existing_tab_url) self.assertEquals(new_tab.url, 'about:blank') new_tab.Close() self.assertEquals(1, len(b.tabs)) self.assertEquals(existing_tab.url, existing_tab_url) def testMultipleTabCalls(self): b = self.CreateBrowser() b.tabs[0].Navigate(b.http_server.UrlOf('blank.html')) b.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter() def testTabCallByReference(self): b = self.CreateBrowser() tab = b.tabs[0] tab.Navigate(b.http_server.UrlOf('blank.html')) b.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter() def testCloseReferencedTab(self): b = self.CreateBrowser() b.tabs.New() tab = b.tabs[0] tab.Navigate(b.http_server.UrlOf('blank.html')) tab.Close() self.assertEquals(1, len(b.tabs)) def testDirtyProfileCreation(self): b = self.CreateBrowser(profile_type = 'small_profile') # TODO(jeremy): crbug.com/243912 profiles are only implemented on Desktop if not b: return self.assertEquals(1, len(b.tabs))
Python
0.000635
@@ -2534,24 +2534,150 @@ teBrowser()%0A + if not b.supports_tab_control:%0A logging.warning('Browser does not support tab control, skipping test.')%0A return%0A existing @@ -3510,32 +3510,158 @@ CreateBrowser()%0A + if not b.supports_tab_control:%0A logging.warning('Browser does not support tab control, skipping test.')%0A return%0A b.tabs.New()
eadec2e53404407a7f40df483d1f3d75b599a667
Fix PID location
cax/main.py
cax/main.py
from cax.tasks import checksum, clear, copy import os import sys import logging import time from cax.config import password import daemonocle def main2(): password() # Check password specified logging.basicConfig(filename='example.log', level=logging.DEBUG, format='%(asctime)s [%(levelname)s] %(message)s') logging.info('Daemon is starting') tasks = [checksum.AddChecksum(), checksum.CompareChecksums(), clear.ClearDAQBuffer(), copy.SCPPush()] while True: for task in tasks: logging.info("Executing %s." % task.__class__.__name__) task.go() logging.debug('Sleeping.') time.sleep(10) def main(): password() # Check password specified daemon = daemonocle.Daemon(worker=main, pidfile='cax.pid') daemon.do_action(sys.argv[1]) if __name__ == '__main__': main2()
Python
0.000004
@@ -880,16 +880,106 @@ pidfile= +os.path.join(os.path.expanduser(%22~%22),%0A 'cax.pid @@ -980,16 +980,17 @@ ax.pid') +) %0A dae
61cb2f72d94e8bd771e3130d68f753513e5818d5
Add lstrip, rstrip, strip methods
ansi_str.py
ansi_str.py
import re _ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') def strip_ansi(value): return _ansi_re.sub('', value) def len_exclude_ansi(value): return len(strip_ansi(value)) class ansi_str(str): """A str subclass, specialized for strings containing ANSI escapes. When you call the ``len`` method, it discounts ANSI color escape codes. This is beneficial, because ANSI color escape codes won't mess up code that tries to do alignment, padding, printing in columns, etc. """ _stripped = None @property def stripped(self): if self._stripped is None: self._stripped = strip_ansi(self[:]) return self._stripped def __len__(self, exclude_ansi=True): if exclude_ansi is False: return len(self[:]) return len(self.stripped) def ljust(self, width): return self.stripped.ljust(width).replace(self.stripped, self) def rjust(self, width): return self.stripped.rjust(width).replace(self.stripped, self) def center(self, width): return self.stripped.center(width).replace(self.stripped, self) if __name__ == '__main__': # s = ansi_str('abc') # print s # print len(s) s = ansi_str(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m') print s print len(s) print s.__len__() print s.__len__(exclude_ansi=False) print(len_exclude_ansi(u'\x1b[32m\x1b[1mSUCCESS\x1b[0m'))
Python
0
@@ -1125,16 +1125,251 @@ self)%0A%0A + def lstrip(self):%0A return ansi_str(super(ansi_str, self).lstrip())%0A%0A def rstrip(self):%0A return ansi_str(super(ansi_str, self).rstrip())%0A%0A def strip(self):%0A return ansi_str(super(ansi_str, self).strip())%0A%0A %0Aif __na
bc2c1a9d4c060242db1273e9608c629b2e0243cc
Fix _version.py
thermostate/_version.py
thermostate/_version.py
"""The version of thermohw.""" from typing import Tuple __version_info__: Tuple[int, int, int, str] = (0, 4, 1, 'dev0') __version__ = '.'.join([str(v) for v in __version_info__ if str(v)])
Python
0.998479
@@ -28,50 +28,53 @@ %22%22%22%0A -from typing import Tuple%0A%0A__version_info__ +%0A__version_info__ = (0, 4, 1, 'dev0') # type : Tu @@ -96,36 +96,16 @@ nt, str%5D - = (0, 4, 1, 'dev0') %0A__versi
556e0e3474e379427a08e1646274f596c7e4e5ef
Remove unused but circular import
angular_flask/models.py
angular_flask/models.py
from datetime import datetime from angular_flask import db from angular_flask import app class Wig(db.Model): id = db.Column(db.Integer, primary_key=True) span = db.Column(db.String) def __repr__(self): return "Wig: {}".format(self.id) class WigValue(db.Model): id = db.Column(db.Integer, primary_key=True) position = db.Column(db.Integer) value = db.Column(db.Integer) id_wig = db.Column(db.Integer, db.ForeignKey('wig.id')) wig = db.relationship("Wig",backref=db.backref("values",order_by=position)) def __init__(self, position, value): self.position = position self.value = value def __repr__(self): return "{}".format(self.value) class Bed(db.Model): id = db.Column(db.Integer, primary_key=True) chrom = db.Column(db.String) chromStart = db.Column(db.Integer) chromEnd = db.Column(db.Integer) name = db.Column(db.String) score = db.Column(db.Integer) strand = db.Column(db.Boolean) thick_start = db.Column(db.Integer) thick_end = db.Column(db.Integer) item_RGB = db.Column(db.Integer) item_RGB = db.Column(db.Integer) blockCount = db.Column(db.Integer) blockSizes = db.Column(db.Integer) blockStarts = db.Column(db.Integer) def __repr__(self): return "{}".format(self.name) class Annotation(db.Model): id = db.Column(db.Integer, primary_key=True) seqname = db.Column(db.String) source = db.Column(db.String) feature = db.Column(db.String) start = db.Column(db.Integer) end = db.Column(db.Integer) score = db.Column(db.Integer) strand = db.Column(db.Boolean) frame = db.Column(db.Integer) attribute = db.Column(db.String) def __repr__(self): return "{}".format(self.seqname) class Fasta(db.Model): id = db.Column(db.Integer, primary_key=True) header = db.Column(db.String) def __init__(self, header): self.header = header def __repr__(self): return "{}".format(self.header) class BasePair(db.Model): id = db.Column(db.Integer, primary_key=True) nucleotide = db.Column(db.String(1)) position = db.Column(db.Integer) fasta_id = db.Column(db.Integer, db.ForeignKey('fasta.id')) fasta = db.relationship("Fasta",backref=db.backref("base_pairs",order_by=position)) def __init__(self, position, nucleotide): self.position = position self.nucleotide = nucleotide def __repr__(self): return self.nucleotide class User(db.Model): id = db.Column(db.Integer, primary_key=True) user_name = db.Column(db.String) email = db.Column(db.String) def __init__(self,user_name,email): self.user_name = user_name self.email = email def __repr__(self): return self.user_name
Python
0
@@ -2,93 +2,31 @@ rom -datetime import datetime%0A%0Afrom angular_flask import db%0Afrom angular_flask import app%0A +angular_flask import db %0A%0Acl
1b9fe5a4c138c9d4a1f4b2cf231f52d51310acb9
Update exit code when no datasets specified
cchecker.py
cchecker.py
#!/usr/bin/env python from __future__ import print_function import argparse import sys from compliance_checker.runner import ComplianceChecker, CheckSuite from compliance_checker.cf.util import download_cf_standard_name_table from compliance_checker import __version__ def main(): # Load all available checker classes check_suite = CheckSuite() check_suite.load_all_available_checkers() parser = argparse.ArgumentParser() parser.add_argument('--test', '-t', '--test=', '-t=', default=[], action='append', help=("Select the Checks you want to perform. Defaults to 'acdd'" " if unspecified. Versions of standards can be specified via " "`-t <test_standard>:<version>`. If `<version>` is omitted, or " "is \"latest\", the latest version of the test standard is used.")) parser.add_argument('--criteria', '-c', help=("Define the criteria for the checks. " "Either Strict, Normal, or Lenient. Defaults to Normal."), nargs='?', default='normal', choices=['lenient', 'normal', 'strict']) parser.add_argument('--verbose', '-v', help="Increase output. May be specified up to three times.", action="count", default=0) parser.add_argument('--skip-checks', '-s', help="Specifies tests to skip", action='append') parser.add_argument('-f', '--format', default=[], action='append', help=("Output format(s). Options are 'text', 'html', 'json', 'json_new'." " The difference between the 'json' and the 'json_new'" " formats is that the 'json' format has the check as the top level" " key, whereas the 'json_new' format has the dataset name(s) as the" " main key in the output follow by any checks as subkeys. Also, " "'json' format can be only be run against one input file, whereas " "'json_new' can be run against multiple files.")) parser.add_argument('-o', '--output', default=[], action='append', help=("Output filename(s). If '-' is supplied, output to stdout." " Can either be one or many files. If one file is supplied," " but the checker is run against many files, all the output" " from the checks goes to that file (does not presently work " "with 'json' format). If more than one output file is " "supplied, the number of input datasets supplied must match " "the number of output files.")) parser.add_argument('-V', '--version', action='store_true', help='Display the IOOS Compliance Checker version information.') parser.add_argument('dataset_location', nargs='*', help="Defines the location of the dataset to be checked.") parser.add_argument('-l', '--list-tests', action='store_true', help='List the available tests') parser.add_argument('-d', '--download-standard-names', help=("Specify a version of the cf standard name table" " to download as packaged version")) # Add command line args from generator plugins check_suite.add_plugin_args(parser) args = parser.parse_args() check_suite.load_generated_checkers(args) if args.version: print("IOOS compliance checker version %s" % __version__) return 0 if args.list_tests: print("IOOS compliance checker available checker suites:") for checker in sorted(check_suite.checkers.keys()): version = getattr(check_suite.checkers[checker], '_cc_checker_version', "???") if args.verbose: print(" - {} (v{})".format(checker, version)) elif ':' in checker and not checker.endswith(':latest'): # Skip the "latest" output print(" - {}".format(checker)) return 0 if args.download_standard_names: download_cf_standard_name_table(args.download_standard_names) if len(args.dataset_location) == 0: parser.print_help() return 0 # Check the number of output files if not args.output: args.output = '-' output_len = len(args.output) if not (output_len == 1 or output_len == len(args.dataset_location)): print('The number of output files must either be one or the same as the number of datasets', file=sys.stderr) sys.exit(2) # Check the output formats format_choices = ['text', 'html', 'json', 'json_new'] for out_format in args.format: if out_format not in format_choices: print(("Error: argument -f/--format: invalid choice: '{}'" " (choose from 'text', 'html', 'json', 'json_new')".format(out_format))) sys.exit(2) # Run the compliance checker # 2 modes, concatenated output file or multiple output files return_values = [] had_errors = [] if output_len == 1: if args.format != 'json': print("Running Compliance Checker on the datasets from: {}".format(args.dataset_location), file=sys.stderr) return_value, errors = ComplianceChecker.run_checker(args.dataset_location, args.test or ['acdd'], args.verbose, args.criteria, args.skip_checks, args.output[0], args.format or ['text']) return_values.append(return_value) had_errors.append(errors) else: for output, dataset in zip(args.output, args.dataset_location): if args.format != 'json': print("Running Compliance Checker on the dataset from: {}".format(dataset), file=sys.stderr) return_value, errors = ComplianceChecker.run_checker([dataset], args.test or ['acdd'], args.verbose, args.criteria, args.skip_checks, output, args.format or ['text']) return_values.append(return_value) had_errors.append(errors) if any(had_errors): return 2 if all(return_values): return 0 return 1 if __name__ == "__main__": sys.exit(main())
Python
0
@@ -4617,25 +4617,25 @@ return -0 +1 %0A%0A # Chec
01674bb349e9850b26aeae212ad77aa992f18ab5
bump version
lava_scheduler_app/__init__.py
lava_scheduler_app/__init__.py
# Copyright (C) 2011 Linaro Limited # # Author: Michael Hudson-Doyle <[email protected]> # # This file is part of LAVA Scheduler. # # LAVA Scheduler is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License version 3 as # published by the Free Software Foundation # # LAVA Scheduler is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>. __version__ = (0, 3, 0, "final", 0)
Python
0
@@ -742,17 +742,17 @@ _ = (0, -3 +4 , 0, %22fi
cb5a8ac1b74cdeeea5901bb22d8600ace8f5b6e1
Allow parsing lists of dictionaries as well as dictionaries in JSON structures
tools/json_extractor.py
tools/json_extractor.py
#!/usr/bin/env python # ---------------------------------------------------------------------- # Copyright (c) 2013-2016 Raytheon BBN Technologies # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and/or hardware specification (the "Work") to # deal in the Work without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Work, and to permit persons to whom the Work # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Work. # # THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS # IN THE WORK. # ---------------------------------------------------------------------- import json import sys def main(): if len(sys.argv) <= 2: print "Usage: json_extractor.py field filename" sys.exit(0) fields = sys.argv[1] # Comma separated filename = sys.argv[2] data = open(filename).read() jdata = json.loads(data) field_list = fields.split(',') result = jdata for field in field_list: result = result[field] print result if __name__ == "__main__": sys.exit(main())
Python
0.000003
@@ -1604,30 +1604,386 @@ -result = result%5Bfield%5D +if type(result) == dict:%0A result = result%5Bfield%5D%0A elif type(result) == list:%0A field_parts = field.split('=')%0A field_name = field_parts%5B0%5D%0A field_value = field_parts%5B1%5D%0A for entry in result:%0A if entry%5Bfield_name%5D == field_value:%0A result = entry%0A break%0A %0A
df35ebdcebc8704f964d3301004fcaf88e70336f
fix filereader cd:/ replacement
tools/lib/filereader.py
tools/lib/filereader.py
import os from tools.lib.url_file import URLFile DATA_PREFIX = os.getenv("DATA_PREFIX", "http://data-raw.internal/") def FileReader(fn, debug=False): if fn.startswith("cd:/"): fn.replace("cd:/", DATA_PREFIX) if fn.startswith("http://") or fn.startswith("https://"): return URLFile(fn, debug=debug) return open(fn, "rb")
Python
0
@@ -176,16 +176,21 @@ /%22):%0A + fn = fn.repl
2bf8f7beac5ee32e7cb3085da392055603ab88d6
Fix request method
users/tests/test_api.py
users/tests/test_api.py
from django.core.urlresolvers import reverse from rest_framework import status from rest_framework.test import APITestCase from ..models import User class UserTest(APITestCase): """Tests for /users/ API endpoints.""" def test_view_user_logged_out(self): user = User.objects.create(name="Trey", email="[email protected]") url = reverse('user-detail', args=[user.pk]) response = self.client.get(url, format='json') assert response.status_code == status.HTTP_200_OK assert (response.data == { 'id': user.id, 'name': user.name, }) def test_same_user(self): user = User.objects.create(name="Trey", email="[email protected]") url = reverse('user-detail', args=[user.pk]) self.client.force_authenticate(user=user) response = self.client.get(url, format='json') assert response.status_code == status.HTTP_200_OK assert (response.data == { 'id': user.id, 'name': user.name, 'email': user.email, }) def test_different_user(self): user1 = User.objects.create(name="User1", email="[email protected]") user2 = User.objects.create(name="User2", email="[email protected]") url = reverse('user-detail', args=[user1.pk]) self.client.force_authenticate(user=user2) response = self.client.get(url, format='json') assert response.status_code == status.HTTP_200_OK assert (response.data == { 'id': user1.id, 'name': user1.name, }) def test_me_logged_out(self): url = reverse('user-detail', args=['me']) response = self.client.get(url, format='json') assert response.status_code == status.HTTP_404_NOT_FOUND def test_me_logged_in(self): user = User.objects.create(name="Trey", email="[email protected]") url = reverse('user-detail', args=['me']) self.client.force_authenticate(user=user) response = self.client.get(url, format='json') assert response.status_code == status.HTTP_200_OK assert (response.data == { 'id': user.id, 'name': user.name, 'email': user.email, }) def test_update_current_user(self): user = User.objects.create(name="Trey", email="[email protected]") url = reverse('user-detail', args=[user.pk]) self.client.force_authenticate(user=user) response = self.client.post(url, format='json', data={ 'name': "Bob", 'email': "[email protected]", }) assert response.status_code == status.HTTP_200_OK assert (response.data == { 'id': user.id, 'name': user.name, 'email': user.email, })
Python
0.000001
@@ -2493,10 +2493,9 @@ nt.p -os +u t(ur
8c759f531e6a3cdc0e2b40321153147a7ec00b40
update docstrings to reflect recent AcademicDatabase refactoring
angular_flask/classtime/academic_calendar.py
angular_flask/classtime/academic_calendar.py
import sys import re from academic_databases.abstract_academicdb import AcademicDatabase class AcademicCalendar(object): """ Gives access to academic calendar data contained in an LDAP server """ def __init__(self, institution_name): """ Initialize the Calendar with a database connection to a specific institution, defined as a JSON config file """ try: self._course_db = AcademicDatabase.build(institution_name) self._course_db.connect() except: raise self._all_terms = self._course_db.search('terms') self._term = None self._all_courses = None def select_current_term(self, termid): if termid not in [term.get('term') for term in self._all_terms]: raise Exception('Term #{} not found'.format(termid)) self._term = termid self._populate_courses_for_current_term() def get_term_list(self): return self._all_terms def get_courses_for_current_term(self): return self._all_courses def _populate_courses_for_current_term(self): """ Prerequisite: Must have set the current term with select_current_term() Populates the courses dictionary with all courses available in the currently selected term """ if self._term == None: raise Exception('Must select a term before looking for courses!') current_term = 'term={}'.format(self._term) self._all_courses = self._course_db.search('courses', path=current_term) def _populate_sections_for_course(self, course): current_course = 'course={},term={}'.format(course['course'], self._term) sections = self._course_db.search('sections', path=current_course) for section in sections: # class_ is a field in the Section sqlalchemy model # because class is a reserved keyword in python section['class_'] = section.get('class') section.pop('class', None) current_section = 'class={},{}'.format(section.get('class_'), current_course) classtimes = self._course_db.search('classtimes', path=current_section) if len(classtimes) == 1: classtime = classtimes[0] else: classtime = dict() section['day'] = classtime.get('day') section['location'] = classtime.get('location') section['startTime'] = classtime.get('startTime') section['endTime'] = classtime.get('endTime') course['sections'] = sections return course
Python
0
@@ -132,23 +132,15 @@ -Gives access to +Manages aca @@ -158,44 +158,188 @@ dar -data contained in%0A an LDAP server +information, including%0A terms, courses and sections.%0A%0A Connects to an institution's course database using any %0A implementation of the AcademicDatabase abstract base class. %0A @@ -493,39 +493,156 @@ tion -, defined as a JSON config fi + whose configuration is defined%0A by a JSON file in academic_databases/institutions.%0A%0A See 'institutions/ualberta.json' for an examp le +. %0A
cbe7791a0276538416556a6b34769b96071e6115
fix wfgen fail on windows
lbworkflow/flowgen/__init__.py
lbworkflow/flowgen/__init__.py
import inspect import os import shutil import stat from jinja2 import Environment from jinja2 import FileSystemLoader __all__ = ( 'FlowAppGenerator', 'clean_generated_files' ) def clean_generated_files(model_class): folder_path = os.path.dirname(inspect.getfile(model_class)) for path, dirs, files in os.walk(folder_path): if not path.endswith(model_class.__name__.lower()): shutil.rmtree(path) for file in files: if file not in ['models.py', 'wfdata.py', '__init__.py']: try: os.remove(os.path.join(path, file)) except: # NOQA pass def get_fields(model_class): fields = [] ignore_fields = ['id', 'pinstance', 'created_on', 'created_by'] for f in model_class._meta.fields: if f.name not in ignore_fields: fields.append(f) return fields def get_field_names(model_class): fields = get_fields(model_class) return ', '.join(["'%s'" % e.name for e in fields]) def group(flat_list): for i in range(len(flat_list) % 2): flat_list.append(None) pass return list(zip(flat_list[0::2], flat_list[1::2])) class FlowAppGenerator(object): def __init__(self, app_template_path=None): if not app_template_path: app_template_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'app_template') self.app_template_path = app_template_path super(FlowAppGenerator, self).__init__() def init_env(self, template_path): loader = FileSystemLoader(template_path) self.env = Environment( block_start_string='[%', block_end_string='%]', variable_start_string='[[', variable_end_string=']]', comment_start_string='[#', comment_end_string='#]', loader=loader, ) def gen(self, model_class, item_model_class_list=None, wf_code=None, replace=False, ignores=['wfdata.py']): dest = os.path.dirname(inspect.getfile(model_class)) app_name = model_class.__module__.split('.')[-2] if not wf_code: wf_code = app_name ctx = { 'app_name': app_name, 'wf_code': wf_code, 'class_name': model_class.__name__, 'wf_name': model_class._meta.verbose_name, 'field_names': get_field_names(model_class), 'fields': get_fields(model_class), 'grouped_fields': group(get_fields(model_class)), } if item_model_class_list: item_list = [] for item_model_class in item_model_class_list: item_ctx = { 'class_name': item_model_class.__name__, 'lowercase_class_name': item_model_class.__name__.lower(), 'field_names': get_field_names(item_model_class), 'fields': get_fields(item_model_class), 'grouped__fields': group(get_fields(item_model_class)), } item_list.append(item_ctx) ctx['item_list'] = item_list self.copy_template(self.app_template_path, dest, ctx, replace, ignores) def copy_template(self, src, dest, ctx={}, replace=False, ignores=[]): self.init_env(src) for path, dirs, files in os.walk(src): relative_path = path[len(src):].lstrip(os.sep) dest_path = os.path.join(dest, relative_path) dest_path = dest_path.replace('app_name', ctx.get('app_name', 'app_name')) if not os.path.exists(dest_path): os.mkdir(dest_path) for i, subdir in enumerate(dirs): if subdir.startswith('.'): del dirs[i] for filename in files: if filename.endswith('.pyc') or filename.startswith('.'): continue src_file_path = os.path.join(path, filename) src_file_path = src_file_path[len(src):].strip(os.path.sep) dest_file_path = os.path.join(dest, relative_path, filename) dest_file_path = dest_file_path.replace('app_name', ctx.get('app_name', 'app_name')) if dest_file_path.endswith('-tpl'): dest_file_path = dest_file_path[:-4] is_exists = os.path.isfile(dest_file_path) for ignore in ignores: if dest_file_path.endswith(ignore): replace = False if is_exists and not replace: continue self.copy_template_file(src_file_path, dest_file_path, ctx) def copy_template_file(self, src, dest, ctx={}): template = self.env.get_template(src) template.stream(ctx).dump(dest, encoding='utf-8') # Make new file writable. if os.access(dest, os.W_OK): st = os.stat(dest) new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR os.chmod(dest, new_permissions)
Python
0
@@ -4769,16 +4769,270 @@ tx=%7B%7D):%0A + if os.path.sep != '/':%0A # https://github.com/pallets/jinja/issues/767%0A # Jinja template names are not fileystem paths. They always use forward slashes so this is working as intended.%0A src = src.replace('%5C%5C', '/')%0A
2d99310234e06e8d8b2b42703e107c251e6758ad
deal with new groups being added to a user
ansible/roles/lib_zabbix/library/zbx_user.py
ansible/roles/lib_zabbix/library/zbx_user.py
#!/usr/bin/env python ''' ansible module for zabbix users ''' # vim: expandtab:tabstop=4:shiftwidth=4 # # Zabbix user ansible module # # # Copyright 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This is in place because each module looks similar to each other. # These need duplicate code as their behavior is very similar # but different for each zabbix class. # pylint: disable=duplicate-code # pylint: disable=import-error from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection def exists(content, key='result'): ''' Check if key exists in content or the size of content[key] > 0 ''' if not content.has_key(key): return False if not content[key]: return False return True def get_usergroups(zapi, usergroups): ''' Get usergroups ''' ugroups = [] for ugr in usergroups: content = zapi.get_content('usergroup', 'get', {'search': {'name': ugr}, #'selectUsers': 'userid', #'getRights': 'extend' }) if content['result']: ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']}) return ugroups or None def get_passwd(passwd): '''Determine if password is set, if not, return 'zabbix' ''' if passwd: return passwd return 'zabbix' def get_usertype(user_type): ''' Determine zabbix user account type ''' if not user_type: return None utype = 1 if 'super' in user_type: utype = 3 elif 'admin' in user_type or user_type == 'admin': utype = 2 return utype def main(): ''' ansible zabbix module for users ''' ##def user(self, name, state='present', params=None): module = AnsibleModule( argument_spec=dict( zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), zbx_debug=dict(default=False, type='bool'), login=dict(default=None, type='str'), first_name=dict(default=None, type='str'), last_name=dict(default=None, type='str'), user_type=dict(default=None, type='str'), password=dict(default=None, type='str'), refresh=dict(default=None, type='int'), update_password=dict(default=False, type='bool'), user_groups=dict(default=[], type='list'), state=dict(default='present', type='str'), ), #supports_check_mode=True ) zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], module.params['zbx_user'], module.params['zbx_password'], module.params['zbx_debug'])) ## before we can create a user media and users with media types we need media zbx_class_name = 'user' idname = "userid" state = module.params['state'] content = zapi.get_content(zbx_class_name, 'get', {'output': 'extend', 'search': {'alias': module.params['login']}, "selectUsrgrps": 'usergrpid', }) if state == 'list': module.exit_json(changed=False, results=content['result'], state="list") if state == 'absent': if not exists(content) or len(content['result']) == 0: module.exit_json(changed=False, state="absent") content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) module.exit_json(changed=True, results=content['result'], state="absent") if state == 'present': params = {'alias': module.params['login'], 'passwd': get_passwd(module.params['password']), 'usrgrps': get_usergroups(zapi, module.params['user_groups']), 'name': module.params['first_name'], 'surname': module.params['last_name'], 'refresh': module.params['refresh'], 'type': get_usertype(module.params['user_type']), } # Remove any None valued params _ = [params.pop(key, None) for key in params.keys() if params[key] is None] if not exists(content): # if we didn't find it, create it content = zapi.get_content(zbx_class_name, 'create', params) if content.has_key('Error'): module.exit_json(failed=True, changed=False, results=content, state='present') module.exit_json(changed=True, results=content['result'], state='present') # already exists, we need to update it # let's compare properties differences = {} # Update password if not module.params['update_password']: params.pop('passwd', None) zab_results = content['result'][0] for key, value in params.items(): if key == 'usrgrps': # this must be done as a list of ordered dictionaries fails comparison if not all([_ in value for _ in zab_results[key]]): differences[key] = value elif zab_results[key] != value and zab_results[key] != str(value): differences[key] = value if not differences: module.exit_json(changed=False, results=zab_results, state="present") # We have differences and need to update differences[idname] = zab_results[idname] content = zapi.get_content(zbx_class_name, 'update', differences) module.exit_json(changed=True, results=content['result'], state="present") module.exit_json(failed=True, changed=False, results='Unknown state passed. %s' % state, state="unknown") # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled # import module snippets. This are required from ansible.module_utils.basic import * main()
Python
0
@@ -5908,56 +5908,342 @@ -if not all(%5B_ in value for _ in zab_results%5Bkey%5D +# if the current zabbix group list is not all in the%0A # provided group list%0A # or the provided group list is not all in the current zabbix%0A # group list%0A if not all(%5B_ in value for _ in zab_results%5Bkey%5D%5D) %5C%0A or not all(%5B_ in zab_results%5Bkey%5D for _ in value %5D):%0A
ab78bf2c47a8bec5c1d0c5a7951dba1c98f5c28e
Revert file to moneymanager master branch.
check_gm.py
check_gm.py
#!/usr/bin/env python3 # vi:tabstop=4:expandtab:shiftwidth=4:softtabstop=4:autoindent:smarttab import os, sys import sqlite3 import urllib.request err = False for version in range (7, 14): fname = 'tables_v1.sql' if version < 12 else 'tables.sql' url = 'https://cdn.jsdelivr.net/gh/moneymanagerex/database@v%i/%s' % (version, fname) schema = urllib.request.urlopen(url).read().decode('utf-8') db = sqlite3.connect(':memory:') db.executescript(schema) print('\nTesting reports with MMEX db schema v%i:' % version) print('-' * 40) for root, dirs, files in os.walk('.'): for sql in files: if sql=='sqlcontent.sql': try: db.executescript(open(os.path.join(root, sql)).read()) except sqlite3.Error as e: print('ERR', os.path.basename(root).ljust(40), e.args[0]) err = True else: print('OK ', os.path.basename(root)) db.rollback() db.close() if err: sys.exit(1)
Python
0
@@ -1030,8 +1030,9 @@ .exit(1) +%0A
40b2c9a74b8d97edec5d643de7f2c8b8d8bf08fa
Revise to class SolutionSortAndBinarySearch
lc480_sliding_window_median.py
lc480_sliding_window_median.py
"""Leetcode 480. Sliding Window Median Hard URL: https://leetcode.com/problems/sliding-window-median/ Median is the middle value in an ordered integer list. If the size of the list is even, there is no middle value. So the median is the mean of the two middle value. Examples: [2,3,4] , the median is 3 [2,3], the median is (2 + 3) / 2 = 2.5 Given an array nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position. Your job is to output the median array for each window in the original array. For example, Given nums = [1,3,-1,-3,5,3,6,7], and k = 3. Window position Median --------------- ----- [1 3 -1] -3 5 3 6 7 1 1 [3 -1 -3] 5 3 6 7 -1 1 3 [-1 -3 5] 3 6 7 -1 1 3 -1 [-3 5 3] 6 7 3 1 3 -1 -3 [5 3 6] 7 5 1 3 -1 -3 5 [3 6 7] 6 Therefore, return the median sliding window as [1,-1,-1,3,5,6]. Note: You may assume k is always valid, ie: k is always smaller than input array's size for non-empty array. """ import random class SolutionSelect(object): def _select_mth_smallest_sub_nums(self, sub_nums, mth): # Randomly select a num in sub array as pivot. pivot_idx = random.choice(range(len(sub_nums))) pivot = sub_nums[pivot_idx] # Collect idx with num smaller than, equal to, and larger than pivot. small_idx = [idx for idx, n in enumerate(sub_nums) if n < pivot] mid_idx = [idx for idx, n in enumerate(sub_nums) if n == pivot] large_idx = [idx for idx, n in enumerate(sub_nums) if n > pivot] n_small = len(small_idx) n_mid = len(mid_idx) if mth <= n_small: # Select the mth from small nums. small_nums = [sub_nums[idx] for idx in small_idx] return self._select_mth_smallest_sub_nums(small_nums, mth) elif n_small < mth <= n_small + n_mid: # Select pivot as the mth. return pivot elif mth > n_small + n_mid: # Select the mth from large nums. large_nums = [sub_nums[idx] for idx in large_idx] return self._select_mth_smallest_sub_nums( large_nums, mth - n_small - n_mid) def medianSlidingWindow(self, nums, k): """ :type nums: List[int] :type k: int :rtype: List[float] Time complexity: O((n - k)*k), where n is the length of nums. Space complexity: O(k). """ n = len(nums) med_nums = [] for i in range(n - k + 1): # Create a sub nums. sub_nums = nums[i:(i + k)] if k % 2 == 1: # If k is odd, select the (k // 2 + 1)th as median. m = k // 2 + 1 med = self._select_mth_smallest_sub_nums(sub_nums, m) elif k % 2 == 0: # If k is even, select the (k // 2)th and (k // 2 + 1)th nums, # and take mean of them as median. m1 = k // 2 m2 = k // 2 + 1 med1 = self._select_mth_smallest_sub_nums(sub_nums, m1) med2 = self._select_mth_smallest_sub_nums(sub_nums, m2) med = (med1 + med2) / 2.0 med_nums.append(med) return med_nums class SolutionSort(object): def medianSlidingWindow(self, nums, k): """ :type nums: List[int] :type k: int :rtype: List[float] """ pass def main(): nums = [1, 3, -1, -3, 5, 3, 6, 7] k = 3 print 'For {0} with k = {1}, the median is:'.format(nums, k) print SolutionSelect().medianSlidingWindow(nums, k) if __name__ == '__main__': main()
Python
0
@@ -3438,16 +3438,31 @@ tionSort +AndBinarySearch (object)
81bd4c6a7b94803e57a64f47bacbf3d5059282bd
add node
checkbst.py
checkbst.py
""" This is a very common interview question. Given a binary tree, check whether it’s a binary search tree or not. Simple as that.. http://www.ardendertat.com/2011/10/10/programming-interview-questions-7-binary-search-tree-check/ """
Python
0.000001
@@ -231,8 +231,111 @@ k/%0A%22%22%22%0A%0A +class Node:%0A def __init__(self, val=None):%0A self.left, self.right, self.val = None, None, val
3a5b96d5d666521a97598fe9d30b8e007242f8aa
swap from published/test:compile to published/compile in CI to try and speed things up a little
ci/build.py
ci/build.py
#!/usr/bin/env python import os from subprocess import check_call, check_output import json import sys is_master_commit = ( os.environ['TRAVIS_PULL_REQUEST'] == "false" and os.environ['TRAVIS_BRANCH'] == "master" ) all_versions = [ "2.10.4", "2.10.5", "2.10.6", "2.11.3", "2.11.4", "2.11.5", "2.11.6", "2.11.7", "2.11.8" ] def update_version(): git_hash = check_output(["git", "rev-parse", "--short", "HEAD"]).strip() version_txt = """ package ammonite object Constants{ val version = "COMMIT-%s" val curlUrl = "https://git.io/vKwA8" } """ % git_hash open("project/Constants.scala", "w").write(version_txt) def publish_signed(): creds = """ (credentials in ThisBuild) += Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", "%s", "%s" ) pgpPassphrase := Some("%s".toArray) pgpSecretRing := file("secring.asc") pgpPublicRing := file("pubring.asc") sonatypeProfileName := "com.lihaoyi" """ % ( os.environ['SONATYPE_DEPLOY_USER'], os.environ['SONATYPE_DEPLOY_PASSWORD'], os.environ['SONATYPE_PGP_PASSWORD'] ) open("sonatype.sbt", "w").write(creds) open("secring.asc", "w").write( json.loads('"' + os.environ['SONATYPE_PGP_KEY_CONTENTS'] + '"') ) open("pubring.asc", "w").write( json.loads('"' + os.environ['SONATYPE_PGP_PUB_KEY_CONTENTS'] + '"') ) for version in all_versions: if version in {"2.10.5", "2.11.8"}: check_call(["sbt", "++"+version, "published/publishSigned"]) else: check_call(["sbt", "++"+version, "amm/publishSigned", "sshd/publishSigned"]) check_call(["sbt", "sonatypeReleaseAll"]) def publish_docs(): deploy_key = json.loads('"' + os.environ['DEPLOY_KEY'] + '"') with open("deploy_key", "w") as f: f.write(deploy_key) if os.environ.get("TRAVIS_TAG"): new_env = dict(os.environ, DOC_FOLDER=".") else: new_env = dict(os.environ, DOC_FOLDER="master") check_call("ci/deploy_master_docs.sh", env=new_env) if sys.argv[1] == "docs": if is_master_commit: print "MASTER COMMIT: Updating version and publishing to Github Pages" update_version() publish_docs() else: print "MISC COMMIT: Building readme for verification" check_call(["sbt", "readme/run"]) elif sys.argv[1] == "artifacts": if is_master_commit: print "MASTER COMMIT: Updating version and publishing to Maven Central" update_version() publish_signed() else: print "MISC COMMIT: Compiling all Scala code across versions for verification" for version in all_versions: check_call(["sbt", "++" + version, "published/test:compile"]) elif sys.argv[1] == "test": check_call(["sbt", "++" + os.environ["TRAVIS_SCALA_VERSION"], "published/compile"]) check_call(["sbt", "++" + os.environ["TRAVIS_SCALA_VERSION"], sys.argv[2]]) else: raise Exception("Unknown argument list %s" % sys.argv)
Python
0
@@ -2836,13 +2836,8 @@ hed/ -test: comp
175bbd2f181d067712d38beeca9df4063654103a
Update script to remove extension from filename
nlppln/frog_to_saf.py
nlppln/frog_to_saf.py
#!/usr/bin/env python
import click
import os
import codecs
import json

from xtas.tasks._frog import parse_frog, frog_to_saf


@click.command()
@click.argument('input_files', nargs=-1, type=click.Path(exists=True))
@click.argument('output_dir', nargs=1, type=click.Path())
def frog2saf(input_files, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    for fi in input_files:
        with codecs.open(fi) as f:
            lines = f.readlines()
        lines = [line.strip() for line in lines]
        saf_data = frog_to_saf(parse_frog(lines))

        head, tail = os.path.split(fi)
        out_file = os.path.join(output_dir, '{}.json'.format(tail))
        with codecs.open(out_file, 'wb', encoding='utf-8') as f:
            json.dump(saf_data, f, indent=4)


if __name__ == '__main__':
    frog2saf()
Python
0.000002
@@ -615,16 +615,76 @@ plit(fi) +%0A fname = tail.replace(os.path.splitext(tail)%5B1%5D, '') %0A%0A @@ -738,20 +738,21 @@ .format( -tail +fname ))%0A
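The diff strips the extension with `tail.replace(os.path.splitext(tail)[1], '')`. A hedged aside on the idiom: `os.path.splitext` already returns the stem directly, and `replace()` removes every occurrence of the extension text, not just the trailing one. The filename below is a made-up example:

```python
import os

tail = 'frog.txt.frog.txt'
ext = os.path.splitext(tail)[1]       # '.txt'
print(tail.replace(ext, ''))          # 'frog.frog' -- both '.txt' removed
print(os.path.splitext(tail)[0])      # 'frog.txt.frog' -- stem only
```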
5bd7d5d25b279dcc4e30673c8789f9636a546731
Remove trailing semicolons.
tools/test_migration.py
tools/test_migration.py
#!/usr/bin/env python import os import re import subprocess import sys db_admin_user = 'postgres' original_db = 'original' def info(message): print message def quiet_check_call(*args, **kwargs): try: with open('/tmp/subprocess.out', 'w') as out: subprocess.check_call(*args, stderr=subprocess.STDOUT, stdout=out, **kwargs) except: with open('/tmp/subprocess.out') as fh: print fh.read() raise finally: os.unlink('/tmp/subprocess.out') def psql(*args): quiet_check_call(['psql', '-U', db_admin_user] + list(args)) def load(path): quiet_check_call('zcat %s | psql -U %s -d %s' % ( path, db_admin_user, original_db), shell=True) def pg_dump(to_file): pg = subprocess.Popen(['pg_dump', '--schema-only', '-U', db_admin_user, '--exclude-schema', 'public', original_db], stdout=subprocess.PIPE) comments = [] columns = [] alters = [] alter_start = None in_table = False with open(to_file, 'w') as out: for line in pg.stdout: # skip SQL comments and blank lines if line.startswith('--') or line == '\n': continue # normalize conditions (for some reason pg_dump dumps them # differently before and after an upgrade) if line.startswith(' CONSTRAINT'): line = re.sub(r"\('(\w+)'::character varying\)::text", r"'\1'::character varying", line) line = re.sub(r"ANY \(\(ARRAY\[(.*?)\]\)::text\[\]\)", r"ANY (ARRAY[\1])", line) # reorder table columns if in_table: if line == ');\n': # table finished in_table = False else: # accumulate table lines line = re.sub(r",\n", r"\n", line) columns.append(line) continue # write table lines sorted for column in sorted(columns): out.write(column) # ignore the name of UNIQUE keys until we start naming # them explicitly if alter_start: if re.match(r"^ ADD CONSTRAINT \w+_key UNIQUE ", line): line = re.sub(r" ADD CONSTRAINT (\w+)_key UNIQUE ", r" ADD CONSTRAINT <elided>_key UNIQUE ", line) alters.append(alter_start + line) alter_start = None continue else: line = alter_start + line alter_start = None elif line.startswith('ALTER TABLE ONLY '): alter_start = line; continue # move comments to the end if line.startswith('COMMENT '): comments.append(line) continue if line.startswith('CREATE TABLE'): in_table = True; columns = [] out.write(line) # write out alters sorted for alter in sorted(alters): out.write(alter) # write out comments sorted for comment in sorted(comments): out.write(comment) # DB from create_oq_schema info('Creating a fresh database using create_oq_schema...') psql('-c', "DROP DATABASE IF EXISTS %s" % original_db) psql('-c', "CREATE DATABASE %s" % original_db) quiet_check_call(['bin/create_oq_schema', '--yes', '--no-tab-spaces', '--db-name=%s' % original_db, '--db-user=%s' % db_admin_user, '--schema-path=%s' % 'openquake/db/schema']) info('Dumping the database...') pg_dump('/tmp/fresh.sql') # DB from dbmaint.py info('Loading database from old dump...') psql('-c', "DROP DATABASE IF EXISTS %s" % original_db) psql('-c', "CREATE DATABASE %s" % original_db) load('tools/test_migration_base.sql.gz') info('Upgrading database using dbmaint...') quiet_check_call(['tools/dbmaint.py', '--db', original_db, '-U', db_admin_user]) info('Dumping the database...') pg_dump('/tmp/after.sql') info('Comparing new and upgraded version...') res = subprocess.call(['diff', '-u', '/tmp/fresh.sql', '/tmp/after.sql']) sys.exit(res)
Python
0.00016
@@ -2978,17 +2978,16 @@ t = line -; %0A @@ -3231,17 +3231,16 @@ e = True -; %0A
b3426bcd217c336f8807a5474b47dea72a994eb9
Rename the `op` parameter to `request`.
ioctl/__init__.py
ioctl/__init__.py
import ctypes
import fcntl
import sys

# In Python 2, the bytearray()-type does not support the buffer interface,
# and can therefore not be used in ioctl().
# This creates a couple of helper functions for converting to and from
if sys.version_info < (3, 0):
    import array

    def _to_bytearray(value):
        return array.array('B', value)

    def _from_bytearray(value):
        return value.tostring()
else:
    def _to_bytearray(value):
        return bytearray(value)

    def _from_bytearray(value):
        return bytes(value)


def ioctl_int(fd, op, value=0):
    """Call ioctl() with an `int *` argument.

    :param fd: File descriptor to operate on.
    :param op: The ioctl request to call.
    :param value: Optional value to pass to the ioctl() operation.
        Defaults to 0.
    :return The contents of the value parameter after the call to ioctl().
    """
    res = ctypes.c_int(value)
    fcntl.ioctl(fd, op, res)
    return res.value


def ioctl_size_t(fd, op, value=0):
    """Call ioctl() with a `size_t *` argument.

    :param fd: File descriptor to operate on.
    :param op: ioctl request to call.
    :param value: Optional value to pass to the ioctl() operation.
        Defaults to 0.
    :return: The contents of the value parameter after the call to ioctl().
    """
    res = ctypes.c_size_t(value)
    fcntl.ioctl(fd, op, res)
    return res.value


def ioctl_buffer(fd, op, value=None, length=None):
    """Call ioctl() with a byte buffer argument.

    You must specify either the `value` parameter or the `length` parameter.
    If the `length` parameter is specified, this function will allocate a byte
    buffer of the specified length to pass to ioctl().

    :param fd: File descriptor to operate on.
    :param op: ioctl request to call.
    :param value: Optional contents of the byte buffer at the start of the call.
    :param length: Optional length of the byte buffer.
    :return: The contents of the value parameter after the call to ioctl().
    """
    op = int(op)
    if value is None and length is None:
        raise ValueError('Must specify either `value` or `length`')
    if value is not None and length is not None:
        raise ValueError('Cannot specify both `value` and `length`')
    if value is None:
        value = [0] * length
    data = _to_bytearray(value)
    fcntl.ioctl(fd, op, data)
    data = _from_bytearray(data)
    return data
Python
0
@@ -546,26 +546,31 @@ ctl_int(fd, -op +request , value=0):%0A @@ -673,18 +673,23 @@ :param -op +request : The io @@ -916,34 +916,39 @@ fcntl.ioctl(fd, -op +request , res)%0A retur @@ -977,26 +977,31 @@ _size_t(fd, -op +request , value=0):%0A @@ -1098,34 +1098,39 @@ on.%0A :param -op +request : ioctl request @@ -1357,18 +1357,23 @@ ctl(fd, -op +request , res)%0A @@ -1414,18 +1414,23 @@ fer(fd, -op +request , value= @@ -1768,18 +1768,23 @@ :param -op +request : ioctl @@ -2028,19 +2028,29 @@ -op = int(op +request = int(request )%0A @@ -2377,18 +2377,23 @@ ctl(fd, -op +request , data)%0A
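A note on the rename in this diff: it is source-compatible for positional callers but an API break for anyone calling with keywords. A minimal sketch with stand-in functions; the bodies are elided and 0x541B is just an arbitrary request number used for illustration:

```python
def ioctl_int_before(fd, op, value=0): ...
def ioctl_int_after(fd, request, value=0): ...

ioctl_int_before(0, op=0x541B)       # worked before the rename
ioctl_int_after(0, request=0x541B)   # works after the rename
# ioctl_int_after(0, op=0x541B)      # TypeError: unexpected keyword 'op'
```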
afc7429ea84a301e49d7d8ec810f3434aef16533
Add default config
cli/main.py
cli/main.py
#! /usr/bin/python import sys import argparse from cli.built_in_commands import BuildInCommand from cli.commands import CommandList from cli.crypto import KeyPair from cli.exception import CliException from cli.network import generateTransaction, sendTx, generateQuery, sendQuery import cli.file_io as file_io from cli.query import QueryList BASE_NAME = "iroha-cli" TARGET = "iroha" class Context: name = None public_key = None private_key = None location = None key_pair = None def __init__(self, filepath): conf = file_io.load_config(filepath) if not conf: self.loaded = False return self.loaded = True self.name = conf.get('name') self.public_key = conf.get('publicKey') self.private_key = conf.get('privateKey') address = conf.get('address') port = conf.get('port') self.location = "{}:{}".format(address, str(port)) self.key_pair = KeyPair( raw_private_key=KeyPair.decode(self.private_key), raw_public_key=KeyPair.decode(self.public_key)) class ChiekuiCli: def __init__(self): self.tx_commands = CommandList().commands self.queries = QueryList().queries self.built_in_commands = BuildInCommand().commands self.context = None # ================================ # Parser # ================================ self.parser = argparse.ArgumentParser(description='Cli of {}'.format(TARGET)) _sub_parser = self.parser.add_subparsers() # parse: transaction parse_tx = _sub_parser.add_parser("tx") sup_parser_tx = parse_tx.add_subparsers() for cmd in self.tx_commands: _parser = sup_parser_tx.add_parser(cmd, help='{} help'.format(cmd)) for name, val in self.tx_commands[cmd]['option'].items(): _parser.add_argument("--{}".format(name), type=val["type"], required=val["required"], help=val["detail"]) _parser.add_argument("--config", type=str, required=False, help="config.yml's path") # parse: query parse_query = _sub_parser.add_parser("query") sup_parser_query = parse_query.add_subparsers() for qry in self.queries: _parser = sup_parser_query.add_parser(qry, help='{} help'.format(qry)) for name, val in self.queries[qry]['option'].items(): _parser.add_argument("--{}".format(name), type=val["type"], required=val["required"], help=val["detail"]) _parser.add_argument("--config", type=str, required=False, help="config.yml's path") # parse: built in command for cmd_name, cmd_val in self.built_in_commands.items(): _parser = _sub_parser.add_parser(cmd_name, help='{} help'.format(cmd_name)) for name, val in self.built_in_commands[cmd_name]['option'].items(): _parser.add_argument("--{}".format(name), type=val["type"], required=val["required"],help=val["detail"]) _parser.add_argument("--config", type=str, required=False,help="config.yml's path") def print_introduction(self): print( "----------------\n" "Iroha-mizuki-cli\n" "----------------\n\n" "Current support commands" ) for cmd in self.tx_commands.keys(): print(" - {}".format(cmd)) print( "\n" "Sample keygen:\n\n" " > iroha-ya-cli keygen --account_name mizuki --make_conf yes\n\n" "Sample Tx:\n\n" " > iroha-ya-cli tx CreateAccount --account_name mizuki --domain_id japan --config config.yml\n" "Sample Query:\n\n" " > iroha-ya-cli query GetAccount --account_id mizuki@japan --config my_config.yml\n" ) def exec_tx(self, cmd, argv): file_io.load_config(argv.config) command = self.tx_commands[cmd]["function"](vars(argv)) if command: if not self.context.loaded: print("Config data is not loaded! 
to send tx require config") return False tx = generateTransaction(self.context.name, [command], self.context.key_pair) if not sendTx(self.context.location, tx): print( "Transaction is not arrived...\n" "Could you ckeck this => {}\n".format(self.context.location) ) return False else: print("Err") def exec_query(self, qry, argv): file_io.load_config(argv.config) qry = self.queries[qry]["function"](vars(argv)) if qry: query = generateQuery(self.context.name, qry, self.context.key_pair) try: res = sendQuery(self.context.location, query) print(res) except CliException as e: print(e.message) else: print("Err") def exec(self, argv): parsed_argv = self.parser.parse_args(argv[1:]) if len(argv) < 2: self.print_introduction() return self.context = Context(vars(parsed_argv).get('config')) if argv[1] == 'tx': self.exec_tx(argv[2], parsed_argv) elif argv[1] == 'query': self.exec_query(argv[2], parsed_argv) if argv[1] in self.built_in_commands: self.built_in_commands[argv[1]]["function"]( vars(parsed_argv), self.context) def main(argv=sys.argv): cli = ChiekuiCli() cli.exec(argv) return if __name__ == "__main__": main()
Python
0.000002
@@ -5285,16 +5285,190 @@ nfig'))%0A +%0A #%0A # if not set --config. load current directory's config.yml%0A #%0A if not self.context.loaded:%0A self.context = Context('config.yml')%0A%0A
e8748e5fdad15e73a363cc769858fdd420149d37
Add missing dependencies for app_unittests on Linux (this CL is similar to http://codereview.chromium.org/669158, but for a different test binary; we also don't need to fix the Mac dependencies)
app/app.gyp
app/app.gyp
# Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { # TODO: remove this helper when we have loops in GYP 'apply_locales_cmd': ['python', '<(DEPTH)/build/apply_locales.py',], 'chromium_code': 1, 'grit_info_cmd': ['python', '../tools/grit/grit_info.py',], 'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/app', 'grit_cmd': ['python', '../tools/grit/grit.py'], 'localizable_resources': [ 'resources/app_locale_settings.grd', 'resources/app_strings.grd', ], }, 'includes': [ 'app_base.gypi', ], 'targets': [ { 'target_name': 'app_unittests', 'type': 'executable', 'msvs_guid': 'B4D59AE8-8D2F-97E1-A8E9-6D2826729530', 'dependencies': [ 'app_base', 'app_resources', '../net/net.gyp:net_test_support', '../skia/skia.gyp:skia', '../testing/gmock.gyp:gmock', '../testing/gtest.gyp:gtest', '../third_party/icu/icu.gyp:icui18n', '../third_party/icu/icu.gyp:icuuc', '../third_party/libjpeg/libjpeg.gyp:libjpeg', '../third_party/libpng/libpng.gyp:libpng', '../third_party/libxml/libxml.gyp:libxml', '../third_party/zlib/zlib.gyp:zlib', ], 'sources': [ 'animation_container_unittest.cc', 'animation_unittest.cc', 'clipboard/clipboard_unittest.cc', 'l10n_util_mac_unittest.mm', 'l10n_util_unittest.cc', 'os_exchange_data_win_unittest.cc', 'run_all_unittests.cc', 'slide_animation_unittest.cc', 'system_monitor_unittest.cc', 'test_suite.h', 'text_elider_unittest.cc', 'sql/connection_unittest.cc', 'sql/statement_unittest.cc', 'sql/transaction_unittest.cc', 'tree_node_iterator_unittest.cc', 'win_util_unittest.cc', ], 'include_dirs': [ '..', ], 'conditions': [ ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', { 'dependencies': [ '../build/linux/system.gyp:gtk', '../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck', ], }], ['OS!="win"', { 'sources!': [ 'os_exchange_data_win_unittest.cc', 'win_util_unittest.cc', ], }], ['OS =="linux" or OS =="freebsd"', { 'conditions': [ ['linux_use_tcmalloc==1', { 'dependencies': [ '../base/allocator/allocator.gyp:allocator', ], }], ], }], ], }, { 'target_name': 'app_strings', 'msvs_guid': 'AE9BF4A2-19C5-49D8-BB1A-F28496DD7051', 'type': 'none', 'rules': [ { 'rule_name': 'grit', 'extension': 'grd', 'inputs': [ '<!@(<(grit_info_cmd) --inputs <(localizable_resources))', ], 'outputs': [ '<(grit_out_dir)/<(RULE_INPUT_ROOT)/grit/<(RULE_INPUT_ROOT).h', # TODO: remove this helper when we have loops in GYP '>!@(<(apply_locales_cmd) \'<(grit_out_dir)/<(RULE_INPUT_ROOT)/<(RULE_INPUT_ROOT)_ZZLOCALE.pak\' <(locales))', ], 'action': ['<@(grit_cmd)', '-i', '<(RULE_INPUT_PATH)', 'build', '-o', '<(grit_out_dir)/<(RULE_INPUT_ROOT)'], 'message': 'Generating resources from <(RULE_INPUT_PATH)', 'conditions': [ ['use_titlecase_in_grd_files==1', { 'action': ['-D', 'use_titlecase'], }], ], }, ], 'sources': [ '<@(localizable_resources)', ], 'direct_dependent_settings': { 'include_dirs': [ '<(grit_out_dir)/app_locale_settings', '<(grit_out_dir)/app_strings', ], }, 'conditions': [ ['OS=="win"', { 'dependencies': ['../build/win/system.gyp:cygwin'], }], ], }, { 'target_name': 'app_resources', 'type': 'none', 'msvs_guid': '3FBC4235-3FBD-46DF-AEDC-BADBBA13A095', 'actions': [ { 'action_name': 'app_resources', 'variables': { 'input_path': 'resources/app_resources.grd', }, 'inputs': [ '<!@(<(grit_info_cmd) --inputs <(input_path))', ], 'outputs': [ '<!@(<(grit_info_cmd) --outputs \'<(grit_out_dir)/app_resources\' <(input_path))', ], 'action': ['<@(grit_cmd)', '-i', '<(input_path)', 'build', '-o', 
'<(grit_out_dir)/app_resources'], 'message': 'Generating resources from <(input_path)', }, ], 'direct_dependent_settings': { 'include_dirs': [ '<(grit_out_dir)/app_resources', ], }, 'conditions': [ ['OS=="win"', { 'dependencies': ['../build/win/system.gyp:cygwin'], }], ], }, ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
Python
0.000025
@@ -2149,24 +2149,77 @@ m.gyp:gtk',%0A + '../chrome/chrome.gyp:packed_resources',%0A
0ac4fe1431fd04aa2645a4afc3d4d2fbfb21bb90
Update plone profile: copy of black, plus three settings.
isort/profiles.py
isort/profiles.py
"""Common profiles are defined here to be easily used within a project using --profile {name}""" from typing import Any, Dict black = { "multi_line_output": 3, "include_trailing_comma": True, "force_grid_wrap": 0, "use_parentheses": True, "ensure_newline_before_comments": True, "line_length": 88, } django = { "combine_as_imports": True, "include_trailing_comma": True, "multi_line_output": 5, "line_length": 79, } pycharm = { "multi_line_output": 3, "force_grid_wrap": 2, "lines_after_imports": 2, } google = { "force_single_line": True, "force_sort_within_sections": True, "lexicographical": True, "single_line_exclusions": ("typing",), "order_by_type": False, "group_by_package": True, } open_stack = { "force_single_line": True, "force_sort_within_sections": True, "lexicographical": True, } plone = { "force_alphabetical_sort": True, "force_single_line": True, "lines_after_imports": 2, "line_length": 200, } attrs = { "atomic": True, "force_grid_wrap": 0, "include_trailing_comma": True, "lines_after_imports": 2, "lines_between_types": 1, "multi_line_output": 3, "use_parentheses": True, } hug = { "multi_line_output": 3, "include_trailing_comma": True, "force_grid_wrap": 0, "use_parentheses": True, "line_length": 100, } wemake = { "multi_line_output": 3, "include_trailing_comma": True, "use_parentheses": True, "line_length": 80, } appnexus = { **black, "force_sort_within_sections": True, "order_by_type": False, "case_sensitive": False, "reverse_relative": True, "sort_relative_in_force_sorted_sections": True, "sections": ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"], "no_lines_before": "LOCALFOLDER", } profiles: Dict[str, Dict[str, Any]] = { "black": black, "django": django, "pycharm": pycharm, "google": google, "open_stack": open_stack, "plone": plone, "attrs": attrs, "hug": hug, "wemake": wemake, "appnexus": appnexus, }
Python
0
@@ -883,30 +883,65 @@ ,%0A%7D%0Aplone = -%7B%0A +black.copy()%0Aplone.update(%0A %7B%0A + %22force_alpha @@ -953,32 +953,36 @@ al_sort%22: True,%0A + %22force_singl @@ -992,32 +992,36 @@ ine%22: True,%0A + + %22lines_after_imp @@ -1038,29 +1038,11 @@ -%22line_length%22: 200,%0A%7D +%7D%0A) %0Aatt
df7b391d3c02471a2095170ee83c9de4586930e7
Fix lint
torchtext/data/field.py
torchtext/data/field.py
from collections import Counter, OrderedDict import six import torch from torch.autograd import Variable from .dataset import Dataset from .pipeline import Pipeline from .utils import get_tokenizer from ..vocab import Vocab class Field(object): """Defines a datatype together with instructions for converting to Tensor. Every dataset consists of one or more types of data. For instance, a text classification dataset contains sentences and their classes, while a machine translation dataset contains paired examples of text in two languages. Each of these types of data is represented by a Field object, which holds a Vocab object that defines the set of possible values for elements of the field and their corresponding numerical representations. The Field object also holds other parameters relating to how a datatype should be numericalized, such as a tokenization method and the kind of Tensor that should be produced. If a Field is shared between two columns in a dataset (e.g., question and answer in a QA dataset), then they will have a shared vocabulary. Attributes: sequential: Whether the datatype represents sequential data. If False, no tokenization is applied. Default: True. use_vocab: Whether to use a Vocab object. If False, the data in this field should already be numerical. Default: True. init_token: A token that will be prepended to every example using this field, or None for no initial token. Default: None. eos_token: A token that will be appended to every example using this field, or None for no end-of-sentence token. Default: None. fix_length: A fixed length that all examples using this field will be padded to, or None for flexible sequence lengths. Default: None. tensor_type: The torch.Tensor class that represents a batch of examples of this kind of data. Default: torch.LongTensor. preprocessing: The Pipeline that will be applied to examples using this field after tokenizing but before numericalizing. Many Datasets replace this attribute with a custom preprocessor. Default: None. postprocessing: A Pipeline that will be applied to examples using this field after numericalizing but before the numbers are turned into a Tensor. Default: None. lower: Whether to lowercase the text in this field. Default: False. tokenize: The function used to tokenize strings using this field into sequential examples. Default: str.split. include_lengths: Whether to return a tuple of a padded minibatch and a list containing the lengths of each examples, or just a padded minibatch. Default: False. batch_first: Whether to produce tensors with the batch dimension first. Default: False. pad_token: The string token used as padding. Default: "<pad>". """ def __init__( self, sequential=True, use_vocab=True, init_token=None, eos_token=None, fix_length=None, tensor_type=torch.LongTensor, preprocessing=None, postprocessing=None, lower=False, tokenize=(lambda s: s.split()), include_lengths=False, batch_first=False, pad_token="<pad>"): self.sequential = sequential self.use_vocab = use_vocab self.init_token = init_token self.eos_token = eos_token self.fix_length = fix_length self.tensor_type = tensor_type self.preprocessing = preprocessing self.postprocessing = postprocessing self.lower = lower self.tokenize = get_tokenizer(tokenize) self.include_lengths = include_lengths self.batch_first = batch_first self.pad_token = pad_token if self.sequential else None def preprocess(self, x): """Load a single example using this field, tokenizing if necessary. If the input is a Python 2 `str`, it will be converted to Unicode first. 
If `sequential=True`, it will be tokenized. Then the input will be optionally lowercased and passed to the user-provided `preprocessing` Pipeline.""" if (six.PY2 and isinstance(x, six.string_types) and not isinstance(x, unicode)): x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x) if self.sequential and isinstance(x, six.text_type): x = self.tokenize(x) if self.lower: x = Pipeline(six.text_type.lower)(x) if self.preprocessing is not None: return self.preprocessing(x) else: return x def pad(self, minibatch): """Pad a batch of examples using this field. Pads to self.fix_length if provided, otherwise pads to the length of the longest example in the batch. Prepends self.init_token and appends self.eos_token if those attributes are not None. Returns a tuple of the padded list and a list containing lengths of each example if `self.include_lengths` is `True`, else just returns the padded list. """ minibatch = list(minibatch) if not self.sequential: return minibatch if self.fix_length is None: max_len = max(len(x) for x in minibatch) else: max_len = self.fix_length + ( self.init_token, self.eos_token).count(None) - 2 padded, lengths = [], [] for x in minibatch: padded.append( ([] if self.init_token is None else [self.init_token]) + list(x[:max_len]) + ([] if self.eos_token is None else [self.eos_token]) + [self.pad_token] * max(0, max_len - len(x))) lengths.append(len(padded[-1]) - max(0, max_len - len(x))) if self.include_lengths: return (padded, lengths) return padded def build_vocab(self, *args, **kwargs): """Construct the Vocab object for this field from one or more datasets. Arguments: Positional arguments: Dataset objects or other iterable data sources from which to construct the Vocab object that represents the set of possible values for this field. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly. Remaining keyword arguments: Passed to the constructor of Vocab. """ counter = Counter() sources = [] for arg in args: if isinstance(arg, Dataset): sources += [getattr(arg, name) for name, field in arg.fields.items() if field is self] else: sources.append(arg) for data in sources: for x in data: if not self.sequential: x = [x] counter.update(x) specials = list(OrderedDict.fromkeys( tok for tok in [self.pad_token, self.init_token, self.eos_token] if tok is not None)) self.vocab = Vocab(counter, specials=specials, **kwargs) def numericalize(self, arr, device=None, train=True): """Turn a batch of examples that use this field into a Variable. If the field has include_lengths=True, a tensor of lengths will be included in the return value. Arguments: arr: List of tokenized and padded examples, or tuple of a padded list and a list of lengths if self.include_lengths is True. device: Device to create the Variable's Tensor on. Use -1 for CPU and None for the currently active GPU device. Default: None. train: Whether the batch is for a training set. If False, the Variable will be created with volatile=True. Default: True. 
""" if isinstance(arr, tuple): arr, lengths = arr if self.use_vocab: if self.sequential: arr = [[self.vocab.stoi[x] for x in ex] for ex in arr] else: arr = [self.vocab.stoi[x] for x in arr] if self.postprocessing is not None: arr = self.postprocessing(arr, self.vocab, train) elif self.postprocessing is not None: arr = self.postprocessing(arr, train) arr = self.tensor_type(arr) if self.include_lengths: lengths = torch.LongTensor(lengths) if self.sequential and not self.batch_first: arr.t_() if device == -1: if self.sequential: arr = arr.contiguous() else: arr = arr.cuda(device) if self.include_lengths: lengths = lengths.cuda(device) if self.include_lengths: return Variable(arr, volatile=not train), lengths return Variable(arr, volatile=not train)
Python
0.000032
@@ -4318,17 +4318,16 @@ and not - %0A
0a4da4bc40813362b9d6c67c2fb02f33a807f3fe
fix error on tax view
l10n_it_account/__openerp__.py
l10n_it_account/__openerp__.py
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2010-2013 Associazione OpenERP Italia # (<http://www.openerp-italia.org>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Italian Localisation', 'version': '2.15.30.32', 'category': 'Localisation/Italy', 'description': """This module customizes OpenERP in order to fit italian laws and mores - Account version Functionalities: - Fiscal code computation for partner, and fiscal code check - Check invoice date consistency - CIG on invoice """, 'author': 'OpenERP Italian Community, Didotech srl', 'website': 'http://www.openerp-italia.org, http://www.didotech.com', 'license': 'AGPL-3', 'depends': [ 'account', 'base_vat', 'account_chart', 'base_iban', 'l10n_it_base', 'account_voucher', 'sale_order_confirm', # 'account_invoice_entry_date', not possible for use of a field defined here invoice_supplier_number ], 'data': [ 'account/partner_view.xml', 'account/fiscal_position_view.xml', 'account/account_sequence.xml', 'account/invoice_view.xml', 'account/voucher_view.xml', 'account/payment_type_view.xml', 'wizard/select_fiscal_position_view.xml', 'data/bank_iban_data.xml', 'account/account_move.xml', 'account/res_bank_view.xml', # 'account/account_tax_view.xml', 'account/res_company_view.xml', 'account/account_invoice_workflow.xml', ], 'demo': [], 'active': False, 'installable': True, 'external_dependencies': { 'python': ['codicefiscale'], } }
Python
0
@@ -2166,34 +2166,32 @@ ew.xml',%0A - # 'account/accoun
87c2cf3f8f8ea2e890aa648d33e93e051632e86d
Change billet fields and drop the coligada context fallback
totvserprm/financial.py
totvserprm/financial.py
# -*- coding: utf-8 -*- from datetime import datetime from baseapi import BaseApi class Client(BaseApi): dataservername = 'FinCFODataBR' def create(self,**kwargs): # codigo de coligada para o contexto, diferente do dataset codcoligada_contexto = kwargs.get('codcoligada_contexto') if not codcoligada_contexto: codcoligada_contexto = kwargs.get('codcoligada') return super(Client, self).create({ 'NewDataSet': { 'FCFO': { 'ATIVO': kwargs.get('ativo'), # enviar -1 para que sejá criado de forma incremental 'CODCFO': -1, 'IDCFO': -1, 'CODEXTERNO': kwargs.get('codexterno'), 'CODCOLIGADA': kwargs.get('codcoligada'), 'CGCCFO': kwargs.get('cpf_cnpj'), 'TIPORUA': kwargs.get('tipo_rua'), 'TIPOBAIRRO': kwargs.get('tipo_bairro'), 'BAIRRO': kwargs.get('bairro'), 'RUA': kwargs.get('rua'), 'NUMERO': kwargs.get('numero'), 'CEP': kwargs.get('cep'), 'CODETD': kwargs.get('estado'), 'CIDADE': kwargs.get('cidade'), 'CODMUNICIPIO': kwargs.get('codigo_municipio'), 'PAIS': kwargs.get('cod_pais'), 'DTNASCIMENTO': '{:%Y-%m-%d}T03:00:00.000'.format(kwargs.get('data_nascimento')), 'NOME': kwargs.get('nome'), 'NOMEFANTASIA': kwargs.get('nome'), 'PAGREC': kwargs.get('classificacao'), 'PESSOAFISOUJUR': kwargs.get('categoria'), } } }, 'CODCOLIGADA={}'.format(codcoligada_contexto)) class Billet(BaseApi): dataservername = 'FinLanBoletoData' def create(self,**kwargs): # codigo de coligada para o contexto, diferente do dataset codcoligada_contexto = kwargs.get('codcoligada_contexto') if not codcoligada_contexto: codcoligada_contexto = kwargs.get('codcoligada') return super(Billet, self).create({ 'NewDataSet': { 'FLAN': { 'CODCOLIGADA': kwargs.get('codcoligada'), 'IDLAN': -1, 'NUMERODOCUMENTO': -1, 'NFOUDUP': 0, 'CLASSIFICACAO': 0, 'PAGREC': 1, 'STATUSLAN': 1, 'CODTDO': kwargs.get('tipo_documento'), 'DATAVENCIMENTO': kwargs.get('data_vencimento'), 'DATAEMISSAO': "{:%d/%m/%Y %H:%M:%S}".format(datetime.now()), 'VALORORIGINAL': kwargs.get('valor'), 'CODCOLCFO': kwargs.get('codcoligada'), 'CODCFO': kwargs.get('codcliente'), 'CODFILIAL': kwargs.get('codfilial'), 'SERIEDOCUMENTO': kwargs.get('serie_documento'), 'CODCXA': kwargs.get('conta'), 'TIPOCONTABILLAN': 1, 'CODMOEVALORORIGINAL': 'R$', 'VALORSERVICO': 0, 'NUMLOTECONTABIL': kwargs.get('lote_contabil') } } }, 'CODCOLIGADA={}'.format(codcoligada_contexto))
Python
0.000001
@@ -171,241 +171,8 @@ s):%0A - # codigo de coligada para o contexto, diferente do dataset%0A codcoligada_contexto = kwargs.get('codcoligada_contexto')%0A if not codcoligada_contexto:%0A codcoligada_contexto = kwargs.get('codcoligada')%0A%0A @@ -1555,32 +1555,44 @@ GADA=%7B%7D'.format( +kwargs.get(' codcoligada_cont @@ -1586,25 +1586,18 @@ coligada -_contexto +') ))%0A%0A%0Acla @@ -1691,241 +1691,8 @@ s):%0A - # codigo de coligada para o contexto, diferente do dataset%0A codcoligada_contexto = kwargs.get('codcoligada_contexto')%0A if not codcoligada_contexto:%0A codcoligada_contexto = kwargs.get('codcoligada')%0A%0A @@ -2633,50 +2633,8 @@ '),%0A - 'TIPOCONTABILLAN': 1,%0A @@ -2703,90 +2703,308 @@ ' -VALORSERVICO': 0,%0A 'NUMLOTECONTABIL': kwargs.get('lote_contabil +NUMLOTECONTABIL': 0,%0A 'NUMEROCONTABIL': 0,%0A 'NUMCONTABILBX': 0,%0A 'TIPOCONTABILLAN': 0,%0A 'FILIALCONTABIL': 1,%0A 'HISTORICO': kwargs.get('historico'),%0A 'CODCCUSTO': kwargs.get('centro_custo ')%0A @@ -3069,16 +3069,28 @@ .format( +kwargs.get(' codcolig @@ -3096,16 +3096,9 @@ gada -_contexto +') ))%0A
9cce47d37f6e2d08a66b9deedfc6f2f74b02720a
add int validator
tpl/prompt/validator.py
tpl/prompt/validator.py
# -*- coding:utf-8 -*-
from prompt_toolkit.validation import Validator, ValidationError


class StrValidator(Validator):
    def validate(self, document):
        pass
Python
0.00002
@@ -164,10 +164,290 @@ pass%0A%0A%0A +class IntValidator(Validator):%0A def validate(self, document):%0A text = document.text%0A for index, char in enumerate(text):%0A if not char.isdigit():%0A raise ValidationError(message='Input contains non-numeric char', cursor_position=index)%0A%0A%0A%0A %0A%0A
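A hedged usage sketch for the IntValidator this diff introduces. Validator and ValidationError come from prompt_toolkit.validation exactly as in the record; the prompt() shortcut and its validator keyword are assumptions about how the project wires it up:

```python
from prompt_toolkit import prompt
from prompt_toolkit.validation import Validator, ValidationError

class IntValidator(Validator):
    def validate(self, document):
        # Reject the first non-digit character, pointing the cursor at it.
        for index, char in enumerate(document.text):
            if not char.isdigit():
                raise ValidationError(message='Input contains non-numeric char',
                                      cursor_position=index)

# Re-prompts until the input is digits only:
# count = prompt('Count: ', validator=IntValidator())
```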
c72db8781d2ca5968a9f09dff467d1f7fcd1aa34
Fix compatibility with Python 2
matplotlib_scalebar/dimension.py
matplotlib_scalebar/dimension.py
"""""" # Standard library modules. from operator import itemgetter import bisect # Third party modules. # Local modules. # Globals and constants variables. _PREFIXES_FACTORS = {'Y': 1e24, 'Z': 1e21, 'E': 1e18, 'P': 1e15, 'T': 1e12, 'G': 1e9, 'M': 1e6, 'k': 1e3, 'd': 1e-1, 'c': 1e-2, 'm': 1e-3, u'\u00b5': 1e-6, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12, 'f': 1e-15, 'a': 1e-18, 'z': 1e-21, 'y': 1e-24} class _Dimension(object): def __init__(self, base_units, latexrepr=None): self._base_units = base_units self._units = {base_units: 1.0} if latexrepr is None: latexrepr = base_units self._latexrepr = {base_units: latexrepr} def add_units(self, units, factor, latexrepr=None): """ Add new possible units. :arg units: units :type units: :class:`str` :arg factor: multiplication factor to convert new units into base units :type factor: :class:`float` :arg latexrepr: LaTeX representation of units (if ``None``, use *units) :type latexrepr: :class:`str` """ if units in self._units: raise ValueError('%s already defined' % units) if factor == 1: raise ValueError('Factor cannot be equal to 1') if latexrepr is None: latexrepr = units self._units[units] = factor self._latexrepr[units] = latexrepr def is_valid_units(self, units): return units in self._units and units in self._latexrepr def calculate_preferred(self, value, units): if units not in self._units: raise ValueError('Unknown units: %s' % units) base_value = value * self._units[units] units_factor = sorted(self._units.items(), key=itemgetter(1)) factors = [item[1] for item in units_factor] index = bisect.bisect_right(factors, base_value) newunits, factor = units_factor[index - 1] return base_value / factor, newunits def to_latex(self, units): if units not in self._latexrepr: raise ValueError('Unknown units: %s' % units) return self._latexrepr[units] @property def base_units(self): return self._base_units class SILengthDimension(_Dimension): def __init__(self): super().__init__('m') for prefix, factor in _PREFIXES_FACTORS.items(): latexrepr = None if prefix == u'\u00b5': latexrepr = '$\\mu$m' self.add_units(prefix + 'm', factor, latexrepr) class SILengthReciprocalDimension(_Dimension): def __init__(self): super().__init__('1/m', 'm$^{-1}$') for prefix, factor in _PREFIXES_FACTORS.items(): latexrepr = '{0}m$^{{-1}}$'.format(prefix) if prefix == u'\u00b5': latexrepr = '$\\mu$m$^{-1}$' self.add_units('1/{0}m'.format(prefix), factor, latexrepr) class ImperialLengthDimension(_Dimension): def __init__(self): super().__init__('ft') self.add_units('th', 1 / 12000) self.add_units('in', 1 / 12) self.add_units('yd', 3) self.add_units('ch', 66) self.add_units('fur', 660) self.add_units('mi', 5280) self.add_units('lea', 15840)
Python
0.000067
@@ -2385,32 +2385,55 @@ :%0A super( +SILengthDimension, self ).__init__('m')%0A @@ -2432,16 +2432,16 @@ __('m')%0A - @@ -2731,32 +2731,65 @@ :%0A super( +SILengthReciprocalDimension, self ).__init__('1/m' @@ -3127,32 +3127,32 @@ __init__(self):%0A - super(). @@ -3149,16 +3149,45 @@ super( +ImperialLengthDimension, self ).__init
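The fix in this diff is the classic Python 2 compatibility change: zero-argument super() only exists on Python 3, so each call is rewritten as the explicit two-argument form. A minimal standalone sketch of the pattern, assuming nothing beyond the stdlib:

```python
class Base(object):
    def __init__(self, base_units):
        self.base_units = base_units

class Meters(Base):
    def __init__(self):
        # super().__init__('m') raises TypeError on Python 2;
        # the explicit form below runs on both interpreters.
        super(Meters, self).__init__('m')

print(Meters().base_units)  # 'm'
```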
4f73601c843ff9507064b85ddd33179af9fed653
Raise an exception with the stderr message
utils/unfiltered_pbf.py
utils/unfiltered_pbf.py
# -*- coding: utf-8 -*-
import logging
import os
from string import Template
from subprocess import PIPE, Popen

from .artifact import Artifact
from .osm_xml import OSM_XML

LOG = logging.getLogger(__name__)


class InvalidOsmXmlException(Exception):
    pass


class UnfilteredPBF(object):
    name = 'full_pbf'
    description = 'Unfiltered OSM PBF'
    cmd = Template('osmconvert $osm --out-pbf >$pbf')

    def __init__(self, aoi_geom, output_pbf, url):
        self.aoi_geom = aoi_geom
        self.output_pbf = output_pbf
        self.url = url

    def run(self):
        if self.is_complete:
            LOG.debug("Skipping UnfilteredPBF, file exists")
            return

        osm_xml = "{}.xml".format(self.output_pbf)
        osm_xml_task = OSM_XML(self.aoi_geom, osm_xml, url=self.url)
        osm_xml_task.run()

        convert_cmd = self.cmd.safe_substitute({
            'osm': osm_xml,
            'pbf': self.output_pbf
        })

        LOG.debug('Running: %s' % convert_cmd)

        p = Popen(convert_cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()

        if stderr:
            LOG.warn('Failed: %s', stderr)
            with open(self.input_xml, 'rb') as fd:
                sample = fd.readlines(8)
                raise InvalidOsmXmlException(sample)

        LOG.debug('Osmconvert complete')

    @property
    def results(self):
        return [Artifact([self.output_pbf], UnfilteredPBF.name)]

    @property
    def is_complete(self):
        return os.path.isfile(self.output_pbf)
Python
0.00001
@@ -1130,147 +1130,8 @@ rr:%0A - LOG.warn('Failed: %25s', stderr)%0A with open(self.input_xml, 'rb') as fd:%0A sample = fd.readlines(8)%0A @@ -1172,13 +1172,13 @@ on(s -ample +tderr )%0A%0A
aaee820075b150b641e511dbdb45e6d1ff3da529
Update description of function attribute in class SchemaIndicatorType
api/schema_indicator.py
api/schema_indicator.py
from database.model_indicator import ModelIndicatorType, ModelIndicator, ModelIndicatorParameterType, ModelIndicatorParameter, ModelIndicatorResult from graphene_sqlalchemy import SQLAlchemyObjectType import graphene import logging # Load logging configuration log = logging.getLogger(__name__) class AttributeIndicator: """Generic class to provide descriptions of indicator attributes""" name = graphene.String(description="Indicator name.") description = graphene.String(description="Indicator description.") indicatorTypeId = graphene.ID(description="Indicator type Id of the indicator.") batchOwnerId = graphene.ID(description="Batch owner Id of the indicator.") executionOrder = graphene.Int(description="Order of execution of the indicator when it is executed in a batch with several other indicators.") active = graphene.Boolean(description="Indicates if the indicator is active or inactive. Only active indicators can be executed.") class SchemaIndicator(SQLAlchemyObjectType, AttributeIndicator): """Data quality indicators.""" class Meta: model = ModelIndicator interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorParameter: """Generic class to provide descriptions of indicator parameter attributes""" indicatorId = graphene.ID(description="Indicator Id of the parameter.") parameterTypeId = graphene.String(description="Parameter type Id of the parameter.") value = graphene.String(description="Value of the parameter.") class SchemaIndicatorParameter(SQLAlchemyObjectType, AttributeIndicatorParameter): """Indicator parameters.""" class Meta: model = ModelIndicatorParameter interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorParameterType: """Generic class to provide descriptions of indicator parameter type attributes""" name = graphene.String(description="Parameter type name.") description = graphene.String(description="Parameter type description.") class SchemaIndicatorParameterType(SQLAlchemyObjectType, AttributeIndicatorParameterType): """Indicator parameter types.""" class Meta: model = ModelIndicatorParameterType interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorResult: """Generic class to provide descriptions of indicator result attributes""" indicatorId = graphene.ID(description="Indicator Id of the results set.") sessionId = graphene.ID(description="Session Id of the result set.") alertOperator = graphene.String(description="Alert operator used during the execution of the indicator.") alertThreshold = graphene.Float(description="Alert threshold used during the execution of the indicator.") nbRecords = graphene.Int(description="Number of records in the result set.") nbRecordsAlert = graphene.Int(description="Number of records which triggered an alert in the result set.") nbRecordsNoAlert = graphene.Int(description="Number of records which did not trigger an alert in the result set.") class SchemaIndicatorResult(SQLAlchemyObjectType, AttributeIndicatorResult): """Indicator results.""" class Meta: model = ModelIndicatorResult interfaces = (graphene.relay.Node,) # Keep comma to avoid failure class AttributeIndicatorType: """Generic class to provide descriptions of indicator type attributes""" name = graphene.String(description="Indicator type name.") function = graphene.String(description="Python function used to execute this type of indicator.") class SchemaIndicatorType(SQLAlchemyObjectType, AttributeIndicatorType): """Types of indicators.""" class Meta: model = ModelIndicatorType 
interfaces = (graphene.relay.Node,) # Keep comma to avoid failure
Python
0
@@ -3589,16 +3589,33 @@ ion +of the framework used to exec @@ -3610,20 +3610,20 @@ used to -exec +comp ute this @@ -3627,16 +3627,8 @@ his -type of indi @@ -3628,24 +3628,29 @@ is indicator + type .%22)%0A%0A%0Aclass
451e20818c7fbcc0b45500c71c5c5beee96eb316
update jaxlib
jaxlib/version.py
jaxlib/version.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "0.1.16"
Python
0
@@ -593,7 +593,7 @@ .1.1 -6 +7 %22%0A
1c15d302c2a1df22b4dd89f3215decf141a4c20e
return None if there is an error during scan
abilian/services/antivirus/__init__.py
abilian/services/antivirus/__init__.py
# coding=utf-8
"""
"""
from __future__ import absolute_import

try:
  import clamd
  cd = clamd.ClamdUnixSocket()
  CLAMD_AVAILABLE = True
except ImportError:
  CLAMD_AVAILABLE = False

from abilian.core.models.blob import Blob

from ..base import Service


class AntiVirusService(Service):
  """
  Antivirus service
  """
  name = 'antivirus'

  def scan(self, file_or_stream):
    """
    :param file_or_stream: :class:`Blob` instance, filename or file object

    :returns: True if file is 'clean', False if a virus is detected, None
        if file could not be scanned.

    If `file_or_stream` is a Blob, scan result is stored in
    Blob.meta['antivirus'].
    """
    res = self._scan(file_or_stream)
    if isinstance(file_or_stream, Blob):
      file_or_stream.meta['antivirus'] = res
    return res

  def _scan(self, file_or_stream):
    if not CLAMD_AVAILABLE:
      return None

    content = file_or_stream
    if isinstance(file_or_stream, Blob):
      scan = cd.scan
      # py3 compat: bytes == py2 str(). Pathlib uses os.fsencode()
      content = bytes(file_or_stream.file)
    elif isinstance(file_or_stream, (str, unicode)):
      scan = cd.scan
    else:
      scan = cd.instream

    res = None
    try:
      res = scan(content)
    except clamd.ClamdError as e:
      self.logger.warning('Error during content scan: %s', repr(e))

    if content not in res:
      # may happen if file doesn't exists
      return False

    res = res[content]
    return res[0] == u'OK'


service = AntiVirusService()
Python
0.998415
@@ -1342,16 +1342,34 @@ repr(e)) +%0A return None %0A%0A if
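After this diff, scan() is genuinely tri-state: True for clean, False for infected, and None when scanning was impossible (clamd missing or the scan raised). A hedged sketch of how a caller might branch on that; the handler name is made up:

```python
def describe_scan(result):
    if result is True:
        return 'clean'
    if result is False:
        return 'virus detected'
    return 'not scanned'  # None: clamd unavailable or scan error

print(describe_scan(None))   # 'not scanned'
print(describe_scan(False))  # 'virus detected'
```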
8e10657f94023a69967345114ee221c8d579c05d
Fix error when creating a new issue while not logged in.
trackit/issues/views.py
trackit/issues/views.py
from django.shortcuts import render, get_object_or_404, redirect
from .models import Ticket, Label, User, Comment
import hashlib

# Create your views here.

def home(request):
    issue = Ticket.objects.filter().order_by('-id')
    readit = []
    for i in issue:
        issue_get = {}
        issue_get['id'] = i.id
        issue_get['title'] = i.ticket_title
        issue_get['status'] = i.status
        issue_get['time'] = i.time
        issue_get['label'] = i.label_set.all()
        readit.append(issue_get)
        #pass
    return render(request, 'home.html', {"readit": readit, "request": request})

def issues(request, ticket_id):
    issue = get_object_or_404(Ticket, id=ticket_id)
    issue_get = {}
    issue_get['id'] = issue.id
    issue_get['title'] = issue.ticket_title
    issue_get['status'] = issue.status
    issue_get['time'] = issue.time
    issue_get['label'] = issue.label_set.all()
    return render(request, 'issues.html', {"issue_get": issue_get, "request": request})

def newissues(request):
    if "login" in request.session:
        name = request.session['login']
    else:
        name = "default"
    return render(request, 'newissues.html', {"issue_get": name, "request": request})

def add(request):
    if request.method == 'POST':
        if request.session['login']:
            if request.POST['todo'] == "newissue":
                title = request.POST['title']
                content = request.POST['comment']
                ticket = Ticket(ticket_title=title)
                ticket.save()
                user = get_object_or_404(User, id=1)
                comment = Comment(ticket=ticket, content=content, user=user)
                comment.save()
    return redirect('home')

def loginhere(request):
    return render(request, 'loginhere.html', {"issue_get": "", "request": request})

def login(request):
    #TODO rewrite please
    if request.method == 'POST':
        if request.POST['login_password']:
            plain = request.POST['login_password']
            if hashlib.sha224(plain.encode()).hexdigest() == '71454996db126e238e278a202a7dbc49dda187ec4f8c9dfc95584900':
                #login
                request.session['login'] = request.POST['login_select']
    return redirect('home')

def logout(request):
    if request.session['login']:
        del request.session['login']
    return redirect('home')
Python
0
@@ -1266,32 +1266,43 @@ ST':%0A if +'login' in request.session%5B @@ -1296,33 +1296,24 @@ uest.session -%5B'login'%5D :%0A
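The key change in this diff: `request.session['login']` raises KeyError for visitors who never logged in, while a membership test does not. Illustrated with a plain dict standing in for the Django session, which supports the same `in` operator:

```python
session = {}

# session['login']          # KeyError for an anonymous visitor
print('login' in session)   # False -- safe to call either way
session['login'] = 'alice'
print('login' in session)   # True
```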
bb295c5235a6d938ae5f1228c663ba84c3b35205
Use named parameters for redis setex
authmodules/apache2/privacyidea_apache.py
authmodules/apache2/privacyidea_apache.py
# -*- coding: utf-8 -*- # # 2015-06-04 Cornelius Kölbel <[email protected]> # Initial writeup # # (c) Cornelius Kölbel # Info: http://www.privacyidea.org # # This code is free software; you can redistribute it and/or # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE # License as published by the Free Software Foundation; either # version 3 of the License, or any later version. # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU AFFERO GENERAL PUBLIC LICENSE for more details. # # You should have received a copy of the GNU Affero General Public # License along with this program. If not, see <http://www.gnu.org/licenses/>. # __doc__ = """This is the Apache module to be used with mod_python with the privacyIDEA authentication system to add OTP to Apache basic authentication. To protect an Apache directory or Location add this to your apache config:: <Directory /var/www/html/secretdir> AuthType Basic AuthName "Protected Area" AuthBasicProvider wsgi WSGIAuthUserScript /usr/share/pyshared/privacyidea_apache.py Require valid-user </Directory> The code is tested in test_mod_apache.py """ import redis import requests import syslog import traceback import passlib.hash from six.moves import configparser OK = True UNAUTHORIZED = False CONFIG_FILE = "/etc/privacyidea/apache.conf" DEFAULT_PRIVACYIDEA = "https://localhost" DEFAULT_SSLVERIFY = False DEFAULT_REDIS = "localhost" DEFAULT_TIMEOUT = 300 ROUNDS = 2342 SALT_SIZE = 10 def check_password(environ, username, password): PRIVACYIDEA, REDIS, SSLVERIFY, TIMEOUT = _get_config() syslog.syslog(syslog.LOG_DEBUG, "Authentication with {0!s}, {1!s}, {2!s}".format( PRIVACYIDEA, REDIS, SSLVERIFY)) r_value = UNAUTHORIZED rd = redis.Redis(REDIS) # check, if the user already exists in the database. 
key = _generate_key(username, environ) value = rd.get(key) if value and passlib.hash.pbkdf2_sha512.verify(password, value): # update the timeout rd.setex(key, _generate_digest(password), TIMEOUT) r_value = OK else: # Check against privacyidea data = {"user": username, "pass": password} response = requests.post(PRIVACYIDEA + "/validate/check", data=data, verify=SSLVERIFY) if response.status_code == 200: try: json_response = response.json() syslog.syslog(syslog.LOG_DEBUG, "requests > 1.0") except Exception as exx: # requests < 1.0 json_response = response.json syslog.syslog(syslog.LOG_DEBUG, "requests < 1.0") syslog.syslog(syslog.LOG_DEBUG, "{0!s}".format(traceback.format_exc())) if json_response.get("result", {}).get("value"): rd.setex(key, _generate_digest(password), TIMEOUT) r_value = OK else: syslog.syslog(syslog.LOG_ERR, "Error connecting to privacyIDEA: " "%s: %s" % (response.status_code, response.text)) return r_value def _generate_digest(password): pw_dig = passlib.hash.pbkdf2_sha512.encrypt(password, rounds=ROUNDS, salt_size=SALT_SIZE) return pw_dig def _generate_key(username, environ): key = "{0!s}+{1!s}+{2!s}+{3!s}".format(environ.get("SERVER_NAME", ""), environ.get("SERVER_PORT", ""), environ.get("DOCUMENT_ROOT", ""), username) return key def _get_config(): """ Try to read config from the file /etc/privacyidea/apache.conf The config values are redis = IPAddress:Port privacyidea = https://hostname/path sslverify = True | filename to CA bundle timeout = seconds :return: The configuration :rtype: dict """ config_file = configparser.ConfigParser() config_file.read(CONFIG_FILE) PRIVACYIDEA = DEFAULT_PRIVACYIDEA SSLVERIFY = DEFAULT_SSLVERIFY REDIS = DEFAULT_REDIS TIMEOUT = DEFAULT_TIMEOUT try: PRIVACYIDEA = config_file.get("DEFAULT", "privacyidea") or DEFAULT_PRIVACYIDEA SSLVERIFY = config_file.get("DEFAULT", "sslverify") or DEFAULT_SSLVERIFY if SSLVERIFY == "False": SSLVERIFY = False elif SSLVERIFY == "True": SSLVERIFY = True REDIS = config_file.get("DEFAULT", "redis") or DEFAULT_REDIS TIMEOUT = config_file.get("DEFAULT", "timeout") or DEFAULT_TIMEOUT TIMEOUT = int(TIMEOUT) except configparser.NoOptionError as exx: syslog.syslog(syslog.LOG_ERR, "{0!s}".format(exx)) syslog.syslog(syslog.LOG_DEBUG, "Reading configuration {0!s}, {1!s}, {2!s}".format( PRIVACYIDEA, REDIS, SSLVERIFY)) return PRIVACYIDEA, REDIS, SSLVERIFY, TIMEOUT
Python
0.000001
@@ -2202,32 +2202,38 @@ rd.setex(key, +value= _generate_digest @@ -2236,32 +2236,37 @@ gest(password), +time= TIMEOUT)%0A @@ -3056,16 +3056,22 @@ ex(key, +value= _generat @@ -3090,16 +3090,21 @@ sword), +time= TIMEOUT)
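Why named parameters matter here: redis-py swapped setex's positional order between major versions (2.x takes name, value, time; 3.x takes name, time, value to match the Redis command), so keyword arguments keep the call unambiguous under either order. A hedged sketch; the host and values are placeholders:

```python
import redis

rd = redis.Redis('localhost')
# Reads the same under redis-py 2.x and 3.x argument orders:
rd.setex('session-key', value='digest', time=300)
```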
7e4b66fe3df07afa431201de7a5a76d2eeb949a1
Fix django custom template tag importing
app/main.py
app/main.py
#!/usr/bin/env python

from env_setup import setup_django
setup_django()
from env_setup import setup
setup()

from webapp2 import RequestHandler, Route, WSGIApplication

from agar.env import on_production_server
from agar.config import Config
from agar.django.templates import render_template


class MainApplicationConfig(Config):
    """
    :py:class:`~agar.config.Config` settings for the ``main``
    `webapp2.WSGIApplication`_.

    Settings are under the ``main_application`` namespace.

    The following settings (and defaults) are provided::

        main_application_NOOP = None

    To override ``main`` `webapp2.WSGIApplication`_ settings, define values
    in the ``appengine_config.py`` file in the root of your project.
    """
    _prefix = 'main_application'

    #: A no op.
    NOOP = None

config = MainApplicationConfig.get_config()


class MainHandler(RequestHandler):
    def get(self):
        render_template(self.response, 'index.html')


application = WSGIApplication(
    [
        Route('/', MainHandler, name='main'),
    ],
    debug=not on_production_server)


def main():
    from google.appengine.ext.webapp import template, util
    template.register_template_library('agar.django.templatetags')
    util.run_wsgi_app(application)


if __name__ == '__main__':
    main()
Python
0.000001
@@ -16,20 +16,22 @@ python%0A%0A -from +import env_set @@ -36,29 +36,39 @@ etup - import setup_django%0A +; env_setup.setup(); env_setup. setu @@ -82,42 +82,95 @@ o()%0A +%0A from -env_setup import setup%0Asetup( +django.template import add_to_builtins%0Aadd_to_builtins('agar.django.templatetags' )%0A%0Af
3c6a6e3a8f99f580d10366307e34031aa81ac344
Exclude disabled widgets from bundles
app/main.py
app/main.py
#!/usr/bin/env python import json import logging import os import Queue import SocketServer from apscheduler.scheduler import Scheduler from datetime import datetime, timedelta from flask import Flask, render_template, Response, request, abort from flask.ext.assets import Environment, Bundle from flask.templating import TemplateNotFound from jobs import load_jobs from random import randint app = Flask(__name__) app.config.from_envvar('JARVIS_SETTINGS') widgets_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'static', 'widgets')) app.jinja_env.trim_blocks = True app.jinja_env.lstrip_blocks = True assets = Environment(app) sched = Scheduler() queues = {} last_events = {} @app.before_first_request def _setup_logging(): logging.getLogger('apscheduler.scheduler').addHandler( logging.StreamHandler()) if not app.debug: app.logger.addHandler(logging.StreamHandler()) app.logger.setLevel(logging.INFO) @app.before_first_request def _configure_bundles(): js = ['main.js'] css = ['main.css'] for widget in os.listdir(widgets_path): widget_path = os.path.join('widgets', widget) for asset_file in os.listdir(os.path.join(widgets_path, widget)): asset_path = os.path.join(widget_path, asset_file) if asset_file.endswith('.js'): js.append(asset_path) elif asset_file.endswith('.css'): css.append(asset_path) assets.register('js_all', Bundle(*js, output='gen/app.js')) assets.register('css_all', Bundle(*css, output='gen/styles.css')) assets.register('js_min_all', Bundle(*js, filters='rjsmin', output='gen/app.min.js')) assets.register('css_min_all', Bundle(*css, filters='cssmin', output='gen/styles.min.css')) @app.route('/w/<widget>') @app.route('/widget/<widget>') def widget(widget): if not _is_enabled(widget): abort(404) x = request.args.get('x', 2) y = request.args.get('y', 2) return render_template('index.html', layout='layout_single.html', widget=widget, x=x, y=y) @app.route('/') @app.route('/d/<layout>') @app.route('/dashboard/<layout>') def dashboard(layout=None): locale = request.args.get('locale') if layout is not None: try: return render_template('index.html', layout='layouts/{0}.html'.format(layout), locale=locale) except TemplateNotFound: abort(404) return render_template('index.html', locale=locale) @app.route('/events') def events(): remote_port = request.environ['REMOTE_PORT'] current_queue = Queue.Queue() queues[remote_port] = current_queue for event in last_events.values(): current_queue.put(event) def consume(): while True: data = current_queue.get() if data is None: break yield 'data: %s\n\n' % (data,) response = Response(consume(), mimetype='text/event-stream') response.headers['X-Accel-Buffering'] = 'no' return response @app.route('/events/<widget>', methods=['POST']) def create_event(widget): if not _is_enabled(widget): abort(404) data = request.data if not data: abort(400) body = json.loads(data) _add_event(widget, body) return '', 201 def _is_enabled(name, conf=None): if conf is None: conf = app.config['JOBS'] return name in conf and conf[name].get('enabled') @app.context_processor def _inject_template_methods(): return dict(is_widget_enabled=_is_enabled) @app.before_first_request def _configure_jobs(): conf = app.config['JOBS'] offset = 0 for name, cls in load_jobs().items(): if not _is_enabled(name, conf): app.logger.info('Skipping disabled job: %s', name) continue job = cls(conf[name]) if app.debug: start_date = datetime.now() + timedelta(seconds=1) else: offset += randint(4, 10) start_date = datetime.now() + timedelta(seconds=offset) job.start_date = start_date 
app.logger.info('Scheduling job: %s', job) sched.add_interval_job(_run_job, name=name, seconds=job.interval, start_date=job.start_date, kwargs={'widget': name, 'job': job}) if not sched.running: sched.start() def _add_event(widget, body): json_data = json.dumps({ 'widget': widget, 'body': body }) last_events[widget] = json_data for queue in queues.values(): queue.put(json_data) def _run_job(widget, job): body = job.get() if not body: return _add_event(widget, body) def _close_stream(*args, **kwargs): remote_port = args[2][1] if remote_port in queues: del queues[remote_port] SocketServer.BaseServer.handle_error = _close_stream
Python
0
@@ -1137,16 +1137,182 @@ _path):%0A + if not _is_enabled(widget):%0A app.logger.info('Excluding disabled widget from bundle: %25s',%0A widget)%0A continue%0A
b8d3a008dfb8203b7cda276f03148bf34daf20be
store the job after changing the state
account_move_batch_validate/account.py
account_move_batch_validate/account.py
# -*- coding: utf-8 -*-
###############################################################################
#                                                                             #
#   Author: Leonardo Pistone
#   Copyright 2014 Camptocamp SA
#                                                                             #
#   This program is free software: you can redistribute it and/or modify     #
#   it under the terms of the GNU Affero General Public License as           #
#   published by the Free Software Foundation, either version 3 of the       #
#   License, or (at your option) any later version.                          #
#                                                                             #
#   This program is distributed in the hope that it will be useful,          #
#   but WITHOUT ANY WARRANTY; without even the implied warranty of           #
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the             #
#   GNU Affero General Public License for more details.                      #
#                                                                             #
#   You should have received a copy of the GNU Affero General Public License #
#   along with this program. If not, see <http://www.gnu.org/licenses/>.     #
#                                                                             #
###############################################################################
"""Accounting customisation for delayed posting."""

from openerp.osv import fields, orm
from openerp.tools.translate import _
from openerp.addons.connector.queue.job import job
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.connector.queue.job import OpenERPJobStorage


class account_move(orm.Model):
    """We modify the account move to allow delayed posting."""

    _name = 'account.move'
    _inherit = 'account.move'
    _columns = {
        'to_post': fields.boolean(
            'To Post',
            help='Check this box to mark the move for batch posting'
        ),
        'post_job_uuid': fields.char(
            'UUID of the Job to approve this move'
        ),
    }

    def _delay_post_marked(self, cr, uid, eta=None, context=None):
        """Create a job for every move marked for posting.

        If some moves already have a job, they are skipped.

        """
        if context is None:
            context = {}

        session = ConnectorSession(cr, uid, context=context)

        move_ids = self.search(cr, uid, [
            ('to_post', '=', True),
            ('post_job_uuid', '=', False),
            ('state', '=', 'draft'),
        ], context=context)

        for move_id in move_ids:
            job_uuid = validate_one_move.delay(session, self._name,
                                               move_id, eta=eta)
            self.write(cr, uid, [move_id], {
                'post_job_uuid': job_uuid
            })

    def _cancel_jobs(self, cr, uid, context=None):
        """Find moves where the mark has been removed and cancel the jobs.

        For the moves that are posted already it's too late: we skip them.

        """
        if context is None:
            context = {}

        session = ConnectorSession(cr, uid, context=context)
        storage = OpenERPJobStorage(session)

        move_ids = self.search(cr, uid, [
            ('to_post', '=', False),
            ('post_job_uuid', '!=', False),
            ('state', '=', 'draft'),
        ], context=context)

        for move in self.browse(cr, uid, move_ids, context=context):
            job = storage.load(move.post_job_uuid)
            if job.state in (u'pending', u'enqueued'):
                job.set_done(result=_(
                    u'Task set to Done because the user unmarked the move'
                ))

    def mark_for_posting(self, cr, uid, move_ids, eta=None, context=None):
        """Mark a list of moves for delayed posting, and enqueue the jobs."""
        if context is None:
            context = {}
        self.write(cr, uid, move_ids, {'to_post': True}, context=context)
        self._delay_post_marked(cr, uid, eta=eta, context=context)

    def unmark_for_posting(self, cr, uid, move_ids, context=None):
        """Unmark moves for delayed posting, and cancel the jobs."""
        if context is None:
            context = {}
        self.write(cr, uid, move_ids, {'to_post': False}, context=context)
        self._cancel_jobs(cr, uid, context=context)


@job
def validate_one_move(session, model_name, move_id):
    """Validate a move, and leave the job reference in place."""
    session.pool['account.move'].button_validate(
        session.cr,
        session.uid,
        [move_id]
    )
Python
0.000005
@@ -3809,16 +3809,51 @@ )) +%0A storage.store(job) %0A%0A de
35c52ecbe34611f003d8f647dafdb15c00d70212
update doc
python/git_pull_codedir/git_pull_codedir.py
python/git_pull_codedir/git_pull_codedir.py
# -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
##   https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE
##
## File : git_pull_codedir.py
## Author : Denny <[email protected]>
## Description :
## --
## Created : <2017-03-24>
## Updated: Time-stamp: <2017-03-24 15:51:04>
##-------------------------------------------------------------------
import os, sys
import sys
import logging
import argparse
# Notice: Need to run: pip install GitPython
import git

logger = logging.getLogger("git_pull_codedir")
formatter = logging.Formatter('%(name)-12s %(asctime)s %(levelname)-8s %(message)s', '%a, %d %b %Y %H:%M:%S',)
file_handler = logging.FileHandler("/var/log/git_pull_codedir.log")
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stderr)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)

def git_pull(code_dir):
    logger.info("Run git pull in %s" %(code_dir))
    if os.path.exists(code_dir) is False:
        logger.error("Code directory(%s): doesn't exist" % (code_dir))
        sys.exit(1)
    os.chdir(code_dir)
    g = git.cmd.Git(code_dir)
    g.pull()

# Sample python perform_git_pull.py --code_dirs "/data/code_dir/repo1,/data/code_dir/repo2"
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--code_dirs', required=True, \
                        help="Code directories to pull. If multiple, separated by comma", type=str)
    l = parser.parse_args()
    code_dirs = l.code_dirs

    separator = ","
    for code_dir in code_dirs.split(separator):
        git_pull(code_dir)
## File : git_pull_codedir.py ends
Python
0
@@ -401,17 +401,17 @@ 03-2 -4 15:51:0 +7 18:10:4 4%3E%0A# @@ -1298,24 +1298,16 @@ hon -perform_ git_pull .py @@ -1302,16 +1302,24 @@ git_pull +_codedir .py --co
6039be54e9fd79ac3916a50de6ab554aa1355bde
disable debug mode
sources/opsagent/config.py
sources/opsagent/config.py
'''
VisualOps agent configuration manager class
(c) 2014 - MadeiraCloud LTD.

@author: Thibault BRONCHAIN
'''


# System imports
import ConfigParser
from ConfigParser import SafeConfigParser
from copy import deepcopy
import sys
import os
import re

# Custon imports
from opsagent.exception import ConfigFileFormatException, ConfigFileException


# Config class
class Config():
    requiredKeys = {
        'global': {
            'envroot': "Virtual environment root",
            'conf_path': "Configuration directory",
            'log_path': "Logs directory",
            'package_path': "Relative to envroot runtime package location",
            'scripts_path': "Scripts location",
            'token': "Unique identification file path",
            'watch': "Watched files checksum location",
            'logfile': "Log file location",
            },
        'userdata': {
            'ws_uri': "Backend connection URI",
            'app_id': "Application ID",
            'version': "Curent release version",
            'base_remote': "Base URL to fetch the sources",
            'gpg_key_uri': "Reference URI for GPG key",
            },
        'module': {
            'root': "Salt modules repo root",
            'name': "Salt modules repo name",
            'bootstrap': "Salt modules bootstrap script",
            'mod_repo': "Salt modules repo URI",
            'mod_tag': "Salt modules repo tag",
            },
        }

    defaultValues = {
        'global': {
            'user': 'root',
#            'loglvl': 'INFO',
            'loglvl': 'DEBUG', #switch to debug
            'proc': '/proc',
            'pidfile': '/tmp/opsagentd.pid',
            'haltfile': '/tmp/opsagentd.halt',
            'token_reset': ['app_id'],
            },
        'runtime': {
            'proc': False,
            'config_path': None,
            'clone': False,
            'tag': False,
            'compat': False,
            },
        'network': {
            'instance_id': "http://169.254.169.254/latest/meta-data/instance-id",
            'userdata': "http://169.254.169.254/latest/user-data",
            },
        'salt': {
            'pkg_cache': '/var/cache/pkg',
            'srv_root': '/srv/salt',
            'extension_modules': '/var/cache/salt/minion/extmods',
            'cachedir': '/var/cache/visualops',
            # delay between each round
            'delay': '10',
            # command timeout (deprecated)
            'timeout': '30',
            'runtime': {}
            },
        'module': {
            # Locations relatives to modules directory (default /opt/visualops/env/lib/python-*/sites-package)
            'dst_adaptor': 'opsagent/state/adaptor.py',
            # Locations relatives to salt repository (default /opt/visualops/boostrap/salt)
            'src_salt': 'sources/salt',
            'src_adaptor': 'sources/adaptor.py',
            # Compatibility file relative to salt repository (default /opt/visualops/boostrap/salt)
            'compat': 'compat.txt',
            },
        }

    chrootKeys = {
        # 'Chrooted' to curent environment (default /opt/visualops/env)
        'salt': ['pkg_cache','srv_root','extension_modules','cachedir'],
        }

    def __init__(self, f=None):
        self.__parser = SafeConfigParser()
        self.__c = (deepcopy(Config.defaultValues) if Config.defaultValues else {})
        if f:
            self.__read_file(f)
            try:
                self.parse_file()
                self.check_required(Config.requiredKeys)
                self.chroot(root=self.__c['global']['envroot'], mod=Config.chrootKeys)
            except ConfigFileFormatException:
                sys.stderr.write("ERROR: Invalid config file '%s'\n"%(f))
                raise ConfigFileException
            except Exception as e:
                sys.stderr.write("ERROR: Invalid config file '%s': %s\n"%(f,e))
                raise ConfigFileException
            else:
                sys.stdout.write("Config file loaded '%s'\n"%(f))

    def __read_file(self, f):
        try:
            self.__parser.read(f)
        except ConfigParser.ParsingError as e:
            sys.stderr.write("ERROR: Can't load config file %s, %s\n"%(f,e))
        else:
            sys.stdout.write("Config file parsed '%s'\n"%(f))

    def parse_file(self, f=None):
        if f:
            self.__read_file(f)
        for name in self.__parser.sections():
            if name is 'runtime': continue
            self.__c.setdefault(name, {})
            for key, value in self.__parser.items(name):
                self.__c[name][key] = value

    def check_required(self, required):
        valid = True
        for section in required:
            if section not in self.__c:
                sys.stderr.write("ERROR: Missing section '%s' in current configuration file\n"%(section))
                valid = False
                continue
            for key in required[section]:
                if key not in self.__c[section]:
                    sys.stderr.write("ERROR: Missing key '%s' in section '%s' in current configuration file\n"%(key,section))
                    valid = False
        if not valid:
            raise ConfigFileException

    def getConfig(self, copy=False):
        return (self.__c if not copy else deepcopy(self.__c))

    def chroot(self, root, mod):
        for section in mod:
            for key in mod[section]:
                if self.__c[section].get(key):
                    self.__c[section][key] = os.path.normpath(root+'/'+self.__c[section][key])
Python
0.000001
@@ -1504,17 +1504,16 @@ 'root',%0A -# @@ -1534,16 +1534,17 @@ 'INFO',%0A +#
3ab5586ec4ac9ff3ac3fd7583bc9a71c7b5cd27a
fix lockedNormal, use MItMeshPolygon instead of MItMeshVertex, fix Fix() fucntion
python/medic/plugins/Tester/lockedNormal.py
python/medic/plugins/Tester/lockedNormal.py
from medic.core import testerBase
from maya import OpenMaya


class LockedNormal(testerBase.TesterBase):
    Name = "LockedNormal"
    Description = "vertex(s) which has locked normal"
    Fixable = True

    def __init__(self):
        super(LockedNormal, self).__init__()

    def Match(self, node):
        return node.object().hasFn(OpenMaya.MFn.kMesh)

    def Test(self, node):
        it = None
        mesh = None
        try:
            it = OpenMaya.MItMeshVertex(node.object())
            mesh = OpenMaya.MFnMesh(node.object())
        except:
            return (False, None)

        result = False

        comp = OpenMaya.MFnSingleIndexedComponent()
        comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)

        while (not it.isDone()):
            normal_indices = OpenMaya.MIntArray()
            it.getNormalIndices(normal_indices)

            for i in range(normal_indices.length()):
                if mesh.isNormalLocked(normal_indices[i]):
                    result = True
                    comp.addElement(it.index())
                    break

            it.next()

        return (result, comp_obj if result else None)

    def Fix(self, node, component, parameterParser):
        if node.dg().isFromReferencedFile():
            return False

        target_normal_indices = OpenMaya.MIntArray()

        mesh = OpenMaya.MFnMesh(node.object())
        it = OpenMaya.MItMeshVertex(node.getPath(), component)
        while (not it.isDone()):
            normal_indices = OpenMaya.MIntArray()
            it.getNormalIndices(normal_indices)

            for i in range(normal_indices.length()):
                target_normal_indices.append(normal_indices[i])
            it.next()

        mesh.unlockVertexNormals(target_normal_indices)

        return True


Tester = LockedNormal
Python
0
@@ -457,30 +457,31 @@ Maya.MItMesh -Vertex +Polygon (node.object @@ -597,325 +597,269 @@ -result = False%0A%0A comp = OpenMaya.MFnSingleIndexedComponent()%0A comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)%0A%0A while (not it.isDone()):%0A normal_indices = OpenMaya.MIntArray()%0A it.getNormalIndices(normal_indices)%0A%0A for i in range(normal_indices.length()): +vertices = OpenMaya.MIntArray()%0A%0A while (not it.isDone()):%0A for i in range(it.polygonVertexCount()):%0A vi = it.vertexIndex(i)%0A if vi in vertices:%0A continue%0A%0A ni = it.normalIndex(i) %0A @@ -895,32 +895,17 @@ Locked(n -ormal_indices%5Bi%5D +i ):%0A @@ -923,120 +923,253 @@ -result = True%0A comp.addElement(it.index())%0A break%0A%0A it.next()%0A%0A +vertices.append(vi)%0A%0A it.next()%0A%0A if vertices.length() %3E 0:%0A comp = OpenMaya.MFnSingleIndexedComponent()%0A comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)%0A comp.addElements(vertices)%0A @@ -1180,22 +1180,20 @@ return ( -result +True , comp_o @@ -1198,23 +1198,33 @@ _obj - if result e +)%0A%0A return (Fa lse +, Non @@ -1355,62 +1355,8 @@ se%0A%0A - target_normal_indices = OpenMaya.MIntArray()%0A%0A @@ -1398,24 +1398,25 @@ ject())%0A +%0A it = Ope @@ -1399,34 +1399,40 @@ ect())%0A%0A -it +vertices = OpenMaya.MItM @@ -1433,316 +1433,120 @@ a.MI -tMeshVertex(node.getPath(), component)%0A while (not it.isDone()):%0A normal_indices = OpenMaya.MIntArray()%0A it.getNormalIndices(normal_indices)%0A%0A for i in range(normal_indices.length()):%0A target_normal_indices.append(normal_indices%5Bi%5D)%0A it.next( +ntArray()%0A ver_comp = OpenMaya.MFnSingleIndexedComponent(component)%0A ver_comp.getElements(vertices )%0A%0A @@ -1581,25 +1581,12 @@ als( -target_normal_ind +vert ices
e74b4867f9067e28686aecd19eb6f1d352ee28bf
fix imports
game.py
game.py
import random

from characters import guests as people
from adventurelib import Item, Bag, when, start
import rooms
import characters
from sys import exit



murder_config_people = list(people)
random.shuffle(murder_config_people)
murder_location = random.choice(list(rooms.rooms))
murderer = random.choice(list(people))

current_config_people = list(people)
random.shuffle(current_config_people)
current_location = random.choice(list(rooms.rooms))


@when('where am i')
def my_room():
    print("I am in: ", current_location)


@when('go to ROOM')
@when('go to the ROOM')
def to_room(room):
    global current_location
    r = rooms.rooms.find(room)
    if current_location == r:
        print("I am already in %s" % room)
    elif r:
        print("I am now in %s" % room)
        current_location = r
    else:
        print("I can't find the %s" % room)


@when('it was PERSON')
def accuse(person):
    p = people.find(person)
    if p == murderer:
        print ("Yes, %s is the murderer!" % p)
        exit
    else:
        if p:
            print ("%s said: 'How could you!'" % p)
        else:
            print ("No one has ever heard of '%s'!" % person)


start()
Python
0.000002
@@ -75,19 +75,8 @@ port - Item, Bag, whe @@ -101,26 +101,8 @@ oms%0A -import characters%0A from @@ -120,17 +120,16 @@ exit%0A%0A%0A -%0A murder_c
38f682604b7ed69799cc795eaead631dbd384c7e
allow ttl of 0
nsone/rest/records.py
nsone/rest/records.py
#
# Copyright (c) 2014 NSONE, Inc.
#
# License under The MIT License (MIT). See LICENSE in project root.
#

from . import resource


class Records(resource.BaseResource):

    ROOT = 'zones'

    def _buildBody(self, zone, domain, type, answers, ttl=None):
        body = {}
        body['zone'] = zone
        body['domain'] = domain
        body['type'] = type
        body['answers'] = answers
        if ttl:
            body['ttl'] = int(ttl)
        return body

    def create(self, zone, domain, type, answers, ttl=None,
               callback=None, errback=None):
        body = self._buildBody(zone, domain, type, answers, ttl)
        return self._make_request('PUT',
                                  '%s/%s/%s/%s' % (self.ROOT, zone,
                                                   domain, type),
                                  body=body,
                                  callback=callback,
                                  errback=errback)

    def update(self, zone, domain, type, answers, ttl=None,
               callback=None, errback=None):
        body = {
            'answers': answers
        }
        if ttl:
            body['ttl'] = ttl
        return self._make_request('POST',
                                  '%s/%s/%s/%s' % (self.ROOT, zone,
                                                   domain, type),
                                  body=body,
                                  callback=callback,
                                  errback=errback)

    def delete(self, zone, domain, type, callback=None, errback=None):
        return self._make_request('DELETE',
                                  '%s/%s/%s/%s' % (self.ROOT, zone,
                                                   domain, type),
                                  callback=callback,
                                  errback=errback)

    def retrieve(self, zone, domain, type, callback=None, errback=None):
        return self._make_request('GET',
                                  '%s/%s/%s/%s' % (self.ROOT, zone,
                                                   domain, type),
                                  callback=callback,
                                  errback=errback)
Python
0.002377
@@ -394,32 +394,44 @@ s%0A if ttl + is not None :%0A bo
aaba085cd2e97c8c23e6724da3313d42d12798f0
Make sure request.user is a user
app/grandchallenge/annotations/validators.py
app/grandchallenge/annotations/validators.py
from rest_framework import serializers
from django.conf import settings


def validate_grader_is_current_retina_user(grader, context):
    """
    This method checks if the passed grader equals the request.user that is
    passed in the context. Only applies to users that are in the
    retina_graders group.
    """
    request = context.get("request")
    if request and request.user.is_authenticated:
        user = request.user
        if user.groups.filter(
            name=settings.RETINA_GRADERS_GROUP_NAME
        ).exists():
            if grader != user:
                raise serializers.ValidationError(
                    "User is not allowed to create annotation for other grader"
                )
Python
0.999944
@@ -351,23 +351,90 @@ %0A if -request +(%0A request is not None%0A and request.user is not None%0A and req @@ -459,16 +459,22 @@ nticated +%0A ) :%0A
445740ee2630eca017b4899b96fef8ffeda0e7ea
update gist extension
gist.py
gist.py
""" This is the gist share button, and a %gist magic, as a Python extension. You can also get just the gist button without this extension by adding the contents of gist.js to static/js/custom.js in your profile. This code requires that you have the jist rubygem installed and properly configured. """ gist_js = r""" /* Add the contents of this file to your custom.js for it to always be on. */ IPython.ext_update_gist_link = function(gist_id) { IPython.notebook.metadata.gist_id = gist_id; var toolbar = IPython.toolbar.element; var link = toolbar.find("a#nbviewer"); if ( ! link.length ) { link = $('<a id="nbviewer" target="_blank"/>'); toolbar.append( $('<span id="nbviewer_span"/>').append(link) ); } link.attr("href", "http://nbviewer.ipython.org/" + gist_id); link.text("http://nbviewer.ipython.org/" + gist_id); }; IPython.ext_handle_gist_output = function(output_type, content) { if (output_type != 'stream' || content['name'] != 'stdout') { return; } var gist_id = jQuery.trim(content['data']); if (! gist_id.match(/[A-Za-z0-9]+/g)) { alert("Gist seems to have failed: " + gist_id); return; } IPython.ext_update_gist_link(gist_id); }; IPython.ext_gist_notebook = function () { var gist_id = IPython.notebook.metadata.gist_id || null; var cmd = '_nbname = "' + IPython.notebook.notebook_name + '.ipynb"'; cmd = cmd + '\nlines = !jist -p' if (gist_id) { cmd = cmd + ' -u ' + gist_id; } cmd = cmd + ' "$_nbname"'; cmd = cmd + '\nprint lines[0].replace("https://gist.github.com", "").replace("/","")'; IPython.notebook.kernel.execute(cmd, {'output' : IPython.ext_handle_gist_output}); }; setTimeout(function() { if ($("#gist_notebook").length == 0) { IPython.toolbar.add_buttons_group([ { 'label' : 'Share Notebook as gist', 'icon' : 'ui-icon-share', 'callback': IPython.ext_gist_notebook, 'id' : 'gist_notebook' }, ]) } if (IPython.notebook.metadata.gist_id) { IPython.ext_update_gist_link(IPython.notebook.metadata.gist_id); } }, 1000); """ from IPython.display import display, Javascript def gist(line=''): display(Javascript("IPython.ext_gist_notebook()")) def load_ipython_extension(ip): display(Javascript(gist_js)) ip.magics_manager.register_function(gist)
Python
0
@@ -2293,11 +2293,10 @@ play -, J +_j avas @@ -2324,34 +2324,34 @@ ''):%0A display -(J +_j avascript(%22IPyth @@ -2373,17 +2373,26 @@ ebook()%22 -) +, raw=True )%0A%0Adef l @@ -2433,10 +2433,10 @@ play -(J +_j avas @@ -2448,17 +2448,26 @@ (gist_js -) +, raw=True )%0A ip
9108f24183b2743647a8ed3ab354673e945d5f2a
Update release number
mailparser_version/__init__.py
mailparser_version/__init__.py
__version__ = "1.0.0"
Python
0.000001
@@ -10,13 +10,13 @@ __ = %221. -0 +1 .0%22%0A
7634094e838cb3ac8241519016727f339e7905be
change timezone to Asia/Seoul
django/ams2/settings.py
django/ams2/settings.py
""" Django settings for ams2 project. Generated by 'django-admin startproject' using Django 1.11.6. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os import datetime # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER'] DEFAULT_FROM_EMAIL = EMAIL_HOST_USER EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD'] # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '$ddcywfrdw451xk3#4f4$(c&$m)b4r+iga#pa=0r56s-*yip44' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = [ 'localhost', '.icists.org', '.dev-icists.org' ] # Application definition ALL_AUTH_APPS = [ 'allauth', 'allauth.account', 'allauth.socialaccount', # ... include the providers you want to enable: # 'allauth.socialaccount.providers.google', # 'allauth.socialaccount.providers.facebook', ] ACCOUNT_USER_MODEL_USERNAME_FIELD = None ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_USERNAME_REQUIRED = False ACCOUNT_AUTHENTICATION_METHOD = 'email' PREREQ_APPS = ALL_AUTH_APPS + [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'phonenumber_field', 'django_countries', 'solo', 'djmoney', 'rest_framework', 'rest_framework_swagger', 'corsheaders', 'rest_auth', ] PROJECT_APPS = [ 'accounts', 'policy', 'registration', ] INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS CORS_ORIGIN_ALLOW_ALL = True CORS_EXPOSE_HEADERS = [ 'Authorization' ] MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'ams2.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] AUTHENTICATION_BACKENDS = ( # Needed to login by username in Django admin, regardless of `allauth` 'django.contrib.auth.backends.ModelBackend', # `allauth` specific authentication methods, such as login by e-mail 'allauth.account.auth_backends.AuthenticationBackend', ) WSGI_APPLICATION = 'ams2.wsgi.application' CURRENCIES = ['KRW', 'USD'] CURRENCY_CHOICES = [('KRW', 'KRW'), ('USD', 'USD')] CURRENCY_DECIMAL_PLACES = 0 # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases if 'MYSQL_DATABASE' in os.environ: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': os.environ['MYSQL_DATABASE'], 'USER': os.environ['MYSQL_USER'], 'PASSWORD': 
os.environ['MYSQL_PASSWORD'], 'HOST': os.environ['DATABASE_URL'], 'PORT': os.environ['MYSQL_PORT'], } } else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] AUTH_USER_MODEL = 'accounts.User' # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = '/static/' SITE_ID = 1 # Django Rest Framework REST_FRAMEWORK = { 'DEFAULT_RENDERER_CLASSES': ( 'djangorestframework_camel_case.render.CamelCaseJSONRenderer', 'rest_framework.renderers.BrowsableAPIRenderer', ), 'DEFAULT_PARSER_CLASSES': ( 'djangorestframework_camel_case.parser.CamelCaseJSONParser', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework_jwt.authentication.JSONWebTokenAuthentication', 'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.BasicAuthentication', ), 'JSON_UNDERSCOREIZE': { 'no_underscore_before_number': True, }, } JWT_AUTH = { 'JWT_EXPIRATION_DELTA': datetime.timedelta(hours=1), 'JWT_ALLOW_REFRESH': True, 'JWT_AUTH_HEADER_PREFIX': 'Bearer', 'JWT_RESPONSE_PAYLOAD_HANDLER': 'accounts.views.jwt_response_payload_handler', } REST_USE_JWT = True REST_AUTH_SERIALIZERS = { 'USER_DETAILS_SERIALIZER':'accounts.serializers.UserSerializer' } REST_AUTH_REGISTER_SERIALIZERS = { 'REGISTER_SERIALIZER': 'accounts.serializers.UserRegisterSerializer', } ACCOUNT_EMAIL_VERIFICATION = 'none'
Python
0.012527
@@ -4917,11 +4917,18 @@ = ' -UTC +Asia/Seoul '%0A%0AU
7e457272b4e9d3b0de1bb0fc0cbf8b6bae4dc911
add test scrip
test_rsn.py
test_rsn.py
#!/usr/bin/env python
import argparse
import logging

from prettytable import PrettyTable

from dns.flags import DO
from dns.resolver import query, Resolver


class RsnServer(object):
    def __init__(self, server):
        self.logger = logging.getLogger('RsnServer')
        self.server = server
        self.ipv4 = query(self.server, 'A')[0].address
        self.ipv6 = query(self.server, 'AAAA')[0].address
        self.resolver = Resolver()
        self.logger.debug('initiate: {} ({}/{})'.format(self.server, self.ipv4, self.ipv6))
        self.update_sizes()

    def _update_size(self, server, dnssec):
        '''get the response size'''
        self.resolver.nameservers = [ server ]
        if dnssec:
            self.resolver.use_edns(0,DO,4096)
        else:
            self.resolver.use_edns(0,0,4096)
        answer = self.resolver.query('.', 'NS')
        size = len(answer.response.to_wire())
        self.logger.debug('Size:{}:DNSSEC({}):{}'.format(server, dnssec, size))
        return size

    def update_sizes(self):
        self.size_ipv4 = self._update_size(self.ipv4, False)
        self.size_ipv6 = self._update_size(self.ipv6, False)
        self.size_ipv4_dnssec = self._update_size(self.ipv4, True)
        self.size_ipv6_dnssec = self._update_size(self.ipv6, True)


def get_args():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-z', '--zone', default='.' )
    parser.add_argument('-s', '--server', default='127.0.0.1' )
    parser.add_argument('-b', '--bufsize', type=int, default=4096 )
    parser.add_argument('-v', '--verbose', action='count' )
    parser.add_argument('servers_file')
    return parser.parse_args()


def set_log_level(args_level):
    log_level = logging.ERROR
    if args_level == 1:
        log_level = logging.WARN
    elif args_level == 2:
        log_level = logging.INFO
    elif args_level > 2:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level)


def print_report(servers):
    table = PrettyTable(
            ['Server', 'IPv4', 'IPv6', 'IPv4 DNSSEC', 'IPv6 DNSSEC'])
    for server in servers:
        table.add_row([server.server, server.size_ipv4, servers.size_ipv6,
            server.size_ipv4_dnssec, server.size_ipv6_dnssec])
    print table.get_string(sortby='Server')


def main():
    args = get_args()
    set_log_level(args.verbose)
    servers = []
    with open(args.servers_file) as f:
        for line in f.read().splitlines():
            logging.debug('loading {}'.format(line))
            servers.append(RsnServer(line))
    print_report(servers)


if __name__ == '__main__':
    main()
Python
0.000001
@@ -232,21 +232,16 @@ er - = loggin @@ -290,21 +290,16 @@ er - = server @@ -316,37 +316,32 @@ elf.ipv4 - - = query(self.ser @@ -386,21 +386,16 @@ - = query( @@ -451,21 +451,16 @@ lver - = Resolv @@ -463,16 +463,45 @@ solver() +%0A self.round_trips = 0 %0A%0A
8128791c5b4cb8d185ceb916df2b6aa896f17453
add test for custom ylabels
test_run.py
test_run.py
#! /usr/bin/env python

# Load Libraries
import matplotlib as mpl
mpl.use('SVG')
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set(style='ticks',context='talk')
import bootstrap_contrast as bsc

import pandas as pd
import numpy as np
import scipy as sp

# Dummy dataset
dataset=list()
for seed in [10,11,12,13,14,15]:
    np.random.seed(seed) # fix the seed so we get the same numbers each time.
    dataset.append(np.random.randn(40))
df=pd.DataFrame(dataset).T
cols=['Control','Group1','Group2','Group3','Group4','Group5']
df.columns=cols
# Create some upwards/downwards shifts.
df['Group2']=df['Group2']-0.1
df['Group3']=df['Group3']+0.2
df['Group4']=(df['Group4']*1.1)+4
df['Group5']=(df['Group5']*1.1)-1
# Add gender column.
df['Gender']=np.concatenate([np.repeat('Male',20),np.repeat('Female',20)])

# bsc.__version__

f,c=bsc.contrastplot(data=df,
                     idx=(('Group1','Group3','Group2'),
                          ('Control','Group4')),
                     color_col='Gender',
                     custom_palette={'Male':'blue',
                                     'Female':'red'},
                     float_contrast=True,
                     show_means='bars',
                     means_width=0.5,
                     fig_size=(10,8))

f.savefig('testfig.svg',format='svg')
Python
0
@@ -1176,16 +1176,109 @@ t=True,%0A + swarm_label='my swarm',%0A contrast_label='contrast',%0A
0ca45e92a92e71d080af6e2104f4f625e31559f0
Tweak mysql query string in test.
blaze/compute/tests/test_mysql_compute.py
blaze/compute/tests/test_mysql_compute.py
from __future__ import absolute_import, print_function, division

from getpass import getuser

import pytest

sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pymysql')

from odo import odo, drop, discover
import pandas as pd
import numpy as np

from blaze import symbol, compute
from blaze.utils import example, normalize
from blaze.interactive import iscoretype, iscorescalar, iscoresequence


@pytest.yield_fixture(scope='module')
def data():
    try:
        t = odo(
            example('nyc.csv'),
            'mysql+pymysql://%s@localhost/test::nyc' % getuser()
        )
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield t.bind
        finally:
            drop(t)


@pytest.fixture
def db(data):
    return symbol('test', discover(data))


def test_agg_sql(db, data):
    subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
    expr = subset[subset.passenger_count < 4].passenger_count.min()
    result = compute(expr, data, return_type='native')
    expected = """
    select
        min(alias.passenger_count) as passenger_count_min
    from
        (select
            nyc.passenger_count as passenger_count
        from
            nyc
        where nyc.passenger_count < %(passenger_count_1)s) as alias
    """
    assert normalize(str(result)) == normalize(expected)


def test_agg_compute(db, data):
    subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
    expr = subset[subset.passenger_count < 4].passenger_count.min()
    result = compute(expr, data, return_type='native')
    passenger_count = odo(compute(db.nyc.passenger_count, {db: data},
                                  return_type='native'),
                          pd.Series)
    assert passenger_count[passenger_count < 4].min() == result.scalar()


def test_core_compute(db, data):
    assert isinstance(compute(db.nyc, data, return_type='core'), pd.DataFrame)
    assert isinstance(compute(db.nyc.passenger_count, data, return_type='core'),
                      pd.Series)
    assert iscorescalar(compute(db.nyc.passenger_count.mean(), data,
                                return_type='core'))
    assert isinstance(compute(db.nyc, data, return_type=list), list)
Python
0
@@ -1281,27 +1281,8 @@ %3C %25 -(passenger_count_1) s) a
20d9128d48a1a1f157a02ffe0bea9a5796f69242
fix fleet-enable fleet-disable
jsb/plugs/core/fleet.py
jsb/plugs/core/fleet.py
# jsb/plugs/fleet.py
#
#

""" The fleet makes it possible to run multiple bots in one running instance. It is a list of bots. """

## jsb imports

from jsb.lib.config import Config
from jsb.lib.threads import start_new_thread
from jsb.lib.fleet import getfleet, FleetBotAlreadyExists
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.name import stripname

## basic imports

import os

## fleet-avail command

def handle_fleetavail(bot, ievent):
    """ show available fleet bots. """
    ievent.reply('available bots: ', getfleet().avail())

cmnds.add('fleet-avail', handle_fleetavail, 'OPER')
examples.add('fleet-avail', 'show available fleet bots', 'fleet-avail')

## fleet-connect command

def handle_fleetconnect(bot, ievent):
    """ connect a fleet bot to it's server. """
    try:
        botname = ievent.args[0]
    except IndexError:
        ievent.missing('<botname>')
        return
    try:
        fleet = getfleet()
        fleetbot = fleet.byname(botname)
        if fleetbot:
            start_new_thread(fleetbot.connect, ())
            ievent.reply('%s connect thread started' % botname)
        else:
            ievent.reply("can't connect %s .. trying enable" % botname)
            fleet.enable(bot, ievent)
    except Exception, ex:
        ievent.reply(str(ex))

cmnds.add('fleet-connect', handle_fleetconnect, 'OPER', threaded=True)
examples.add('fleet-connect', 'connect bot with <name> to irc server', 'fleet-connect test')

## fleet-disconnect command

def handle_fleetdisconnect(bot, ievent):
    """ disconnect a fleet bot from server. """
    try:
        botname = ievent.args[0]
    except IndexError:
        ievent.missing('<botname>')
        return
    ievent.reply('exiting %s' % botname)
    try:
        fleet = getfleet()
        if fleet.exit(botname):
            ievent.reply("%s bot stopped" % botname)
        else:
            ievent.reply("can't stop %s bot" % botname)
    except Exception, ex:
        ievent.reply("fleet - %s" % str(ex))

cmnds.add('fleet-disconnect', handle_fleetdisconnect, 'OPER', threaded=True)
examples.add('fleet-disconnect', 'fleet-disconnect <name> .. disconnect bot with <name> from irc server', 'fleet-disconnect test')

## fleet-list command

def handle_fleetlist(bot, ievent):
    """ fleet-list .. list bot names in fleet. """
    ievent.reply("fleet: ", getfleet().list())

cmnds.add('fleet-list', handle_fleetlist, ['USER', 'GUEST'])
examples.add('fleet-list', 'show current fleet list', 'fleet-list')

## fleet-del command

def handle_fleetdel(bot, ievent):
    """ delete bot from fleet. """
    try:
        name = ievent.args[0]
    except IndexError:
        ievent.missing('<name>')
        return
    try:
        if getfleet().delete(name):
            ievent.reply('%s deleted' % name)
        else:
            ievent.reply('%s delete failed' % name)
    except Exception, ex:
        ievent.reply(str(ex))

cmnds.add('fleet-del', handle_fleetdel, 'OPER', threaded=True)
examples.add('fleet-del', 'fleet-del <botname> .. delete bot from fleet list', 'fleet-del test')

## fleet-disable command

def fleet_disable(bot, ievent):
    """ disable a fleet bot. """
    if not ievent.rest:
        ievent.missing("list of fleet bots")
        return
    bots = ievent.rest.split()
    fleet = getfleet()
    for name in bots:
        bot = fleet.byname(name)
        if bot:
            bot.cfg['enable'] = 0
            bot.cfg.save()
            ievent.reply('disabled %s' % name)
            fleet.exit(name)
        else:
            ievent.reply("can't find %s bot in fleet" % name)

cmnds.add('fleet-disable', fleet_disable, 'OPER')
examples.add('fleet-disable', 'disable a fleet bot', 'fleet-disable local')

## fleet-enable command

def fleet_enable(bot, ievent):
    """ enable a fleet bot. """
    if not ievent.rest:
        ievent.missing("list of fleet bots")
        return
    bots = ievent.rest.split()
    fleet = getfleet()
    for name in bots:
        bot = fleet.byname(name)
        if bot:
            bot.cfg.load()
            bot.cfg['disable'] = 0
            bot.cfg.save()
            ievent.reply('enabled %s' % name)
            start_new_thread(bot.connect, ())
        elif name in fleet.avail():
            cfg = Config('fleet' + os.sep + stripname(name) + os.sep + 'config')
            cfg['disable'] = 0
            cfg.save()
            bot = fleet.makebot(cfg.type, cfg.name, cfg)
            ievent.reply('enabled and started %s bot' % name)
            start_new_thread(bot.connect, ())
        else:
            ievent.reply('no %s bot in fleet' % name)

cmnds.add('fleet-enable', fleet_enable, 'OPER', threaded=True)
examples.add('fleet-enable', 'enable a fleet bot', 'fleet-enable local')
Python
0.000076
@@ -1246,32 +1246,295 @@ -fleet.enable(bot, ievent +cfg = Config('fleet' + os.sep + stripname(botname) + os.sep + 'config')%0A cfg%5B'disable'%5D = 0%0A cfg.save()%0A bot = fleet.makebot(cfg.type, cfg.name, cfg)%0A ievent.reply('enabled and started %25s bot' %25 name)%0A start_new_thread(bot.start, () )%0A @@ -4705,38 +4705,36 @@ _new_thread(bot. -connec +star t, ())%0A e
a6c4540877e00df93fb5de3ce76e3a7393c1c587
Change notes.
timegaps.py
timegaps.py
# -*- coding: utf-8 -*-
# Copyright 2014 Jan-Philip Gehrcke. See LICENSE file for details.


"""
Feature brainstorm:
    - reference implementation with cmdline interface
    - comprehensive API for systematic unit testing and library usage
    - remove or move or noop mode
    - extensive logging
    - parse mtime from path (file/dirname)
    - symlink support (elaborate specifics)
    - file system entry input via positional cmdline args or via
      null-character separated paths at stdin

TODO:
    - rename to timegaps
"""


import os
import sys
import logging
import time
from logging.handlers import RotatingFileHandler

from deletebytime import Filter, FileSystemEntry


YEARS = 1
MONTHS = 12
WEEKS = 6
DAYS = 8
HOURS = 48
ZERO_HOURS_KEEP_COUNT = 5

LOGFILE_PATH = "/mnt/two_3TB_disks/jpg_private/home/progg0rn/nas_scripts/delete_pc_backups/delete_backups.log"


def main():
    paths = sys.argv[1:]
    log.info("Got %s backup paths via cmdline.", len(backup_dirs))

    backup_times = [time_from_dirname(d) for d in backup_dirs]
    items_with_time = zip(backup_dirs, backup_times)

    items_to_keep = filter_items(items_with_time)
    keep_dirs = [i[0] for i in items_to_keep]
    keep_dirs_str = "\n".join(keep_dirs)
    log.info("Keep these %s directories:\n%s", len(keep_dirs), keep_dirs_str)

    delete_paths = [p for p in backup_dirs if p not in keep_dirs]
    log.info("Delete %s paths", len(delete_paths))
    for p in delete_paths:
        delete_backup_dir(p)


if __name__ == "__main__":
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    fh = RotatingFileHandler(
        LOGFILE_PATH,
        mode='a',
        maxBytes=500*1024,
        backupCount=30,
        encoding='utf-8')
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    log.addHandler(ch)
    log.addHandler(fh)
    main()


if __name__ == "__main__":
    main()
Python
0
@@ -494,39 +494,230 @@ din%0A -%0ATODO:%0A - rename to timegaps + - add a mode where time-encoding nullchar-separated strings are read as%0A input and then filtered. The output is a set of rejected strings (no%0A involvement of the file system at all, just timestamp filtering)%0A %0A%22%22%22
ede42576daca2f4ea3ede8fa92852c623ede5196
fix typo - do not try to catch socket.errno :)
lib/exaproxy/network/poller.py
lib/exaproxy/network/poller.py
#!/usr/bin/env python
# encoding: utf-8
"""
server.py

Created by Thomas Mangin on 2011-11-30.
Copyright (c) 2011 Exa Networks. All rights reserved.
"""

# http://code.google.com/speed/articles/web-metrics.html

import os
import struct
import time
import socket
import errno
import select

from exaproxy.util.logger import logger

#if hasattr(select, 'epoll'):
#	poll = select.epoll
#if hasattr(select, 'poll'):
#	poll = select.poll
if hasattr(select, 'select'):
	poll = select.select
else:
	raise ImportError, 'what kind of select module is this'

# errno_block = set(
#	errno.EAGAIN, errno.EWOULDBLOCK,
#	errno.EINTR, errno.ETIMEDOUT,
# )
errno_block = set((
	errno.EAGAIN, errno.EWOULDBLOCK,
	errno.EINTR,
))

# errno_fatal = set(
#	errno.ECONNABORTED, errno.EPIPE,
#	errno.ECONNREFUSED, errno.EBADF,
#	errno.ESHUTDOWN, errno.ENOTCONN,
#	errno.ECONNRESET,
# )
errno_fatal = set((
	errno.EINVAL,
	errno.EBADF,
))

# (please do not change this list)
# XXX: Thomas asks why : it is only used in this file .. and it seems the list is short

# copied from reactor - not sure we will ever use this lis
errno_close = set([
	errno.EBADF, errno.ECONNRESET, errno.ESHUTDOWN,
	errno.ECONNABORTED, errno.ECONNREFUSED,
	errno.ENOTCONN, errno.EPIPE, errno.ECONNRESET,
])

def poll_select(read, write, timeout=None):
	try:
		r, w, x = poll(read, write, read + write, timeout)
	except socket.error, e:
		if e.errno in errno_block:
			logger.error('select', 'select not ready, errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, '')))
			return [], [], []

		if e.errno in errno_fatal:
			logger.error('select', 'select problem, errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, '')))
			logger.error('select', 'poller read  : %s' % str(read))
			logger.error('select', 'poller write : %s' % str(write))
			logger.error('select', 'read : %s' % str(read))
		else:
			logger.error('select', 'select problem, debug it. errno %d: %s' % (e.errno, errno.errorcode.get(e.errno, '')))

		for f in read:
			try:
				poll([f], [], [f], 0.1)
			except socket.errno:
				logger.error('select', 'can not poll (read) : %s' % str(f))

		for f in write:
			try:
				poll([], [f], [f], 0.1)
			except socket.errno:
				logger.error('select', 'can not poll (write) : %s' % str(f))

		raise e
	except (ValueError, AttributeError, TypeError), e:
		logger.error('select',"fatal error encountered during select - %s %s" % (type(e),str(e)))
		raise e
	except Exception, e:
		logger.error('select',"fatal error encountered during select - %s %s" % (type(e),str(e)))
		raise e
	return r, w, x
Python
0
@@ -2042,35 +2042,79 @@ xcept socket.err -no: +or:%0A%09%09%09%09print %22CANNOT POLL (read): %25s%22 %25 str(f) %0A%09%09%09%09logger.erro @@ -2241,11 +2241,56 @@ .err -no: +or:%0A%09%09%09%09print %22CANNOT POLL (write): %25s%22 %25 str(f) %0A%09%09%09
b30854cb21e10f1d9496750737250da7ad02ad38
add 'list' function
datapath/vhd/tapdisk.py
datapath/vhd/tapdisk.py
#!/usr/bin/env python

import os
import signal
import xapi
import commands

def log(txt):
    print >>sys.stderr, txt

# [run dbg cmd] executes [cmd], throwing a BackendError if exits with
# a non-zero exit code.
def run(dbg, cmd):
    code, output = commands.getstatusoutput(cmd)
    if code <> 0:
        log("%s: %s exitted with code %d: %s" % (dbg, cmd, code, output))
        raise (xapi.InternalError("%s exitted with non-zero code %d: %s" % (cmd, code, output)))
    return output

# Use Xen tapdisk to create block devices from files

class Vhd:
    def __init__(self, path):
        self.path = path
    def __str__(self):
        return "vhd:" + self.path

class Raw:
    def __init__(self, path):
        self.path = path
    def __str__(self):
        return "aio:" + self.path

blktap2_prefix = "/dev/xen/blktap-2/tapdev"

class Tapdisk:
    def __init__(self, minor, pid):
        self.minor = minor
        self.pid = pid
    def destroy(self, dbg):
        run(dbg, "tap-ctl detach -m %d -p %d" % (self.minor, self.pid))
    def close(self, dbg):
        run(dbg, "tap-ctl close -m %d -p %d" % (self.minor, self.pid))
    def open(self, dbg, f):
        assert (isinstance(f, Vhd) or isinstance(f, Raw))
        run(dbg, "tap-ctl open -m %d -p %d -a %s" % (self.minor, self.pid, str(f)))
    def block_device(self):
        return blktap2_prefix + str(self.minor)

def create(dbg):
    output = run(dbg, "tap-ctl spawn").strip()
    pid = int(output)
    output = run(dbg, "tap-ctl allocate").strip()
    prefix = blktap2_prefix
    minor = None
    if output.startswith(prefix):
        minor = int(output[len(prefix):])
    if minor is None:
        os.kill(pid, signal.SIGQUIT)
        raise (xapi.InternalError("tap-ctl allocate returned unexpected output: '%s'" % output))
    run(dbg, "tap-ctl attach -m %d -p %d" % (minor, pid))
    return Tapdisk(minor, pid)

def find_by_file(dbg, f):
    assert (isinstance(f, Vhd) or isinstance(f, Raw))
    for line in run(dbg, "tap-ctl list").split("\n"):
        bits = line.split()
        prefix = "pid="
        pid = None
        if bits[0].startswith(prefix):
            pid = int(bits[0][len(prefix):])
        minor = None
        prefix = "minor="
        if bits[1].startswith(prefix):
            minor = int(bits[1][len(prefix):])
        if len(bits) > 3:
            prefix = "args="
            args = None
            if bits[3].startswith(prefix):
                args = bits[3][len(prefix):]
            this = None
            prefix = "aio:"
            if args.startswith(prefix):
                this = Raw(args[len(prefix):])
            prefix = "vhd:"
            if args.startswith(prefix):
                this = Vhd(args[len(prefix):])
            if str(this) == str(f):
                return Tapdisk(minor, pid)
Python
0
@@ -876,16 +876,19 @@ nor, pid +, f ):%0A @@ -932,16 +932,35 @@ d = pid%0A + self.f = f%0A def @@ -1043,32 +1043,86 @@ nor, self.pid))%0A + run(dbg, %22tap-ctl free -m %25d%22 %25 (self.minor))%0A def close(se @@ -1198,24 +1198,46 @@ self.pid))%0A + self.f = None%0A def open @@ -1390,24 +1390,43 @@ d, str(f)))%0A + self.f = f%0A def bloc @@ -1994,90 +1994,48 @@ pid +, None )%0A%0Adef -find_by_file +list (dbg -, f ):%0A -assert (isinstance(f, Vhd) or isinstance(f, Raw)) +results = %5B%5D %0A @@ -2113,16 +2113,60 @@ split()%0A + if bits == %5B%5D:%0A continue%0A @@ -2442,11 +2442,80 @@ ts) -%3E 3 +%3C= 3:%0A results.append(Tapdisk(minor, pid, None))%0A else :%0A @@ -2800,32 +2800,94 @@ %5Blen(prefix):%5D)%0A + results.append(Tapdisk(minor, pid, this))%0A @@ -3017,43 +3017,221 @@ -if str(this) == str(f):%0A + results.append(Tapdisk(minor, pid, this))%0A return results%0A%0Adef find_by_file(dbg, f):%0A assert (isinstance(f, Vhd) or isinstance(f, Raw))%0A for tapdisk in list(dbg):%0A if str(f) == str(tapdisk.f):%0A @@ -3239,32 +3239,22 @@ + return -T +t apdisk -(minor, pid)%0A %0A
4bc436ac4d441987d602b3af10517125c78c56e0
remove use of BeautifulSoup from parse_paragraph_as_list
lib/parse_paragraph_as_list.py
lib/parse_paragraph_as_list.py
from bs4 import BeautifulSoup


def parse_paragraph_as_list(string_with_br):
    strings = BeautifulSoup(string_with_br, 'html.parser').strings
    splitted = [' '.join(s.split()).strip() for s in strings]
    return [s for s in splitted if s]
Python
0.000006
@@ -1,36 +1,4 @@ -from bs4 import BeautifulSoup%0A%0A%0A def @@ -46,131 +46,108 @@ -strings = BeautifulSoup(string_with_br, 'html.parser').strings%0A splitted = %5B' '.join(s.split()).strip() for s in strings +paragraph = ' '.join(string_with_br.split())%0A lines = %5Bs.strip() for s in paragraph.split('%3Cbr%3E') %5D%0A @@ -160,30 +160,27 @@ rn %5B -s +l for -s +l in -splitted +lines if -s +l %5D%0A
ac6ce056e6b05531d81c550ae3e1e1d688ece4a0
Make serializer commet more clear
jwt_auth/serializers.py
jwt_auth/serializers.py
from .models import User
from rest_framework import serializers


class UserSerializer(serializers.ModelSerializer):
    password = serializers.CharField(max_length=20,
                                     min_length=8,
                                     trim_whitespace=False,
                                     write_only=True)

    class Meta:
        model = User
        fields = ('id', 'nickname', 'username', 'email', 'password')

    # default `create` method call `model.objects.create` method to create new instance
    # override to create user correctly
    def create(self, validated_data):
        return User.objects.create_user(**validated_data)

    # since the password cannot be changed directly
    # override to update user correctly
    def update(self, instance, validated_data):
        if 'password' in validated_data:
            instance.set_password(validated_data['password'])
        instance.nickname = validated_data.get('nickname', instance.nickname)
        instance.save()
        return instance
Python
0.000001
@@ -328,16 +328,29 @@ )%0A%0A # + serializer's default @@ -366,16 +366,21 @@ method +will call %60mo @@ -394,24 +394,30 @@ ects.create%60 +%0A # method to c @@ -434,22 +434,17 @@ instance -%0A # +, overrid @@ -461,32 +461,33 @@ e user correctly +. %0A def create(
b7a84ce7f0049229693fe12bf7a8bb1a7177d3b6
convert values to float before multiplying with pi
django_geo/distances.py
django_geo/distances.py
import math


class distances:
    @staticmethod
    def geographic_distance(lat1, lng1, lat2, lng2):
		lat1 = (lat1 * math.pi) / 180
		lng1 = (lng1 * math.pi) / 180
		lat2 = (lat2 * math.pi) / 180
		lng2 = (lng2 * math.pi) / 180
		a = (math.sin(lat1)*math.sin(lat2))+(math.cos(lat1)*math.cos(lat2)*math.cos(lng2 - lng1))
		return math.acos(a) * 6371.01

    @staticmethod
    def max_variation_lat(distance):
		max_variation = abs((180 * distance) / (6371.01 * math.pi))
		return max_variation

    @staticmethod
    def max_variation_lon(address_latitude, distance):
		top = math.sin(distance / 6371.01)
		bottom = math.cos((math.pi * address_latitude)/180)
		ratio = top / bottom
		if -1 > ratio or ratio > 1:
			max_variation = 100
		else:
			max_variation = abs(math.asin(ratio) * (180 / math.pi))
		return max_variation
Python
0.000001
@@ -87,16 +87,124 @@ lng2):%0A + lat1 = float(lat1)%0A lng1 = float(lng1)%0A lat2 = float(lat2)%0A lng2 = float(lng2)%0A %09%09lat1 =
fdd74bdf3d41ee8c54b5590669e3bb96a195df88
Normalize URI from configuration keys.
aversion.py
aversion.py
# Copyright 2013 Rackspace
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.


class TypeRule(object):
    """
    Represents a basic rule for content type interpretation.
    """

    def __init__(self, ctype, version):
        """
        Initialize a TypeRule object.

        :param ctype: The resultant content type.  If None, the
                      existing content type will be used; otherwise,
                      the content type will be formed by formatting
                      the string, using the parameter dictionary.
        :param version: The resultant version.  If None, no version
                        will be returned; otherwise, the version will
                        be formed by formatting the string, using the
                        parameter dictionary.
        """

        self.ctype = ctype
        self.version = version

    def __call__(self, params):
        """
        Evaluate a TypeRule.

        :param params: A dictionary of content type parameters.  This
                       dictionary must contain the key '_', which must
                       be the content type being passed in.

        :returns: A tuple of the final content type and version.
        """

        ctype = (self.ctype % params) if self.ctype else params['_']
        version = (self.version % params) if self.version else None

        return ctype, version


class AVersion(object):
    """
    A composite application for PasteDeploy-based WSGI stacks which
    selects the version of an API and the requested content type
    based on criteria including URI prefix and suffix and content
    type parameters.
    """

    @staticmethod
    def _parse_type(ctype, typespec):
        """
        Parse a content type rule.  Unlike the other rules, content
        type rules are more complex, since both selected content type
        and API version must be expressed by one rule.  The rule is
        split on whitespace, then the components beginning with
        "type:" and "version:" are selected; in both cases, the text
        following the ":" character will be treated as a format
        string, which will be formatted using a content parameter
        dictionary.

        :param ctype: The content type the rule is for.
        :param typespec: The rule text, described above.

        :returns: An instance of TypeRule.
        """

        params = {}
        for token in typespec.split():
            tok_type, _sep, tok_val = token.partition(':')

            # Validate the token type
            if not tok_val:
                LOG.warn("%s: Invalid type token %r" % (ctype, token))
                continue
            elif tok_type not in ('type', 'version'):
                LOG.warn("%s: Unrecognized token type %r" %
                         (ctype, tok_type))
                continue
            elif tok_type in params:
                LOG.warn("%s: Duplicate value for token type %r" %
                         (ctype, tok_type))
                # Allow the overwrite

            # Validate the token value
            if (len(tok_val) <= 2 or tok_val[0] not in ('"', "'") or
                    tok_val[0] != tok_val[-1]):
                LOG.warn("Unrecognized token value %r" % tok_val)

            params[tok_type] = tok_val[1:-1]

        return TypeRule(ctype=params.get('type'),
                        version=params.get('version'))

    def __init__(self, loader, global_conf, **local_conf):
        """
        Initialize an AVersion object.

        :param loader: An object with a get_app() method, which will
                       be used to load the actual applications.
        :param global_conf: The global configuration.  Ignored.
        :param local_conf: The configuration for this application.
                           See the README.rst for a full discussion
                           of the defined keys and the meaning of
                           their values.
        """

        # Process the configuration
        self.version_app = None
        self.versions = {}
        uris = {}
        self.types = {}
        self.formats = {}
        for key, value in local_conf.items():
            if key == 'version':
                # The version application--what we call if no version
                # is specified
                version_app = loader.get_app(value)
            elif key.startswith('version.'):
                # The application for a given version
                self.versions[key[8:]] = loader.get_app(value)
            elif key.startswith('uri.'):
                # A mapping between URI prefixes and versions
                uris[key[4:]] = value
            elif key.startswith('type.'):
                # A mapping between a passed-in content type and the
                # desired version and final content type
                types[key[5:]] = self._parse_type(key[5:], value)
            elif key[0] == '.':
                # A mapping between a file extension and the desired
                # content type
                formats[key] = value

        # We want to search URIs in the correct order
        self.uris = sorted(uris.items(), key=lambda x: len(x[0]),
                           reverse=True)
Python
0
@@ -618,16 +618,57 @@ cense.%0A%0A +import re%0A%0A%0ASLASH_RE = re.compile('/+')%0A%0A %0Aclass T @@ -3955,16 +3955,368 @@ ion'))%0A%0A + @staticmethod%0A def _uri_normalize(uri):%0A %22%22%22%0A Normalize a URI. Multiple slashes are collapsed into a single%0A '/', a leading '/' is added, and trailing slashes are removed.%0A%0A :param uri: The URI to normalize.%0A%0A :returns: The normalized URI.%0A %22%22%22%0A%0A return '/' + SLASH_RE.sub('/', uri).strip('/')%0A%0A def @@ -5529,16 +5529,67 @@ versions +; note%0A # that the URI is normalized %0A @@ -5606,15 +5606,36 @@ ris%5B +self._uri_normalize( key%5B4:%5D +) %5D =
9dfb1fb01054b689e53b5f76a16c469a21bcf8e5
Update bias.py
bcn/bias.py
bcn/bias.py
"""Bias generation. Notes ----- Defines two classes that can generate different types of bias and a bias guess function. """ from __future__ import division, absolute_import import numpy as np from skimage.transform import resize from skimage.io import imread from sklearn.datasets import make_checkerboard from pymanopt.manifolds import FixedRankEmbedded, Euclidean def guess_func(shape, rank, **kwargs): """Generate an initial bias guess for the solver to start at. Parameters ---------- shape : (int, int) Dimensions of the array to be recoved. rank : int Rank of the bias to be recovered (estimate or truth). kwargs : dict Additional arguments to be passed to the BiasLowRank class. Returns ------- guess : dict Initial guess for the solver to be used, containing X and the decomposed usvt. Notes ----- The guess function needs to use the class that is matched to the according underlying bias. """ bias = BiasLowRank(shape, rank, **kwargs).generate() guess = {'X': bias['X'], 'usvt': bias['usvt']} return guess class BiasLowRank(object): def __init__(self, shape, rank, model='gaussian', noise_amplitude=1.0, n_clusters=2, image_source=None): """Generate bias according to a low-rank (sparse) model. Parameters ---------- shape : tuple of int Shape of the output bias matrix in the form of (n_samples, n_features). rank : int Rank of the low-rank decomposition. model : {'image', 'bicluster', 'gaussian'} Three bias models are supported, `gaussian` which is based on a QR decomposition of a random Gaussian matrix, `image` which is based on a prespecified image that is then rank reduced, and `bicluster` which is based on `sklearn's` checkerboard function that is then rank reduced. noise_amplitude : float, optional unless model `gaussian` Sets the level of the bias. n_clusters: tuple of int, optional unless model `bicluster` Number of clusters for the model `bicluster` in the form of (n_sample_clusters, n_column_clusters). image_source: str, optional unless model `image` File location of the image to be used for the model `image`. """ self.shape = shape self.model = model self.rank = rank self.noise_amplitude = noise_amplitude self.image_source = image_source self.n_clusters = n_clusters assert self.model in ['image', 'bicluster', 'gaussian'] def generate(self): """Generate bias according to a low-rank (sparse) model. Returns ------- bias : dict, {'X': ndarray, shape (n_sample, n_features), 'usvt': tuple of ndarray, (U, S, Vt), shape ((n_samples, rank), rank, (rank, n_samples))} Contains low-rank bias matrix `X` and it's corresponding decomposition `usvt`. """ if self.model == 'gaussian': usvt = FixedRankEmbedded(self.shape[0], self.shape[1], self.rank).rand() # NOTE Eigenvalues are normalized so that the bias level is # approximately consistent over differing rank matrices. 
usvt = usvt[0], (usvt[1] / np.sum(np.absolute(usvt[1]))), usvt[2] usvt = usvt[0], usvt[1] * self.noise_amplitude, usvt[2] X = np.dot(np.dot(usvt[0], np.diag(usvt[1])), usvt[2]) if self.model == 'image': X = imread(self.image_source, flatten=True, mode='L') if X.shape != self.shape: X = imresize(X, self.shape) X = 0.5 * ((X / np.absolute(X).max()) - 0.5) usvt = np.linalg.svd(X) usvt = usvt[0][:, :self.rank], usvt[1][ :self.rank], usvt[2][:self.rank, :] X = np.dot(np.dot(usvt[0], np.diag(usvt[1])), usvt[2]) if self.model == 'bicluster': X, rows, columns = make_checkerboard( shape=self.shape, n_clusters=self.n_clusters, noise=0, shuffle=False) X = (X / X.max()) - 0.5 usvt = np.linalg.svd(X) usvt = usvt[0][:, :self.rank], usvt[1][ :self.rank], usvt[2][:self.rank, :] X = np.dot(np.dot(usvt[0], np.diag(usvt[1])), usvt[2]) bias = {'X': X, 'usvt': usvt} return bias class BiasUnconstrained(object): def __init__(self, shape, model='gaussian', noise_amplitude=1.0, fill_value=42): """Generate bias according to an unconstrained (non-sparse) model. Parameters ---------- shape : tuple of int Shape of the output bias matrix in the form of (n_samples, n_features). model : {'gaussian', 'uniform'} Two bias models are supported, `gaussian` which is based on random sampling of a Gaussian matrix and `uniform` which is based on repetition of a prespecified fill value. noise_amplitude : float, optional unless model `gaussian` Sets the level of the bias. fill_value : float, optional unless model `uniform` Sets the fill value for the uniform bias model. """ self.shape = shape self.model = model self.noise_amplitude = noise_amplitude self.fill_value = fill_value assert self.model in ['gaussian', 'uniform'] def generate(self): """Generate bias according to an unconstrained (non-sparse) model. Returns ------- bias : dict, {'X': ndarray, shape (n_sample, n_features)} Contains low-rank bias matrix `X` and it's corresponding decomposition `usvt`. """ if self.model == 'gaussian': X = Euclidean(self.shape[0], self.shape[1]).rand() X = X * self.noise_amplitude if self.model == 'uniform': X = np.full(self.shape, self.fill_value) bias = {'X': X} return bias
Python
0.000001
@@ -3592,18 +3592,16 @@ X = -im resize(X
0d5f4a62e1e2bbe49f7148801f11130686da1c37
Update sge.py
libsubmit/providers/sge/sge.py
libsubmit/providers/sge/sge.py
import os
import pprint
import math
import json
import time
import logging
import atexit

from datetime import datetime, timedelta
from string import Template

from libsubmit.providers.provider_base import ExecutionProvider
from libsubmit.launchers import Launchers
from libsubmit.error import *
from libsubmit.providers.aws.template import template_string
import xmltodict

logger = logging.getLogger(__name__)

try:
    import os
except ImportError:
    _ge_enabled = False
else:
    _ge_enabled = True

translate_table = {'qw': 'PENDING',
                   't': 'PENDING',
                   'r': 'RUNNING',
                   'd': 'COMPLETED',
                   'dr': 'STOPPING',
                   'rd': 'COMPLETED',  # We shouldn't really see this state
                   'c': 'COMPLETED',  # We shouldn't really see this state
                   }


class GridEngine(ExecutionProvider):
    """ Define the Grid Engine provider

    .. code:: python

                              +------------------
                              |
        script_string ------->|  submit
             id      <--------|---+
                              |
        [ ids ]       ------->|  status
        [statuses]   <--------|----+
                              |
        [ ids ]       ------->|  cancel
        [cancel]     <--------|----+
                              |
        [True/False] <--------|  scaling_enabled
                              |
                              +-------------------
    """

    def __init__(self, config, channel=None):
        ''' Initialize the GridEngine class

        Args:
             - Config (dict): Dictionary with all the config options.

        KWargs:
             - Channel (None): A channel is required for slurm.
        '''
        self.channel = channel
        self.config = config
        self.sitename = config['site']
        self.current_blocksize = 0

        launcher_name = self.config["execution"]["block"].get("launcher", "singleNode")
        self.launcher = Launchers.get(launcher_name, None)
        self.scriptDir = self.config["execution"]["scriptDir"]
        if not os.path.exists(self.scriptDir):
            os.makedirs(self.scriptDir)

        # Dictionary that keeps track of jobs, keyed on job_id
        self.resources = {}
        atexit.register(self.bye)

    def __repr__(self):
        return "<Grid Engine Execution Provider for site:{0} with channel:{1}>".format(
            self.sitename, self.channel)

    def create_cmd_string(self, path="/local/cluster/bin/:$PATH", lib_path="/local/cluster/lib/"):
        return """qsub -e /dev/null -o /dev/null -terse << EOF
PATH={}
export PATH
LD_LIBRARY_PATH={}
export LD_LIBRARY_PATH
ipengine
EOF
""".format(path, lib_path)

    def submit(self, cmd_string="", blocksize=1, job_name="parsl.auto"):
        ''' The submit method takes the command string to be executed upon
        instantiation of a resource most often to start a pilot (such as IPP
        engine or even Swift-T engines).

        Args :
             - cmd_string (str) : The bash command string to be executed.
             - blocksize (int) : Blocksize to be requested

        KWargs:
             - job_name (str) : Human friendly name to be assigned to the job request

        Returns:
             - A job identifier, this could be an integer, string etc

        Raises:
             - ExecutionProviderExceptions or its subclasses
        '''
        job_id = None
        try:
            qsub_pilot = """qsub -e /dev/null -o /dev/null -terse << EFO
PATH=/local/cluster/bin/:$PATH
export PATH
LD_LIBRARY_PATH=/local/cluster/lib/
export LD_LIBRARY_PATH
{}
EFO
""".format(cmd_string)
            job_id = os.popen(qsub_pilot).read().strip()
            logger.debug("Provisioned a slot")

            new_slot = {job_id: {"job_name": job_name,
                                 "job_id": job_id,
                                 "status": translate_table.get('qw', "PENDING")}}
            self.resources.update(new_slot)
        except Exception as e:
            logger.error("Failed to provision a slot")
            logger.error(e)
            raise e

        logger.debug("Provisioned {} slots. Started ipengines.")
        self.current_blocksize += 1
        return job_id

    def status(self, job_ids):
        ''' Get the status of a list of jobs identified by the job identifiers
        returned from the submit request.

        Args:
             - job_ids (list) : A list of job identifiers

        Returns:
             - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
               'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.

        Raises:
             - ExecutionProviderExceptions or its subclasses
        '''
        xml_as_dict = xmltodict.parse(os.popen("qstat -xml").read())
        statuses = []
        j_id = 0
        all_jobs = xml_as_dict.get("job_info", {}).get("queue_info", {})
        all_jobs = all_jobs if all_jobs else {}
        if len(all_jobs.items()) > 1:
            for job_list in all_jobs.get("job_list"):
                status = job_list["state"]
                j_id = job_list["JB_job_number"]
                if j_id in job_ids:
                    statuses.append(translate_table[status])
        elif len(all_jobs.items()) == 1:
            job_list = all_jobs.get("job_list")
            job_list = [job_list] if type(job_list) != list else job_list
            for job in job_list:
                status = job["state"]
                j_id = job["JB_job_number"]
                if j_id in job_ids:
                    statuses.append(translate_table[status])
        else:
            job_list = []
            try:
                job_list = xml_as_dict.get("job_info", {}).get("job_info", {}).get("job_list", [])
                job_list = [job_list] if type(job_list) != list else job_list
            except Exception as e:
                job_list = []
            for job in job_list:
                status = job["state"]
                j_id = job["JB_job_number"]
                if j_id in job_ids:
                    statuses.append(translate_table[status])
        for i in range(len(job_ids) - len(statuses)):
            statuses.append("COMPLETED")
        return statuses

    def cancel(self, job_ids):
        ''' Cancels the resources identified by the job_ids provided by the user.

        Args:
             - job_ids (list): A list of job identifiers

        Returns:
             - A list of status from cancelling the job which can be True, False

        Raises:
             - ExecutionProviderExceptions or its subclasses
        '''
        stati = []
        for job_id in job_ids:
            try:
                outp = os.popen("qdel {}".format(job_id)).read()
            except Exception as e:
                logger.error("failed to cancel job {}".format(job_id))
                outp = "False"
            status = True if "has registered the job" in outp else False
            stati.append(status)
            self.current_blocksize -= 1
        return stati

    @property
    def scaling_enabled(self):
        ''' Scaling is enabled

        Returns:
             - Status (Bool)
        '''
        return True

    @property
    def current_capacity(self):
        ''' Returns the current blocksize.
        This may need to return more information in the future:
        { minsize, maxsize, current_requested }
        '''
        return self.current_blocksize

    @property
    def channels_required(self):
        ''' GridEngine does not require a channel

        Returns:
             - Status (Bool)
        '''
        return False

    def bye(self):
        self.cancel([i for i in list(self.resources)])
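# Note on status() above: states are read from `qstat -xml` (the
# JB_job_number and state fields) and mapped through translate_table;
# job ids that no longer appear in the XML are padded out as "COMPLETED".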
Python
0.000001
@@ -351,25 +351,8 @@ ring -%0Aimport xmltodict %0A%0Alo @@ -403,19 +403,30 @@ import -os%0A +xmltodict%0A %0Aexcept
4259019196c473431d4291f2910ab0164e319ffb
update simu.py for 0.3.0.
bin/simu.py
bin/simu.py
#!/usr/bin/env python3

import sys
import os
import traceback
import logging
import profile
from subprocess import call, check_call, check_output

ROOT_DIR = './'
TEST_DIR = ROOT_DIR+'tests'
TMP_DIR = ROOT_DIR+'.tmp'
sys.path.append(ROOT_DIR)
from polyphony.compiler.__main__ import compile_main, logging_setting
from polyphony.compiler.env import env


def exec_test(test, output=True, compile_only=False):
    casefile = os.path.basename(test)
    casename, _ = os.path.splitext(casefile)
    try:
        compile_main(test, casename, TMP_DIR, debug_mode=output)
    except Exception as e:
        print('[COMPILE PYTHON] FAILED:'+test)
        if env.dev_debug_mode:
            traceback.print_exc()
        print(e)
        return

    if compile_only:
        return

    hdl_files = ['{}/{}.v'.format(TMP_DIR, casename), '{}/{}_test.v'.format(TMP_DIR, casename)]
    exec_name = '{}/test'.format(TMP_DIR)
    args = ('iverilog -I {} -W all -o {} -s test'.format(TMP_DIR, exec_name)).split(' ')
    args += hdl_files
    try:
        check_call(args)
    except Exception as e:
        print('[COMPILE HDL] FAILED:'+test)
        return

    try:
        out = check_output([exec_name])
        lines = out.decode('utf-8').split('\n')
        for line in lines:
            if output:
                print(line)
            if 'FAILED' in line:
                raise Exception()
    except Exception as e:
        print('[SIMULATION] FAILED:'+test)
        print(e)


if __name__ == '__main__':
    if not os.path.exists(TMP_DIR):
        os.mkdir(TMP_DIR)
    if len(sys.argv) > 1:
        #profile.run("exec_test(sys.argv[1])")
        exec_test(sys.argv[1])
Python
0
@@ -65,101 +65,75 @@ ort -logging%0Aimport profile%0Afrom subprocess import call, check_call, check_output%0A%0AROOT_DIR = './' +subprocess%0A%0AIVERILOG_PATH = 'iverilog'%0AROOT_DIR = '.' + os.path.sep %0ATES @@ -148,17 +148,19 @@ ROOT_DIR -+ + + 'tests'%0A @@ -178,17 +178,19 @@ ROOT_DIR -+ + + '.tmp'%0As @@ -214,16 +214,17 @@ OT_DIR)%0A +%0A from pol @@ -340,20 +340,29 @@ ec_test( -test +casefile_path , output @@ -421,20 +421,29 @@ asename( -test +casefile_path )%0A ca @@ -511,20 +511,29 @@ le_main( -test +casefile_path , casena @@ -623,37 +623,48 @@ PYTHON%5D FAILED:' -+test + + casefile_path )%0A if env @@ -778,24 +778,207 @@ return%0A + for testbench in env.testbenches:%0A simulate_verilog(testbench.orig_name, casename, casefile_path, output)%0A%0A%0Adef simulate_verilog(testname, casename, casefile_path, output): %0A hdl_fil @@ -986,17 +986,18 @@ s = %5B'%7B%7D -/ +%7B%7D %7B%7D.v'.fo @@ -1001,32 +1001,45 @@ .format(TMP_DIR, + os.path.sep, casename), '%7B%7D/ @@ -1041,16 +1041,12 @@ '%7B%7D -/ %7B%7D -_test +%7B%7D .v'. @@ -1061,20 +1061,33 @@ MP_DIR, -case +os.path.sep, test name)%5D%0A @@ -1108,13 +1108,12 @@ '%7B%7D -/test +%7B%7D%7B%7D '.fo @@ -1120,24 +1120,47 @@ rmat(TMP_DIR +, os.path.sep, testname )%0A args = @@ -1162,24 +1162,18 @@ rgs = (' -iverilog +%7B%7D -I %7B%7D - @@ -1187,20 +1187,18 @@ o %7B%7D -s -test +%7B%7D '.format @@ -1190,32 +1190,47 @@ %7D -s %7B%7D'.format( +IVERILOG_PATH, TMP_DIR, exec_na @@ -1231,16 +1231,26 @@ xec_name +, testname )).split @@ -1294,16 +1294,27 @@ +subprocess. check_ca @@ -1378,37 +1378,48 @@ LE HDL%5D FAILED:' -+test + + casefile_path )%0A return @@ -1443,16 +1443,27 @@ out = +subprocess. check_ou @@ -1740,13 +1740,24 @@ ED:' -+test + + casefile_path )%0A @@ -1897,16 +1897,42 @@ # + import profile%0A # profile.
fd9a553868ce46ceef2b23e79347dd262b63ebae
fix build instructions on Linux
binding.gyp
binding.gyp
{
  "targets": [
    {
      "target_name": "bindings",
      "variables": {
        "raptor_prefix": "/usr/local"
      },
      "include_dirs": [
        "<(raptor_prefix)/include/raptor2"
      ],
      "sources": [
        "src/bindings.cc",
        "src/parser.cc",
        "src/parser_wrapper.cc",
        "src/serializer.cc",
        "src/serializer_wrapper.cc",
        "src/statement.cc",
        "src/statement_wrapper.cc",
        "src/uri.cc",
        "src/world.cc",
        "src/message.cc"
      ],
      "cflags!": [ "-fno-exceptions" ],
      "cflags_cc!": [ "-std=c++11", "-fno-exceptions" ],
      "link_settings": {
        "libraries": [ "-lraptor2" ]
      },
      "conditions": [
        [ "OS=='mac'", {
          "xcode_settings": {
            "GCC_ENABLE_CPP_EXCEPTIONS": "YES",
            "OTHER_CPLUSPLUSFLAGS": [ "-std=c++11", "-stdlib=libc++", "-mmacosx-version-min=10.7" ]
          }
        } ]
      ]
    }
  ]
}
Python
0.000001
@@ -45,72 +45,8 @@ s%22,%0A - %22variables%22: %7B%0A %22raptor_prefix%22: %22/usr/local%22%0A %7D,%0A @@ -427,101 +427,8 @@ %5D,%0A - %22cflags!%22: %5B %22-fno-exceptions%22 %5D,%0A %22cflags_cc!%22: %5B %22-std=c++11%22, %22-fno-exceptions%22 %5D,%0A @@ -509,26 +509,24 @@ ions%22: %5B - %5B %0A %22OS=='m @@ -517,16 +517,18 @@ %0A + %5B %22OS=='m @@ -535,16 +535,76 @@ ac'%22, %7B%0A +%09 %22variables%22: %7B%0A%09%09%22raptor_prefix%22: %22/usr/local%22%0A%09 %7D,%0A @@ -886,16 +886,152 @@ %7D + %5D,%0A %5B %22OS!='win'%22, %7B%0A%09 %22variables%22: %7B%0A%09%09%22raptor_prefix%22: %22/usr%22%0A%09 %7D,%0A%09 %22cflags_cc%22: %5B %22-std=c++11%22, %22-fexceptions%22 %5D%0A%09%7D %5D %0A %5D - %5D %0A%7D %5D
2f924fc35d0724e7638e741fd466228649077e10
Update action_after_build destination
binding.gyp
binding.gyp
{
  'includes': [ 'deps/common-libzip.gypi' ],
  'variables': {
    'shared_libzip%':'false',
    'shared_libzip_includes%':'/usr/lib',
    'shared_libzip_libpath%':'/usr/include'
  },
  'targets': [
    {
      'target_name': 'node_zipfile',
      'conditions': [
        ['shared_libzip == "false"',
          {
            'dependencies': [
              'deps/libzip.gyp:libzip'
            ]
          },
          {
            'libraries': [
              '-L<@(shared_libzip_libpath)',
              '-lzip'
            ],
            'include_dirs': [
              '<@(shared_libzip_includes)',
              '<@(shared_libzip_libpath)/libzip/include',
            ]
          }
        ]
      ],
      'sources': [
        'src/node_zipfile.cpp'
      ],
    },
    {
      'target_name': 'action_after_build',
      'type': 'none',
      'dependencies': [ 'node_zipfile' ],
      'copies': [
        {
          'files': [ '<(PRODUCT_DIR)/node_zipfile.node' ],
          'destination': './lib/'
        }
      ],
      'conditions': [
        ['OS=="win"', {
          'copies': [
            {
              'files': [ '<(PRODUCT_DIR)/libzip.dll' ],
              'destination': 'lib/'
            }
          ]
        }]
      ]
    }
  ]
}
Python
0.000002
@@ -995,24 +995,32 @@ on': './lib/ +binding/ '%0A
2f23ce76bfc32022cea41d675d762dfbbde3fed7
Fix a typo
home.py
home.py
#!/usr/bin/env python

import time
import sys
import json
import types
import thread

from messenger import Messenger

import config
import led
import dht
import stepper_motor


def turn_on_living_light(freq, dc):
    print('turn_on_living_light: %d, %d' % (freq, dc))
    led.turn_on(config.LED_LIVING, freq, dc)


def turn_off_living_light():
    print('turn_off_living_light')
    led.turn_off(config.LED_LIVING)


def turn_on_bedroom_light(freq, dc):
    print('turn_on_bedroom_light: %d, %d' % (freq, dc))
    led.turn_on(config.LED_BEDROOM, freq, dc)


def turn_off_bedroom_light():
    print('turn_off_bedroom_light')
    led.turn_off(config.LED_BEDROOM)


def turn_on_porch_light(freq, dc):
    print('turn_on_porch_light: %d, %d' % (freq, dc))
    led.turn_on(config.LED_PORCH, freq, dc)


def turn_off_porch_light():
    print('turn_off_porch_light')
    led.turn_off(config.LED_PORCH)


def open_front_door():
    print('open_front_door')
    stepper_motor.forward(90)


def close_front_door():
    print('close_front_door')
    stepper_motor.backward(90)


def message_callback(msg):
    print('message_callback:')
    print(msg)
    if not isinstance(msg, dict):
        return
    if msg['topic'] != config.ALIAS:
        return
    print('got a message!')
    try:
        m = json.loads(msg['msg'])
    except Exception as e:
        print('json.loads exception:')
        print(e)
        return
    print('act: %s' % m['act'])
    if m['act'] == 'turn_on_living_light':
        turn_on_living_light(m['freq'], m['dc'])
    elif m['act'] == 'turn_off_living_light':
        turn_off_living_light()
    elif m['act'] == 'turn_on_bedroom_light':
        turn_on_bedroom_light(m['freq'], m['dc'])
    elif m['act'] == 'turn_off_bedroom_light':
        turn_off_bedroom_light()
    elif m['act'] == 'turn_on_porch_light':
        turn_on_porch_light(m['freq'], m['dc'])
    elif m['act'] == 'turn_off_porch_light':
        turn_off_porch_light()
    elif m['act'] == 'open_front_door':
        open_front_door()
    elif m['act'] == 'close_front_door':
        close_front_door()


def report_ht(messenger):
    ht = dht.get_ht()
    m = {}
    m['act'] = 'report_ht'
    m['h'] = ht[0]
    m['t'] = ht[1]
    msg = json.dumps(m)
    messenger.publish(msg, 1)


def main():
    messenger = Messenger(message_callback)
    while True:
        report_ht()
        time.sleep(2)


if __name__ == '__main__':
    main()
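# Illustrative message handled by message_callback (shape inferred from the
# code above): a dict whose 'topic' equals config.ALIAS and whose 'msg' field
# is a JSON string such as
#   '{"act": "turn_on_living_light", "freq": 50, "dc": 80}'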
Python
1
@@ -2360,24 +2360,33 @@ report_ht( +messenger )%0A ti
c2ed0cbbbd2d88e9db56c03da30533fff8018539
Fix key error in case submitter is not set.
document/models.py
document/models.py
from django.db import models

import scraper.documents


class Dossier(models.Model):
    dossier_id = models.CharField(max_length=100, blank=True, unique=True)

    def __str__(self):
        return str(self.dossier_id)

    def documents(self):
        return Document.objects.filter(dossier=self)

    def kamerstukken(self):
        return Kamerstuk.objects.filter(document__dossier=self)

    def title(self):
        kamerstukken = self.kamerstukken()
        titles = {}
        for stuk in kamerstukken:
            title = stuk.document.title()
            if title in titles:
                titles[title] += 1
            else:
                titles[title] = 1
        max_titles = 0
        title = 'undefined'
        for key, value in titles.items():
            if value > max_titles:
                title = key
                max_titles = value
        return title


class Document(models.Model):
    dossier = models.ForeignKey(Dossier, blank=True, null=True)
    document_id = models.CharField(max_length=200, blank=True)
    title_full = models.CharField(max_length=500)
    title_short = models.CharField(max_length=200)
    publication_type = models.CharField(max_length=200, blank=True)
    submitter = models.CharField(max_length=200, blank=True)
    category = models.CharField(max_length=200, blank=True)
    publisher = models.CharField(max_length=200, blank=True)
    date_published = models.DateField(blank=True, null=True)
    content_html = models.CharField(max_length=200000, blank=True)

    def title(self):
        return self.title_full.split(';')[0]

    def document_url(self):
        return 'https://zoek.officielebekendmakingen.nl/' + str(self.document_id) + '.html'

    def __str__(self):
        return self.title_short

    class Meta:
        ordering = ['-date_published']


class Kamerstuk(models.Model):
    document = models.ForeignKey(Document)
    id_main = models.CharField(max_length=40, blank=True)
    id_sub = models.CharField(max_length=40, blank=True)
    type_short = models.CharField(max_length=40, blank=True)
    type_long = models.CharField(max_length=100, blank=True)

    def __str__(self):
        return str(self.id_main) + '.' + str(self.id_sub) + ' ' + str(self.type_long)

    def visible(self):
        if self.type_short == 'Koninklijke boodschap':
            return False
        return True

    def voorstelwet(self):
        if self.type_short == 'Voorstel van wet':
            return True
        return False

    class Meta:
        verbose_name_plural = 'Kamerstukken'
        ordering = ['id_sub']


def create_or_update_dossier(dossier_id):
    print('create or update dossier')
    dossiers = Dossier.objects.filter(dossier_id=dossier_id)
    if dossiers:
        dossier = dossiers[0]
    else:
        dossier = Dossier.objects.create(dossier_id=dossier_id)
    search_results = scraper.documents.search_politieknl_dossier(dossier_id)
    for result in search_results:
        print('create document for results:')
        # skip documents of some types and/or sources, no models implemented yet
        # TODO: handle all document types
        if 'Agenda' in result['type'].split(' ')[0]:
            print('WARNING: Agenda, skip for now')
            continue
        if 'Staatscourant' in result['type']:
            print('WARNING: Staatscourant, skip for now')
            continue
        document_id, content_html = scraper.documents.get_document_id_and_content(result['page_url'])
        if not document_id:
            print('WARNING: No document id found, will not create document')
            continue
        metadata = scraper.documents.get_metadata(document_id)
        if metadata['date_published']:
            date_published = metadata['date_published']
        else:
            date_published = result['date_published']
        document = Document.objects.create(
            dossier=dossier,
            document_id=document_id,
            title_full=metadata['title_full'],
            title_short=metadata['title_short'],
            publication_type=metadata['publication_type'],
            submitter=metadata['submitter'],
            category=metadata['category'],
            publisher=metadata['publisher'],
            date_published=date_published,
            content_html=content_html,
        )
        if metadata['is_kamerstuk']:
            print('create kamerstuk')
            # print(items)
            title_parts = metadata['title_full'].split(';')
            type_short = ''
            type_long = ''
            if len(title_parts) > 2:
                type_short = title_parts[1].strip()
                type_long = title_parts[2].strip()
            if "Bijlage" in result['type']:
                print('BIJLAGE')
                type_short = 'Bijlage'
                type_long = 'Bijlage'
            Kamerstuk.objects.create(
                document=document,
                id_main=dossier_id,
                id_sub=metadata['id_sub'].zfill(2),
                type_short=type_short,
                type_long=type_long,
            )
    return dossier
Python
0
@@ -3830,16 +3830,105 @@ shed'%5D%0A%0A + if 'submitter' not in metadata:%0A metadata%5B'submitter'%5D = 'undefined'%0A%0A
c2f563215fcc62d6e595446f5acbd1969484ddb7
move end timer command to the correct location
clean_db.py
clean_db.py
import MySQLdb, config, urllib, cgi, datetime, time

sql = MySQLdb.connect(host="localhost", user=config.username, passwd=config.passwd, db=config.db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query = sql.store_result()
rss_urls = db_feed_query.fetch_row(0)

table_name = "stories"
date_from = datetime.datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"), "%d-%m-%Y")
date_to = datetime.datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"), "%d-%m-%Y")

for rss_url_data in rss_urls:
    feed_id = rss_url_data[0]
    i = date_from
    while i <= date_to:
        t0 = time.clock()
        whereclause = "`date_added` = '" + i.strftime("%Y-%m-%d") + "'"
        whereclause += " AND `feedid`= " + str(feed_id) + ""
        query = "DELETE FROM stories WHERE " + whereclause
        query += " AND `url` NOT IN (SELECT * FROM (SELECT `url` FROM stories WHERE " + whereclause
        query += " ORDER BY `points` DESC LIMIT 0,20) AS TAB);"
        print(i.strftime("%d/%m/%Y") + "," + str(time.clock() - t0))
        sql.query(query)
        sql.commit()
        i += datetime.timedelta(days=1)
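# Net effect per feed and per day in the inclusive range: every row in
# `stories` is deleted except the 20 with the highest `points`, via the
# nested NOT IN subquery built above.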
Python
0.000001
@@ -1028,16 +1028,62 @@ TAB);%22%0A + sql.query(query)%0A sql.commit()%0A @@ -1141,54 +1141,8 @@ 0))%0A - sql.query(query)%0A sql.commit()%0A
505cee00e483b8899e910c7a7312928a5be840c2
change some names
clusters.py
clusters.py
import numpy as np
import pandas as pd
from astropy.cosmology import Planck13 as cosmo
from astropy import units

import sys
sys.path.insert(1, '/Users/jesford/astrophysics/cofm')  # temporary path adjust
from cofm import c_DuttonMaccio

try:
    from IPython.display import display
    notebook_display = True
except:
    notebook_display = False

# default parameters
h = cosmo.H0.value
Om_M = cosmo.Om0
Om_L = 1. - Om_M


class Clusters():
    """Ensemble of galaxy clusters and their properties."""
    def __init__(self, redshifts):
        if type(redshifts) != np.ndarray:
            redshifts = np.array(redshifts)
        if redshifts.ndim != 1:
            raise ValueError("Input redshift array must have 1 dimension.")
        self.describe = "Ensemble of galaxy clusters and their properties."
        self.number = redshifts.shape[0]
        self.z = redshifts
        self._rho_crit = cosmo.critical_density(self.z)
        self._massrich_norm = 2.7 * 10**13
        self._massrich_slope = 1.4
        self._df = pd.DataFrame(self.z, columns=['z'])

    def update_richness(self, richness):
        if type(richness) != np.ndarray:
            richness = np.array(richness)
        if richness.ndim != 1:
            raise ValueError("Input richness array must have 1 dimension.")
        if richness.shape[0] == self.number:
            self.n200 = richness
            self._df['n200'] = pd.Series(self.n200, index=self._df.index)
            self._richness_to_mass()
        else:
            raise ValueError("Input richness array must be same \
                length as current cluster ensemble.")

    def _richness_to_mass(self):
        """Calculate M_200 for simple power-law scaling relation
        (with default parameters from arXiv:1409.3571)."""
        self.m200 = self._massrich_norm * (self.n200 ** self._massrich_slope)
        self._df['m200'] = pd.Series(self.m200, index=self._df.index)
        self._update_dependant_variables()

    def update_z(self, redshifts):
        if type(redshifts) != np.ndarray:
            redshifts = np.array(redshifts)
        if redshifts.ndim != 1:
            raise ValueError("Input redshift array must have 1 dimension.")
        if redshifts.shape[0] == self.number:
            self.z = redshifts
            self._df['z'] = pd.Series(self.z, index=self._df.index)
            self._rho_crit = cosmo.critical_density(self.z)
            self._update_dependant_variables()
        else:
            raise ValueError("Input redshifts array must be same \
                length as current cluster ensemble.")

    def _update_dependant_variables(self):
        self._r200()
        self._c200()
        self._rs()
        # what else depends on z or m?

    def massrich_parameters(self):
        print "\nMass-Richness Power Law: M200 = norm * N200^slope"
        print "   norm:", self._massrich_norm
        print "   slope:", self._massrich_slope

    def update_massrichrelation(self, norm=None, slope=None):
        if norm != None:
            self._massrich_norm = norm
        if slope != None:
            self._massrich_slope = slope
        self._richness_to_mass()

    def view(self, notebook=notebook_display):
        print "\nCluster Ensemble:"
        if notebook == True:
            display(self._df)
        elif notebook == False:
            print self._df
        self.massrich_parameters()

    def _r200(self):
        self.r200 = (3. * self.m200 / (800. * np.pi * self._rho_crit)) ** (1. / 3.)
        self._df['r200'] = pd.Series(self.r200, index=self._df.index)

    def _c200(self):
        """Use c(M) from Dutton & Maccio 2014."""
        self.c200 = c_DuttonMaccio(self.z, self.m200)
        self._df['c200'] = pd.Series(self.c200, index=self._df.index)

    def _rs(self):
        """Cluster scale radius."""
        self.rs = self.r200 / self.c200
        self._df['rs'] = pd.Series(self.rs, index=self._df.index)
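# Illustrative scaling with the default parameters above: a richness of
# N200 = 20 maps to m200 = 2.7e13 * 20**1.4, roughly 1.8e15.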
Python
0.027908
@@ -435,17 +435,24 @@ Cluster -s +Ensemble ():%0A @@ -3212,11 +3212,11 @@ def -vie +sho w(se
710dba5196fbd419c23de74e9177185e212736a1
Update according to changes in config
tmt/util.py
tmt/util.py
import yaml
import json
import os
import re

'''Utility functions for filename and path routines.'''


def regex_from_format_string(format_string):
    '''
    Convert a format string of the sort "{name}_bla/something_{number}"
    to a named regular expression a la "P<name>.*_bla/something_P<number>\d+".

    Parameters
    ----------
    format_string: str
        Python format string

    Returns
    -------
    str
        named regular expression pattern
    '''
    # Extract the names of all placeholders from the format string
    placeholders_inner_parts = re.findall(r'{(.+?)}', format_string)
    # Remove format strings
    placeholder_names = [pl.split(':')[0] for pl in placeholders_inner_parts]
    placeholder_regexes = [re.escape('{%s}') % pl
                           for pl in placeholders_inner_parts]
    regex = format_string
    for pl_name, pl_regex in zip(placeholder_names, placeholder_regexes):
        if re.search(r'number', pl_name):
            regex = re.sub(pl_regex, '(?P<%s>\d+)' % pl_name, regex)
        else:
            regex = re.sub(pl_regex, '(?P<%s>.*)' % pl_name, regex)
    return regex


def load_config(filename):
    '''
    Load configuration settings from YAML file.

    Parameters
    ----------
    filename: str
        name of the config file

    Returns
    -------
    dict
        YAML content

    Raises
    ------
    OSError
        when `filename` does not exist
    '''
    if not os.path.exists(filename):
        raise OSError('Configuration file does not exist: %s' % filename)
    with open(filename) as f:
        return yaml.load(f.read())


def load_shift_descriptor(filename):
    '''
    Load shift description from JSON file.

    Parameters
    ----------
    filename: str
        name of the shift descriptor file

    Returns
    -------
    dict
        JSON content

    Raises
    ------
    OSError
        when `filename` does not exist
    '''
    if not os.path.exists(filename):
        raise OSError('Shift descriptor file does not exist: %s' % filename)
    with open(filename) as f:
        return json.load(f)


def check_config(cfg):
    '''
    Check that configuration settings contain all required keys.

    Parameters
    ----------
    cfg: dict
        configuration settings

    Raises
    ------
    KeyError
        when a required key is missing
    '''
    required_keys = {
        'COORDINATES_FROM_FILENAME',
        'COORDINATES_IN_FILENAME_ONE_BASED',
        'SUBEXPERIMENT_FOLDER_FORMAT',
        'SUBEXPERIMENT_FILE_FORMAT',
        'CYCLE_FROM_FILENAME',
        'EXPERIMENT_FROM_FILENAME',
        'IMAGE_FOLDER_LOCATION',
        'SUBEXPERIMENTS_EXIST',
        'SEGMENTATION_FOLDER_LOCATION',
        'OBJECTS_FROM_FILENAME',
        'SHIFT_FOLDER_LOCATION',
        'SHIFT_FILE_FORMAT',
        'STATS_FOLDER_LOCATION',
        'STATS_FILE_FORMAT',
        'CHANNEL_FROM_FILENAME',
        'MEASUREMENT_FOLDER_LOCATION'
    }
    for key in required_keys:
        if key not in cfg:
            raise KeyError('Configuration file must contain the key "%s"' % key)


def write_joblist(filename, joblist):
    '''
    Write joblist to YAML file.

    Parameters
    ----------
    filename: str
        name of the YAML file
    joblist: List[dict]
        job descriptions

    Raises
    ------
    OSError
        when `filename` does not exist
    '''
    if not os.path.exists(filename):
        raise OSError('Joblist file does not exist: %s' % filename)
    with open(filename, 'w') as joblist_file:
        joblist_file.write(yaml.dump(joblist, default_flow_style=False))


def read_joblist(filename):
    '''
    Read joblist from YAML file.

    Parameters
    ----------
    filename: str
        name of the YAML file

    Returns
    -------
    List[dict]
        job descriptions

    Raises
    ------
    OSError
        when `filename` does not exist
    '''
    if not os.path.exists(filename):
        raise OSError('Joblist file does not exist: %s' % filename)
    with open(filename, 'r') as joblist_file:
        return yaml.load(joblist_file.read())


class Namespacified(object):
    '''
    Class for loading key-value pairs of a dictionary into a Namespace object.
    '''
    def __init__(self, adict):
        self.__dict__.update(adict)
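# Illustrative conversion by regex_from_format_string (hypothetical
# placeholder names): '{experiment}_s{site_number}' becomes
# '(?P<experiment>.*)_s(?P<site_number>\d+)'.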
Python
0.000001
@@ -1617,498 +1617,8 @@ )%0A%0A%0A -def load_shift_descriptor(filename):%0A '''%0A Load shift description from JSON file.%0A%0A Parameters%0A ----------%0A filename: str%0A name of the shift descriptor file%0A%0A Returns%0A -------%0A dict%0A JSON content%0A%0A Raises%0A ------%0A OSError%0A when %60filename%60 does not exist%0A '''%0A if not os.path.exists(filename):%0A raise OSError('Shift descriptor file does not exist: %25s' %25 filename)%0A with open(filename) as f:%0A return json.load(f)%0A%0A%0A def @@ -2413,47 +2413,8 @@ AME' -,%0A 'MEASUREMENT_FOLDER_LOCATION' %0A
dcdd4040f45546472ff012dd4830e51804a1b9e5
Disable merchant debugging per default (to prevent logging and save disk space)
merchant_sdk/MerchantServer.py
merchant_sdk/MerchantServer.py
import json
from typing import Type

from flask import Flask, request, Response
from flask_cors import CORS

from .MerchantBaseLogic import MerchantBaseLogic
from .models import SoldOffer


def json_response(obj):
    js = json.dumps(obj)
    resp = Response(js, status=200, mimetype='application/json')
    return resp


class MerchantServer:

    def __init__(self, merchant_logic: Type[MerchantBaseLogic]):
        self.merchant_logic = merchant_logic
        self.server_settings = {
            'debug': True
        }

        self.app = Flask(__name__)
        CORS(self.app)

        self.register_routes()

    def log(self, *msg):
        if self.server_settings['debug']:
            print(*msg)

    '''
    Helper methods
    '''

    def get_all_settings(self):
        tmp_settings = {
            'state': self.merchant_logic.get_state()
        }
        tmp_settings.update(self.merchant_logic.get_settings())
        tmp_settings.update(self.server_settings)
        return tmp_settings

    def update_all_settings(self, new_settings):
        new_server_settings = {k: new_settings[k] for k in new_settings if k in self.server_settings}
        self.server_settings.update(new_server_settings)

        new_logic_settings = {k: new_settings[k] for k in new_settings if k in self.merchant_logic.get_settings()}
        self.merchant_logic.update_settings(new_logic_settings)

        self.log('update settings', self.get_all_settings())

    '''
    Routes
    '''

    def register_routes(self):
        self.app.add_url_rule('/settings', 'get_settings', self.get_settings, methods=['GET'])
        self.app.add_url_rule('/settings', 'put_settings', self.put_settings, methods=['PUT', 'POST'])
        self.app.add_url_rule('/settings/execution', 'set_state', self.set_state, methods=['POST'])
        self.app.add_url_rule('/sold', 'item_sold', self.item_sold, methods=['POST'])

    '''
    Endpoint definitions
    '''

    def get_settings(self):
        return json_response(self.get_all_settings())

    def put_settings(self):
        new_settings = request.json
        self.update_all_settings(new_settings)
        return json_response(self.get_all_settings())

    def set_state(self):
        next_state = request.json['nextState']
        self.log('Execution setting - next state:', next_state)

        '''
        Execution settings can contain setting changes,
        i.e. on 'init', merchant_url and marketplace_url are given.
        EDIT: maybe remove this settings update, since 'init' is not supported anymore
        '''
        endpoint_setting_keys = ['merchant_url', 'marketplace_url']
        endpoint_settings = {k: request.json[k] for k in request.json if k in endpoint_setting_keys}
        self.update_all_settings(endpoint_settings)

        if next_state == 'start':
            self.merchant_logic.start()
        elif next_state == 'stop':
            self.merchant_logic.stop()

        return json_response({})

    def item_sold(self):
        try:
            sent_json = request.get_json(force=True)
            offer = SoldOffer.from_dict(sent_json)
            self.merchant_logic.sold_offer(offer)
        except Exception as e:
            self.log(e)
        return json_response({})
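# Illustrative request against the routes above (values hypothetical):
#   PUT /settings with body {"debug": false} updates server_settings,
#   since update_all_settings() only accepts keys already present there.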
Python
0
@@ -506,19 +506,20 @@ debug': -Tru +Fals e%0A
f5421cb76103bf6da4b6dce74f2ae372c892067a
Add write_revision method to Metadata
onitu/api/metadata.py
onitu/api/metadata.py
class Metadata(object):
    """The Metadata class represents the metadata of any file in Onitu.

    This class should be instantiated via the :func:`Metadata.get_by_id`
    or :func:`Metadata.get_by_filename` class methods.

    The PROPERTIES class property represents each property found in the
    metadata common to all drivers. This is a dict where the key is the
    name of the property and the item is a tuple containing two functions,
    one which should be applied when the metadata are extracted from the
    database, the other when they are written.
    """

    PROPERTIES = {
        'filename': (str, str),
        'size': (int, str),
        'owners': (lambda e: e.split(':'), lambda l: ':'.join(l)),
        'uptodate': (lambda e: e.split(':'), lambda l: ':'.join(l)),
    }

    def __init__(self, plug=None, filename=None, size=0):
        super(Metadata, self).__init__()

        self.filename = filename
        self.size = size

        self.plug = plug

        self._revision = None
        self._fid = None

    @classmethod
    def get_by_filename(cls, plug, filename):
        """Instantiate a new :class:`Metadata` object for the file
        with the given name.
        """
        fid = plug.redis.hget('files', filename)

        if fid:
            return cls.get_by_id(plug, fid)
        else:
            return None

    @classmethod
    def get_by_id(cls, plug, fid):
        """Instantiate a new :class:`Metadata` object for the file
        with the given id.
        """
        values = plug.redis.hgetall('files:{}'.format(fid))

        metadata = cls()

        metadata.plug = plug
        metadata._fid = fid

        for name, (deserialize, _) in cls.PROPERTIES.items():
            metadata.__setattr__(name, deserialize(values.get(name)))

        return metadata

    @property
    def revision(self):
        """Return the current revision of the file for this entry.

        If the value has been set manually but not saved, returns it.
        Otherwise, seeks the value in the database.
        """
        if self._revision:
            return self._revision
        elif self._fid:
            return self.plug.redis.hget(
                'drivers:{}:files'.format(self.plug.name), self._fid
            )

    @revision.setter
    def revision(self, value):
        """Set the current revision of the file for this entry.

        The value will only be saved when :func:`Meta.write` is called.
        """
        self._revision = value

    def write(self):
        """Write the metadata for the current object to the database.
        """
        metadata = {}

        for name, (_, serialize) in self.PROPERTIES.items():
            try:
                metadata[name] = serialize(self.__getattribute__(name))
            except AttributeError:
                self.plug.error("Error writing metadata for {}, "
                                "missing attribute {}".format(self._fid, name))
                return

        self.plug.redis.hmset('files:{}'.format(self._fid), metadata)

        if self._revision:
            self.plug.redis.hset(
                'drivers:{}:files'.format(self.plug.name),
                self._fid,
                self._revision
            )
            self._revision = None
Python
0.000001
@@ -2410,31 +2410,42 @@ lue -will +is only -be save +d when +either%0A :fun @@ -2455,23 +2455,57 @@ Meta +data .write -%60 will be +_revision%60 or :func:%60Metadata.write%60 is %0A @@ -2561,16 +2561,274 @@ value%0A%0A + def write_revision(self):%0A if not self._revision:%0A return%0A%0A self.plug.redis.hset(%0A 'drivers:%7B%7D:files'.format(self.plug.name),%0A self._fid,%0A self._revision%0A )%0A%0A self._revision = None%0A%0A def @@ -2893,16 +2893,19 @@ object +in the data @@ -3194,16 +3194,23 @@ for %7B%7D, +missing %22%0A @@ -3229,17 +3229,19 @@ -%22missing + %22 attr @@ -3382,223 +3382,26 @@ -if self. -_revision:%0A self.plug.redis.hset(%0A 'drivers:%7B%7D:files'.format(self.plug.name),%0A self._fid,%0A self._revision%0A )%0A%0A self._revision = None +write_revision() %0A
10b21dc62875003d33949c25b917709e1f384a65
add memey alias for delete
cogs/tag.py
cogs/tag.py
#!/bin/env python

import discord
import asyncpg

from datetime import datetime
from discord.ext import commands


class Tag:
    def __init__(self, bot):
        self.bot = bot
        self.pg_con = bot.pg_con

    async def get_tag(self, server_id: int, tag_name: str):
        """ Returns tag value or None """
        query = '''
                SELECT server_id, owner_id, tag_name, tag_contents, created_at, total_uses
                FROM tags
                WHERE server_id = $1 AND tag_name = $2;
                '''
        return await self.pg_con.fetchrow(query, server_id, tag_name)

    async def can_delete_tag(self, ctx, tag_name):
        """ Check whether a user is admin or owns the tag """
        tag_record = await self.get_tag(ctx.guild.id, tag_name)
        tag_owner = tag_record['owner_id']
        if not tag_owner:
            return None
        return ctx.message.channel.permissions_for(ctx.author).administrator or tag_owner == ctx.author.id

    @commands.group(invoke_without_command=True)
    async def tag(self, ctx, *, tag_name: str):
        """ Retrieve a previously stored tag from the database """
        tag_record = await self.get_tag(ctx.guild.id, tag_name)
        if tag_record:
            await ctx.send(tag_record['tag_contents'])
            # Update usage count
            query = '''
                    UPDATE tags
                    SET total_uses = total_uses + 1
                    WHERE server_id = $1 AND tag_name = lower($2)
                    '''
            await self.pg_con.execute(query, ctx.guild.id, tag_name)
        else:
            return await ctx.send(f"Sorry, I couldn't find a tag matching `{tag_name}`.")

    @tag.command(aliases=['add'])
    async def create(self, ctx, tag_name, *, contents):
        """ Create a new tag for later retrieval """
        query = '''
                INSERT INTO tags (server_id, owner_id, tag_name, tag_contents, created_at, total_uses)
                VALUES ($1, $2, lower($3), $4, now(), $5)
                '''
        try:
            await self.pg_con.execute(query, ctx.guild.id, ctx.author.id, tag_name, contents, 0)
            await ctx.send(f'Tag `{tag_name}` created.')
        except asyncpg.UniqueViolationError:
            return await ctx.send(f'Sorry, tag `{tag_name}` already exists. If you own it, feel free to `.tag edit` it.')

    @tag.command(name='delete', aliases=['del'])
    async def _delete(self, ctx, *, tag_name):
        """ Delete a tag you created (or if you're an admin) """
        _can_delete = await self.can_delete_tag(ctx, tag_name)
        if _can_delete is None:
            return await ctx.send(f"Sorry, I couldn't find a tag matching `{tag_name}`.")
        elif _can_delete:
            query = "DELETE FROM tags WHERE tag_name = lower($1) AND server_id = $2"
            await self.pg_con.execute(query, tag_name, ctx.guild.id)
            await ctx.send(f'Tag `{tag_name}` deleted.')
        else:
            await ctx.send(f'Sorry, you do not have the necessary permissions to delete this tag.')

    @tag.command(aliases=['ed'])
    async def edit(self, ctx, tag_name, *, contents):
        """ Edit a tag which you created """
        # Get the record
        tag_record = await self.get_tag(ctx.guild.id, tag_name)
        # Check whether tag exists
        if not tag_record:
            return await ctx.send(f"Sorry, I couldn't find a tag matching `{tag_name}`.")
        # Check owner
        if tag_record['owner_id'] == ctx.author.id:
            query = '''
                    UPDATE tags
                    SET tag_contents = $1
                    WHERE tag_name = $2 AND server_id = $3
                    '''
            await self.pg_con.execute(query, contents, tag_name, ctx.guild.id)
            await ctx.send(f'Successfully edited tag `{tag_name}`.')
        else:
            await ctx.send(f'Sorry, you do not have the necessary permissions to edit this tag.')

    @tag.command()
    async def info(self, ctx, *, tag_name):
        """ Retrieve information about a tag """
        # Get the record
        tag_record = await self.get_tag(ctx.guild.id, tag_name)
        # Check whether tag exists
        if not tag_record:
            return await ctx.send(f"Sorry, I couldn't find a tag matching `{tag_name}`.")
        # Create the embed
        em = discord.Embed(title=tag_record['tag_name'], color=discord.Color.blue())
        em.timestamp = tag_record['created_at']
        em.set_footer(text='Created at')
        user = self.bot.get_user(tag_record['owner_id']) or (await self.bot.get_user_info(tag_record['owner_id']))
        em.set_author(name=str(user), icon_url=user.avatar_url)
        em.add_field(name='Tag Owner:', value=f"<@{tag_record['owner_id']}>")
        em.add_field(name='Uses:', value=tag_record['total_uses'])
        await ctx.send(embed=em)


def setup(bot):
    bot.add_cog(Tag(bot))
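# Illustrative Discord usage of the commands above (command prefix '.' as in
# the error message in create()):
#   .tag create greeting Hello!   -> inserts a row into the tags table
#   .tag greeting                 -> sends "Hello!" and bumps total_uses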
Python
0
@@ -2321,16 +2321,25 @@ s=%5B'del' +, 'delet' %5D)%0A a
db1df21b584b4b68e74b6775932c427f04692074
Fix comic filename.
dosagelib/comic.py
dosagelib/comic.py
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs

import urllib2
import os
import locale
import rfc822
import time

from .output import out
from .util import urlopen, saneDataSize, normaliseURL
from .progress import progressBar, OperationComplete
from .events import handler


class FetchComicError(IOError):
    """Exception for comic fetching errors."""
    pass


class ComicStrip(object):
    """A list of comic image URLs."""

    def __init__(self, name, parentUrl, imageUrls, namer):
        """Store the image URL list."""
        self.name = name
        self.parentUrl = parentUrl
        self.imageUrls = imageUrls
        self.namer = namer

    def getImages(self):
        """Get a list of image downloaders."""
        for imageUrl in self.imageUrls:
            yield self.getDownloader(normaliseURL(imageUrl))

    def getDownloader(self, url):
        filename = self.namer(url, self.parentUrl)
        return ComicImage(self.name, self.parentUrl, url, filename)


class ComicImage(object):

    def __init__(self, name, referrer, url, filename):
        """Set URL and filename."""
        self.name = name
        self.referrer = referrer
        self.url = url
        if filename is None:
            filename = url.rsplit('/')[1]
        self.filename, self.ext = os.path.splitext(filename)
        self.filename = self.filename.replace(os.sep, '_')
        self.ext = self.ext.replace(os.sep, '_')

    def connect(self):
        """Connect to host and get meta information."""
        out.write('Getting headers for %s...' % (self.url,), 2)
        try:
            self.urlobj = urlopen(self.url, referrer=self.referrer)
        except urllib2.HTTPError, he:
            raise FetchComicError, ('Unable to retrieve URL.', self.url, he.code)

        if self.urlobj.info().getmaintype() != 'image' and \
           self.urlobj.info().gettype() not in ('application/octet-stream', 'application/x-shockwave-flash'):
            raise FetchComicError, ('No suitable image found to retrieve.', self.url)

        # Always use mime type for file extension if it is sane.
        if self.urlobj.info().getmaintype() == 'image':
            self.ext = '.' + self.urlobj.info().getsubtype()
        self.contentLength = int(self.urlobj.info().get('content-length', 0))
        self.lastModified = self.urlobj.info().get('last-modified')
        out.write('... filename = "%s", ext = "%s", contentLength = %d' % (self.filename, self.ext, self.contentLength), 2)

    def touch(self, filename):
        """Set last modified date on filename."""
        if self.lastModified:
            tt = rfc822.parsedate(self.lastModified)
            if tt:
                mtime = time.mktime(tt)
                os.utime(filename, (mtime, mtime))

    def save(self, basepath, showProgress=False):
        """Save comic URL to filename on disk."""
        self.connect()

        filename = "%s%s" % (self.filename, self.ext)
        comicSize = self.contentLength
        comicDir = os.path.join(basepath, self.name.replace('/', os.sep))
        if not os.path.isdir(comicDir):
            os.makedirs(comicDir)

        fn = os.path.join(comicDir, filename)
        if os.path.isfile(fn) and os.path.getsize(fn) >= comicSize:
            self.urlobj.close()
            self.touch(fn)
            out.write('Skipping existing file "%s".' % (fn,), 1)
            return fn, False

        try:
            out.write('Writing comic to file %s...' % (fn,), 3)
            with open(fn, 'wb') as comicOut:
                startTime = time.time()
                if showProgress:
                    def pollData():
                        data = self.urlobj.read(8192)
                        if not data:
                            raise OperationComplete
                        comicOut.write(data)
                        return len(data), self.contentLength
                    progressBar(pollData)
                else:
                    comicOut.write(self.urlobj.read())
                endTime = time.time()
            self.touch(fn)
        except:
            if os.path.isfile(fn):
                os.remove(fn)
            raise
        else:
            size = os.path.getsize(fn)
            bytes = locale.format('%d', size, True)
            if endTime != startTime:
                speed = saneDataSize(size / (endTime - startTime))
            else:
                speed = '???'
            attrs = dict(fn=fn, bytes=bytes, speed=speed)
            out.write('Saved "%(fn)s" (%(bytes)s bytes, %(speed)s/sec).' % attrs, 1)
            handler.comicDownloaded(self.name, fn)
        finally:
            self.urlobj.close()
        return fn, True
Python
0.000001
@@ -979,24 +979,29 @@ e(self.name, + url, self.parent @@ -1004,21 +1004,16 @@ rentUrl, - url, filenam @@ -1071,16 +1071,21 @@ f, name, + url, referre @@ -1086,21 +1086,16 @@ eferrer, - url, filenam @@ -1281,16 +1281,19 @@ plit('/' +, 1 )%5B1%5D%0A @@ -2243,16 +2243,39 @@ ubtype() +.replace('jpeg', 'jpg') %0A @@ -2451,20 +2451,18 @@ e = -%22%25s%22 +%25r , ext = %22%25s%22 @@ -2457,20 +2457,18 @@ , ext = -%22%25s%22 +%25r , conten
3ee4d2f80f58cb0068eaeb3b7f5c4407ce8e60d0
add text information about progress of downloading
vk-photos-downloader.py
vk-photos-downloader.py
#!/usr/bin/python3.5
# -*- coding: UTF-8 -*-

import vk, os, time
from urllib.request import urlretrieve

token = input("Enter a token: ")

# Authorization
session = vk.Session(access_token=str(token))
vk_api = vk.API(session)

count = 0  # count of down. photos
perc = 0  # percent of down. photos
breaked = 0  # unsuccessful down.
time_now = time.time()  # current time

url = input("Enter a URL of album: ")  # url of album
folder_name = input("Enter a name of folder for download photos: ")  # fold. for photo

owner_id = url.split('album')[1].split('_')[0]  # id of owner
album_id = url.split('album')[1].split('_')[1][0:-1]  # id of album

photos_count = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['size']  # count of ph. in albums
album_title = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['title']  # albums title
photos_information = vk_api.photos.get(owner_id=owner_id, album_id=album_id)  # dictionaries of photos information

photos_link = []  # photos link

for i in photos_information:
    photos_link.append(i['src_xxbig'])

if not os.path.exists(folder_name):
    os.makedirs(folder_name + '/' + album_title)  # creating a folder for download photos
    qw = 'ok'
else:
    print("A folder with this name already exists!")
    exit()

photo_name = 0  # photo name

for i in photos_link:
    photo_name += 1
    urlretrieve(i, folder_name + '/' + album_title + '/' + str(photo_name) + '.jpg')  # download photos
Python
0
@@ -136,16 +136,28 @@ oken: %22) + # vk token %0D%0A%0D%0A#Aut @@ -538,16 +538,72 @@ hoto%0D%0A%0D%0A +print(%22-------------------------------------------%22)%0D%0A%0D%0A owner_id @@ -1071,110 +1071,137 @@ %0A%0D%0Ap -hotos_link = %5B%5D # photos link%0D%0A%0D%0Afor i in p +rint(%22A title of album - %7B%7D%22.format(album_title))%0D%0Aprint(%22P hotos -_information:%0D%0A photos_link.append(i%5B'src_xxbig'%5D + in album - %7B%7D%22.format(photos_count))%0D%0Aprint(%22------------------%22 )%0D%0A%0D @@ -1337,17 +1337,86 @@ -qw = 'ok' +print(%22Created a folder for photo.%22)%0D%0A print(%22---------------------------%22) %0D%0Ael @@ -1488,16 +1488,124 @@ it()%0D%0A%0D%0A +photos_link = %5B%5D # photos link%0D%0A%0D%0Afor i in photos_information:%0D%0A photos_link.append(i%5B'src_xxbig'%5D)%0D%0A%0D%0A photo_na @@ -1672,16 +1672,30 @@ e += 1%0D%0A + try:%0D%0A urlr @@ -1791,12 +1791,551 @@ photos%0D%0A + count += 1%0D%0A perc = (100 * count) / photos_count%0D%0A print(%22Download %7B%7D of %7B%7D photos. (%7B%7D%25)%22.format(count, photos_count, round(perc, 2)))%0D%0A except:%0D%0A print(%22An error occurred, file skipped.%22)%0D%0A breaked += 1%0D%0A%0D%0Aminutes = int((time.time() - time_now) // 60)%0D%0Aseconds = int((time.time() - time_now) %25 60)%0D%0A%0D%0Aprint(%22------------------------%22)%0D%0Aprint(%22Successful download %7B%7D photos.%22.format(count))%0D%0Aprint(%22Skipped %7B%7D photos.%22.format(breaked))%0D%0Aprint(%22Time spent: %7B%7D.%7B%7D minutes.%22.format(minutes, seconds))%0D%0A %0D%0A%0D%0A
b674bc4c369139926710311f1a3fc6ad39da9f0a
optimize code
app/xsbk.py
app/xsbk.py
# Scrape jokes from Qiushibaike (qiushibaike.com)

from cobweb.downloader import *
from cobweb.parser import *
import time
import re


def parse_joke(self):
    data = self.soup.find_all('div', class_='article block untagged mb15')
    self.content = []
    for d in data:
        soup_d = BeautifulSoup(str(d), 'html.parser', from_encoding='utf8')
        # username
        name = soup_d.h2.string
        # content (text + timestamp & image)
        c = soup_d.find('div', class_='content')
        # content = str(c.contents[0]).strip('\n')
        # timestamp = str(c.contents[1])
        pattern = re.compile("<div.*?content\">(.*?)<!--(.*?)-->.*?</div>", re.S)
        re1 = pattern.findall(str(c))
        content = re1[0][0].strip('\n').replace('<br>', '\n')
        timestamp = re1[0][1]
        img = soup_d.find('div', class_='thumb')
        if img:
            img_src = img.contents[1].contents[1]['src']
            content += "[img: %s]" % str(img_src)
        # number of likes
        like = soup_d.find('i', class_='number').string
        j = "name: %s\ncontent: %s\ntime: %s\nlike: %s" % (str(name), content, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(timestamp))), str(like))
        print(j)
        self.content.append(j)
    return self


class Sxbk:
    def __init__(self):
        self.page = 1
        self.url = 'http://www.qiushibaike.com/hot/page/'
        self.joke_lists = []
        self.enable = True
        self.downloader = Downloader()
        self.parse = Parser(None, parse_joke)

    # download a page
    def get_page(self, num=1):
        return self.downloader.get(self.url + str(num), header={
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        }, timeout=50).decode('utf8')

    # parse jokes into a list
    def gen_jokes(self, html):
        self.parse.set_html(html)
        self.joke_lists += self.parse.parse_content().get_content()

    # start
    def start(self):
        print('Press Enter to start...')
        while self.enable:
            n = input()
            if n == 'q':
                exit()
            if len(self.joke_lists) < 2:
                html = self.get_page(self.page)
                self.gen_jokes(html)
                self.page += 1
            print(self.joke_lists[0])
            del self.joke_lists[0]


s = Sxbk()
s.start()
Python
0.999117
@@ -186,16 +186,102 @@ mb15')%0A + content_pattern = re.compile(%22%3Cdiv.*?content%5C%22%3E(.*?)%3C!--(.*?)--%3E.*?%3C/div%3E%22, re.S)%0A self @@ -611,96 +611,22 @@ -pattern = re.compile(%22%3Cdiv.*?content%5C%22%3E(.*?)%3C!--(.*?)--%3E.*?%3C/div%3E%22, re.S)%0A re1 = +re1 = content_ patt @@ -1141,25 +1141,8 @@ e))%0A - print(j)%0A @@ -2223,13 +2223,12 @@ )%0As.start()%0A -%0A
cc894ecf36a95d18fc84a4866c5a1902d291ccbe
Use non-lazy `gettext` where sufficient
byceps/blueprints/site/ticketing/forms.py
byceps/blueprints/site/ticketing/forms.py
""" byceps.blueprints.site.ticketing.forms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ from flask import g from flask_babel import lazy_gettext from wtforms import StringField from wtforms.validators import InputRequired, ValidationError from ....services.consent import ( consent_service, subject_service as consent_subject_service, ) from ....services.user import service as user_service from ....util.l10n import LocalizedForm def validate_user(form, field): screen_name = field.data.strip() user = user_service.find_user_by_screen_name( screen_name, case_insensitive=True ) if user is None: raise ValidationError(lazy_gettext('Unknown username')) if (not user.initialized) or user.suspeded or user.deleted: raise ValidationError(lazy_gettext('The user account is not active.')) user = user.to_dto() required_consent_subjects = ( consent_subject_service.get_subjects_required_for_brand(g.brand_id) ) required_consent_subject_ids = { subject.id for subject in required_consent_subjects } if not consent_service.has_user_consented_to_all_subjects( user.id, required_consent_subject_ids ): raise ValidationError( lazy_gettext( 'User "%(screen_name)s" has not yet given all necessary ' 'consents. Logging in again is required.', screen_name=user.screen_name, ) ) field.data = user class SpecifyUserForm(LocalizedForm): user = StringField( lazy_gettext('Username'), [InputRequired(), validate_user] )
Python
0.000092
@@ -222,16 +222,25 @@ l import + gettext, lazy_ge @@ -245,16 +245,16 @@ gettext%0A - from wtf @@ -755,37 +755,32 @@ ValidationError( -lazy_ gettext('Unknown @@ -887,21 +887,16 @@ onError( -lazy_ gettext( @@ -1325,16 +1325,16 @@ nError(%0A + @@ -1329,37 +1329,32 @@ or(%0A -lazy_ gettext(%0A
862885b5ea2b4d04c8980c257d3cdf644dd60f0c
Set the version to 0.1.6 final
king_phisher/version.py
king_phisher/version.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  king_phisher/version.py
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following disclaimer
#    in the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of the project nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import collections

version_info = collections.namedtuple('version_info', ['major', 'minor', 'micro'])(0, 1, 6)
"""A tuple representing the version information in the format ('major', 'minor', 'micro')"""

version_label = 'beta'
"""A version label such as alpha or beta."""

version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro)
"""A string representing the full version information."""

# distutils_version is compatible with distutils.version classes
distutils_version = version
"""A string suitable for being parsed by :py:mod:`distutils.version` classes."""

if version_label:
    version += '-' + version_label
    distutils_version += version_label[0]
    if version_label[-1].isdigit():
        distutils_version += version_label[-1]
    else:
        distutils_version += '0'

rpc_api_version = 2
"""An integer representing the current version of the RPC API, used for compatibility checks."""
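# With the values above (version_info (0, 1, 6), version_label 'beta') this
# module yields version == '0.1.6-beta' and distutils_version == '0.1.6b0'.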
Python
0.003887
@@ -1792,20 +1792,16 @@ abel = ' -beta '%0A%22%22%22A v
751daadea9a6946ea52db3ed48560f5afa9bfb9a
Change user_info api url
api/urls.py
api/urls.py
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from rest_framework.routers import DefaultRouter

from api import views

router = DefaultRouter()

# share routes
router.register(r'extras', views.ExtraDataViewSet, base_name=views.ExtraDataViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'entities', views.EntityViewSet, base_name=views.EntityViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'venues', views.VenueViewSet, base_name=views.VenueViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'organizations', views.OrganizationViewSet, base_name=views.OrganizationViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'publishers', views.PublisherViewSet, base_name=views.PublisherViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'institutions', views.InstitutionViewSet, base_name=views.InstitutionViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'identifiers', views.IdentifierViewSet, base_name=views.IdentifierViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'people', views.PersonViewSet, base_name=views.PersonViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'affiliations', views.AffiliationViewSet, base_name=views.AffiliationViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'contributors', views.ContributorViewSet, base_name=views.ContributorViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'funders', views.FunderViewSet, base_name=views.FunderViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'awards', views.AwardViewSet, base_name=views.AwardViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'tags', views.TagViewSet, base_name=views.TagViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'links', views.LinkViewSet, base_name=views.LinkViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'creativeworks', views.CreativeWorkViewSet, base_name=views.CreativeWorkViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'preprints', views.PreprintViewSet, base_name=views.PreprintViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'publications', views.PublicationViewSet, base_name=views.PublicationViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'projects', views.ProjectViewSet, base_name=views.ProjectViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'manuscripts', views.ManuscriptViewSet, base_name=views.ManuscriptViewSet.serializer_class.Meta.model._meta.model_name)

# workflow routes
router.register(r'normalizeddata', views.NormalizedDataViewSet, base_name=views.NormalizedDataViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'changesets', views.ChangeSetViewSet, base_name=views.ChangeSetViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'changes', views.ChangeViewSet, base_name=views.ChangeViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'rawdata', views.RawDataViewSet, base_name=views.RawDataViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'users', views.ShareUserViewSet, base_name=views.ShareUserViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'providers', views.ProviderViewSet, base_name=views.ProviderViewSet.serializer_class.Meta.model._meta.model_name)

urlpatterns = [
    url(r'user_info/?', views.ShareUserView.as_view(), name='userinfo'),
    url(r'search/(?P<url_bits>.*)', csrf_exempt(views.ElasticSearchView.as_view()), name='search'),
] + router.urls
Python
0.000002
@@ -3534,17 +3534,16 @@ l(r'user -_ info/?',
2ac05a8ccc9d7a9ab4f455f18355d80af9e13c84
add temp file context
tpl/path.py
tpl/path.py
# -*- coding:utf-8 -*-

import os
import uuid

HOME = os.path.abspath(os.path.expanduser('~'))
WORK_DIR = os.path.abspath(os.getcwd())
CWD = WORK_DIR


class TempDir(object):
    pass


class TempFile(object):
    pass


class TempPipe(object):
    def __init__(self):
        self.pipe_path = '/tmp/{}.pipe'.format(str(uuid.uuid4()))
        self._pipe = None

    @property
    def pipe(self):
        if self._pipe is None:
            self._pipe = open(self.pipe_path, 'rb')
        return self._pipe

    def __enter__(self):
        os.mkfifo(self.pipe_path)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.pipe.close()
        os.remove(self.pipe_path)


def list_dirs(path, recursion=True):
    assert os.path.exists(path) and os.path.isdir(path)
    if recursion is True:
        for dir_path, dir_names, _ in os.walk(path):
            for dir_name in dir_names:
                yield os.path.join(dir_path, dir_name)
    if recursion is False:
        for dir in [p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))]:
            yield os.path.join(path, dir)


def list_files(path, recursion=True):
    assert os.path.exists(path) and os.path.isdir(path)
    if recursion is True:
        for dir_path, _, file_names in os.walk(path):
            for file_name in file_names:
                yield os.path.join(dir_path, file_name)
    if recursion is False:
        for file in [p for p in os.listdir(path) if os.path.isfile(os.path.join(path, p))]:
            yield os.path.join(path, file)


def list_all(path, recursion=True):
    assert os.path.exists(path) and os.path.isdir(path)
    if recursion is True:
        for dir in list_dirs(path):
            yield dir
        for file in list_files(path):
            yield file
    if recursion is False:
        for p in os.listdir(path):
            yield os.path.join(path, p)


def get_parent_path(path, depth=1):
    parent_path = path
    for _ in range(depth):
        parent_path = os.path.abspath(os.path.dirname(parent_path))
    return parent_path


def mkdirs(path):
    if os.path.exists(path) and os.path.isdir(path):
        return
    os.makedirs(path)


def touch(path):
    if os.path.exists(path) and os.path.isfile(path):
        return
    fd = open(path, 'w')
    fd.close()
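# Illustrative use of the context manager above (reading blocks until a
# writer opens the FIFO):
#   with TempPipe() as tp:
#       data = tp.pipe.read()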
Python
0.000001
@@ -200,36 +200,578 @@ le(object):%0A -pass +def __init__(self, name=None, suffix='tmp'):%0A self.path = '/tmp/%7B%7D.%7B%7D'.format(name or str(uuid.uuid4()), suffix)%0A self._fd = None%0A self._close = False%0A%0A @property%0A def fd(self):%0A if self._fd is None:%0A self._fd = open(self.path, 'w')%0A return self._fd%0A%0A def close(self):%0A if self._close is True:%0A return%0A self.fd.close()%0A%0A def __enter__(self):%0A return self%0A%0A def __exit__(self, exc_type, exc_val, exc_tb):%0A self.close()%0A os.remove(self.path) %0A%0A%0Aclass TempPip
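Decoded (%0A → newline, %7B%7D → {}), this hunk replaces the `pass` stub with a temp-file context manager. A runnable reconstruction, with conventional 4-space indentation assumed and a small usage demo appended (the hard-coded path assumes a writable POSIX /tmp):

    import os
    import uuid

    class TempFile(object):
        def __init__(self, name=None, suffix='tmp'):
            self.path = '/tmp/{}.{}'.format(name or str(uuid.uuid4()), suffix)
            self._fd = None
            self._close = False

        @property
        def fd(self):
            # the file is opened lazily, on first access
            if self._fd is None:
                self._fd = open(self.path, 'w')
            return self._fd

        def close(self):
            # as committed, nothing ever sets _close to True
            if self._close is True:
                return
            self.fd.close()

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.close()
            os.remove(self.path)

    # usage sketch:
    with TempFile(name='demo') as tmp:
        tmp.fd.write('hello')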
7c8d7a456634d15f8c13548e2cfd6be9440f7c65
Add handler for 403 forbidden (User does not have Atmosphere access, but was correctly authenticated)
troposphere/__init__.py
troposphere/__init__.py
import logging from flask import Flask from flask import render_template, redirect, url_for, request import requests from troposphere import settings from troposphere.cas import (cas_logoutRedirect, cas_loginRedirect, cas_validateTicket) from troposphere.oauth import generate_access_token logger = logging.getLogger(__name__) app = Flask(__name__) def get_maintenance(): """ Returns a list of maintenance records along with a boolean to indicate whether or not login should be disabled """ return ([], False) @app.route('/') def redirect_app(): return "Redirect" @app.errorhandler(503) def handle_maintenance(): return "We're undergoing maintenance" @app.route('/login', methods=['GET', 'POST']) def login(): """ CAS Login : Phase 1/3 Call CAS Login """ records, disabled_login = get_maintenance() if disabled_login: abort(503) #if request.method == "POST" and 'next' in request.form: return cas_loginRedirect('/application/') #else: #return "Login please" @app.route('/logout') def logout(): #django_logout(request) if request.POST.get('cas', False): return cas_logoutRedirect() return redirect(settings.REDIRECT_URL + '/login') @app.route('/CAS_serviceValidater') def cas_service_validator(): """ Method expects 2 GET parameters: 'ticket' & 'sendback' After a CAS Login: Redirects the request based on the GET param 'ticket' Unauthorized Users are returned a 401 Authorized Users are redirected to the GET param 'sendback' """ logger.debug('GET Variables:%s' % request.args) sendback = request.args.get('sendback', None) ticket = request.args.get('ticket', None) if not ticket: logger.info("No Ticket received in GET string") abort(400) user = cas_validateTicket(ticket, sendback) logger.debug(user + " successfully authenticated against CAS") # Now check Groupy key = open(settings.OAUTH_PRIVATE_KEY, 'r').read() token = generate_access_token(key, user) logger.debug("TOKEN: " + token) return redirect(sendback) @app.route('/no_user') def no_user(): return "You're not an Atmopshere user" #@app.route('/CASlogin', defaults={'path': ''}) #@app.route('/CASlogin/<redirect>') # """ # url(r'^CASlogin/(?P<redirect>.*)$', 'authentication.cas_loginRedirect'), # """ # pass @app.route('/application', defaults={'path': ''}) @app.route('/application/', defaults={'path': ''}) @app.route('/application/<path:path>') def application(path): return render_template('application.html') if __name__ == '__main__': app.run(host='0.0.0.0', debug=True)
Python
0
@@ -94,16 +94,23 @@ request +, abort %0Aimport @@ -2029,16 +2029,29 @@ .read()%0A + try:%0A toke @@ -2087,16 +2087,20 @@ , user)%0A + logg @@ -2127,24 +2127,28 @@ token)%0A + + return redir @@ -2165,30 +2165,61 @@ ck)%0A -%[email protected]('/no_user' + except:%0A abort(403)%0A%[email protected](403 )%0Ade @@ -2228,19 +2228,40 @@ no_user( +e ):%0A + logger.debug(e)%0A retu
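Decoded, the hunks wrap token generation in a try/except that aborts with 403, and turn the old /no_user route into an errorhandler(403) that receives and logs the exception. A self-contained sketch of that Flask pattern — here generate_access_token is a hypothetical stub standing in for the real helper:

    from flask import Flask, abort

    app = Flask(__name__)

    def generate_access_token(key, user):
        # stand-in: the real helper raises when the user lacks access
        raise RuntimeError('user has no Atmosphere access')

    @app.errorhandler(403)
    def no_user(e):
        # Flask passes the HTTPException describing the 403
        return "You're not an Atmosphere user", 403

    @app.route('/validate')
    def validate():
        try:
            return generate_access_token('key', 'user')
        except Exception:
            abort(403)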
b7be60eff8e0c82741dda674824aa748e33e7fdd
convert pui.py to pywiki framework
trunk/toolserver/pui.py
trunk/toolserver/pui.py
#!usr/bin/python # -*- coding: utf-8 -* # # (C) Legoktm 2008-2009, MIT License # import re, sys, os sys.path.append(os.environ['HOME'] + '/pyenwiki') import wikipedia site = wikipedia.getSite() page = wikipedia.Page(site, 'Wikipedia:Possibly unfree images') wikitext = state0 = page.get() wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext) EditMsg = 'Adding new day to holding cell' wikipedia.showDiff(state0, wikitext) wikipedia.setAction(EditMsg) page.put(wikitext)
Python
0.999999
@@ -93,17 +93,8 @@ t re -, sys, os %0Asys @@ -131,18 +131,16 @@ %5D + '/py -en wiki')%0Ai @@ -153,40 +153,8 @@ wiki -pedia%0Asite = wikipedia.getSite() %0Apag @@ -165,25 +165,14 @@ wiki -pedia .Page( -site, 'Wik @@ -416,21 +416,16 @@ ll'%0Awiki -pedia .showDif @@ -448,37 +448,8 @@ xt)%0A -wikipedia.setAction(EditMsg)%0A page @@ -453,17 +453,25 @@ age.put(wikitext +,EditMsg )
7b3fd535b7622a9c4253aa80276e05ed83f8177e
Fix app name error
txmoney/rates/models.py
txmoney/rates/models.py
# coding=utf-8 from __future__ import absolute_import, unicode_literals from datetime import date from decimal import Decimal from django.db import models from django.utils.functional import cached_property from ..settings import txmoney_settings as settings from .exceptions import RateDoesNotExist class RateSource(models.Model): name = models.CharField(max_length=100) base_currency = models.CharField(max_length=3, default=settings.BASE_CURRENCY, blank=True) last_update = models.DateTimeField(auto_now=True, blank=True) class Meta: unique_together = ('name', 'base_currency') @cached_property def is_updated(self): return True if self.last_update.date() == date.today() else False class RateQuerySet(models.QuerySet): def get_rate_currency_by_date(self, currency, currency_date=None): """ Get currency for a date :param currency: base currency. :param currency_date: ratio currency. :return: Currency """ currency_date = currency_date if currency_date else date.today() try: # TODO: check if rate if updated else update return self.get(currency=currency, date=currency_date) except Rate.DoesNotExist: try: backend = settings.DEFAULT_BACKEND() backend.update_rates() return self.get(currency=currency, date=currency_date) except Rate.DoesNotExist: raise RateDoesNotExist(currency, currency_date) class Rate(models.Model): source = models.ForeignKey(RateSource, on_delete=models.PROTECT, related_name='rates', related_query_name='rate') currency = models.CharField(max_length=3, unique_for_date='date') value = models.DecimalField(max_digits=14, decimal_places=6) date = models.DateField(auto_now_add=True, blank=True) objects = RateQuerySet.as_manager() class Meta: unique_together = ('source', 'currency', 'date') @staticmethod def get_ratio(from_currency, to_currency, ratio_date=None): """ Calculate exchange ratio between two currencies for a date :param from_currency: base currency. :param to_currency: ratio currency. :param ratio_date: ratio date :return: Decimal """ ratio_date = ratio_date if ratio_date else date.today() # If not default currency get date base currency rate value because all rates are for base currency ratio = Decimal(1) if from_currency == settings.BASE_CURRENCY else \ Rate.objects.get_rate_currency_by_date(from_currency, ratio_date).value if to_currency != settings.BASE_CURRENCY: money_rate = Decimal(1) / Rate.objects.get_rate_currency_by_date(to_currency, ratio_date).value ratio *= money_rate return ratio
Python
0.000009
@@ -544,32 +544,67 @@ class Meta:%0A + app_label = 'txmoneyrates'%0A unique_t @@ -1956,32 +1956,67 @@ class Meta:%0A + app_label = 'txmoneyrates'%0A unique_t
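Decoded, the fix adds an explicit app_label to both model Meta classes — presumably because the models module lives outside a path Django can infer the app name from. After the change the two Meta blocks read:

    class Meta:                          # RateSource
        app_label = 'txmoneyrates'
        unique_together = ('name', 'base_currency')

    class Meta:                          # Rate
        app_label = 'txmoneyrates'
        unique_together = ('source', 'currency', 'date')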
d6029a7b2e39ff6222ca3d6788d649b14bbf35e3
add smoother to denominator as well
trending.py
trending.py
import googleanalytics as ga import collections import numpy import datetime SMOOTHER = 20 WINDOW = 8 GROWTH_THRESHOLD = 0.03 def trend(counts) : X, Y = zip(*counts) X = numpy.array([x.toordinal() for x in X]) X -= datetime.date.today().toordinal() A = numpy.array([numpy.ones(len(X)), X]) Y = numpy.log(numpy.array(Y)) w = numpy.linalg.lstsq(A.T,Y)[0] return w profile = ga.authenticate(identity='sunspot', account='Illinois Campaign for Political Reform', webproperty='Illinois Sunshine', profile='Illinois Sunshine') totals = profile.core.query.metrics('pageviews').\ daily(days=-WINDOW) totals = {date : count for date, count in totals.rows} pages = profile.core.query.metrics('pageviews').\ dimensions('pagepath').\ daily(days=-WINDOW) page_counts = collections.defaultdict(dict) normalized_page_counts = collections.defaultdict(dict) for date, page, count in pages.rows : page_counts[page][date] = count normalized_page_counts[page][date] = (count + SMOOTHER)/totals[date] for counts in normalized_page_counts.values() : for date in totals.keys() - counts.keys() : counts[date] = SMOOTHER/totals[date] for page, counts in normalized_page_counts.items() : b0, b1 = trend(counts.items()) if b1 > GROWTH_THRESHOLD and page.startswith('/committees/') : print(page, b0, b1) for count in sorted(page_counts[page].items()) : print(count)
Python
0.000001
@@ -122,9 +122,9 @@ 0.0 -3 +2 %0A%0Ade @@ -1171,16 +1171,17 @@ OOTHER)/ +( totals%5Bd @@ -1180,24 +1180,36 @@ totals%5Bdate%5D + + SMOOTHER) %0A%0Afor counts @@ -1326,16 +1326,17 @@ MOOTHER/ +( totals%5Bd @@ -1339,16 +1339,28 @@ ls%5Bdate%5D + + SMOOTHER) %0A%0Afor pa
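Decoded, the commit lowers GROWTH_THRESHOLD from 0.03 to 0.02 and — matching the subject line — applies the additive smoother to the denominator as well, so a page missing from a day's totals fills in as SMOOTHER/(totals[date] + SMOOTHER). A runnable one-function sketch of the new normalisation:

    SMOOTHER = 20

    def normalized(count, total):
        # additive smoothing on numerator *and* denominator: a
        # zero-traffic day now yields 1.0 instead of a division blow-up
        return float(count + SMOOTHER) / (total + SMOOTHER)

    assert normalized(0, 0) == 1.0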
656c0f44c27f64d14dde7cbbfdec31906dab4c51
Add params to request docs.
treq/api.py
treq/api.py
from treq.client import HTTPClient def head(url, **kwargs): """ Make a ``HEAD`` request. See :py:func:`treq.request` """ return _client(**kwargs).head(url, **kwargs) def get(url, headers=None, **kwargs): """ Make a ``GET`` request. See :py:func:`treq.request` """ return _client(**kwargs).get(url, headers=headers, **kwargs) def post(url, data=None, **kwargs): """ Make a ``POST`` request. See :py:func:`treq.request` """ return _client(**kwargs).post(url, data=data, **kwargs) def put(url, data=None, **kwargs): """ Make a ``PUT`` request. See :py:func:`treq.request` """ return _client(**kwargs).put(url, data=data, **kwargs) def delete(url, **kwargs): """ Make a ``DELETE`` request. See :py:func:`treq.request` """ return _client(**kwargs).delete(url, **kwargs) def request(method, url, **kwargs): """ Make an HTTP request. :param str method: HTTP method. Example: ``'GET'``, ``'HEAD'``. ``'PUT'``, ``'POST'``. :param str url: http or https URL, which may include query arguments. :param headers: Optional HTTP Headers to send with this request. :type headers: Headers or None :param data: Optional request body. :type data: str, file-like, IBodyProducer, or None :param reactor: Optional twisted reactor. :param bool persistent: Use peristent HTTP connections. Default: ``True`` :param bool allow_redirects: Follow HTTP redirects. Default: ``True`` :rtype: Deferred that fires with an IResponse provider. """ return _client(**kwargs).request(method, url, **kwargs) # # Private API # def _client(*args, **kwargs): return HTTPClient.with_config(**kwargs)
Python
0
@@ -1218,32 +1218,279 @@ eaders or None%0A%0A + :param params: Optional paramters to be append as the query string to%0A the URL, any query string parameters in the URL already will be%0A preserved.%0A%0A :type params: dict w/ str or list of str values, list of 2-tuples, or None.%0A%0A :param data:
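The decoded hunk documents a params keyword for treq.request — "dict w/ str or list of str values, list of 2-tuples, or None", appended to the URL's query string with any existing query parameters preserved. A usage sketch against that documented signature (URLs are placeholders; a running Twisted reactor is assumed):

    import treq

    # dict with a list value -> the key is repeated in the query string
    d = treq.get('https://example.com/search', params={'q': 'tea', 'tag': ['a', 'b']})

    # list of 2-tuples keeps an explicit ordering
    d = treq.get('https://example.com/search', params=[('page', '1'), ('sort', 'asc')])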
5af3ba58fcaf35e22925b3b7cf8c935e19a10038
Split up configuration-getting logic.
dreamssh/config.py
dreamssh/config.py
from ConfigParser import SafeConfigParser import os from zope.interface import moduleProvides from dreamssh import meta from dreamssh.sdk import interfaces moduleProvides(interfaces.IConfig) class Config(object): pass # Main main = Config() main.config = Config() main.config.userdir = os.path.expanduser("~/.%s" % meta.library_name) main.config.localfile = "config.ini" main.config.userfile = "%s/%s" % (main.config.userdir, main.config.localfile) # Internal SSH Server ssh = Config() ssh.servicename = meta.description ssh.port = 2222 ssh.pidfile = "twistd.pid" ssh.username = "root" ssh.keydir = os.path.join(main.config.userdir, "ssh") ssh.privkey = "id_rsa" ssh.pubkey = "id_rsa.pub" ssh.localdir = "~/.ssh" ssh.banner = """: : Welcome to : :________ ____________________ __ :___ __ \_________________ _______ _____ ___/_ ___/__ / / / :__ / / /_ ___/ _ \ __ `/_ __ `__ \____ \_____ \__ /_/ / :_ /_/ /_ / / __/ /_/ /_ / / / / /___/ /____/ /_ __ / :/_____/ /_/ \___/\__,_/ /_/ /_/ /_//____/ /____/ /_/ /_/ : : : You have logged into a DreamSSH Server. : {{HELP}} : : Enjoy! : """ class Configurator(object): """ """ def __init__(self, main=None, ssh=None): self.main = main self.ssh = ssh self.updateConfig() def buildDefaults(self): config = SafeConfigParser() config.add_section("SSH") config.set("SSH", "servicename", self.ssh.servicename) config.set("SSH", "port", str(self.ssh.port)) config.set("SSH", "pidfile", self.ssh.pidfile) config.set("SSH", "username", self.ssh.username) config.set("SSH", "keydir", self.ssh.keydir) config.set("SSH", "privkey", self.ssh.privkey) config.set("SSH", "pubkey", self.ssh.pubkey) config.set("SSH", "localdir", self.ssh.localdir) config.set("SSH", "banner", self.ssh.banner) return config def getConfigFile(self): if os.path.exists(self.main.config.localfile): return self.main.config.localfile if not os.path.exists(self.main.config.userdir): os.mkdir(os.path.expanduser(self.main.config.userdir)) return self.main.config.userfile def writeDefaults(self): config = buildDefaults() with open(self.getConfigFile(), "wb") as configFile: config.write(configFile) def updateConfig(self): """ If the configfile doesn't exist, this method will create it and exit. If it does exist, it will load the config values from the file (which may be different from those defined be default in this module), and update the in-memory config values with what it reads from the file. """ configFile = self.getConfigFile() if not os.path.exists(configFile): self.writeDefaults() return config = SafeConfigParser() config.read(self.getConfigFile()) self.ssh.servicename = config.get("SSH", "servicename") self.ssh.port = int(config.get("SSH", "port")) self.ssh.pidfile = config.get("SSH", "pidfile") self.ssh.username = str(config.get("SSH", "username")) self.ssh.keydir = config.get("SSH", "keydir") self.ssh.privkey = config.get("SSH", "privkey") self.ssh.pubkey = config.get("SSH", "pubkey") self.ssh.localdir = config.get("SSH", "localdir") self.ssh.banner = str(config.get("SSH", "banner")) Configurator(main, ssh) del Config, Configurator
Python
0.000001
@@ -2388,16 +2388,268 @@ gFile)%0A%0A + def getConfig(self):%0A configFile = self.getConfigFile()%0A if not os.path.exists(configFile):%0A self.writeDefaults()%0A return%0A config = SafeConfigParser()%0A config.read(configFile)%0A return config%0A %0A def @@ -2743,14 +2743,35 @@ ill -create +(indirectly) create%0A it @@ -3030,36 +3030,32 @@ %22%0A config -File = self.getConfi @@ -3047,36 +3047,32 @@ = self.getConfig -File ()%0A if no @@ -3077,34 +3077,14 @@ not -os.path.exists( config -File) :%0A @@ -3097,125 +3097,14 @@ -self.writeDefaults()%0A return%0A config = SafeConfigParser()%0A config.read(self.getConfigFile()) +return %0A @@ -3619,16 +3619,38 @@ nner%22))%0A + return config%0A %0A%0AConfig
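Decoded, the refactor pulls the read-or-create logic out of updateConfig() into a new getConfig() helper; updateConfig() now delegates to it and finishes by returning the parsed config. The reconstructed helper and the slimmed-down caller:

    def getConfig(self):
        configFile = self.getConfigFile()
        if not os.path.exists(configFile):
            self.writeDefaults()
            return
        config = SafeConfigParser()
        config.read(configFile)
        return config

    def updateConfig(self):
        config = self.getConfig()
        if not config:
            return
        self.ssh.servicename = config.get("SSH", "servicename")
        # ... the remaining SSH fields are read exactly as before ...
        return config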
0ba512b0e8eb6b5055261afb2962d3bfc5e2fda5
Add some playback stream headers
src/playback.py
src/playback.py
# -*- coding: utf-8 -*- import mimetypes import os from flask import Response, request import audiotranscode from tables import Song import config def stream_audio(): song = Song.get_one(id=request.args.get('id')) # A hack to get my local dev env working path = song.path if config.DEBUG: cut = '/mnt/storage/audio/music/' path = os.path.join(config.MUSIC_DIR, song.path[len(cut):]) # Find the file and guess type mime = mimetypes.guess_type(path)[0] ext = mimetypes.guess_extension(mime) # Transcoding if required transcode = False if ext not in ['.mp3', '.ogg']: transcode = True mime = "audio/mpeg" def generate_audio(): if not transcode: with open(path, "rb") as handle: data = handle.read(1024) while data: yield data data = handle.read(1024) else: tc = audiotranscode.AudioTranscode() for data in tc.transcode_stream(path, 'mp3'): yield data return Response(generate_audio(), mimetype=mime)
Python
0
@@ -86,30 +86,112 @@ est%0A -import audiotranscode%0A +from werkzeug.datastructures import Headers%0Aimport audiotranscode%0A%0Afrom utils import generate_random_key %0Afro @@ -756,16 +756,204 @@ io/mpeg%22 +%0A ext = '.mp3'%0A%0A # Send some extra headers%0A headers = Headers()%0A headers.add('Content-Transfer-Encoding', 'binary')%0A headers.add('Content-Length', os.path.getsize(path)) %0A%0A de @@ -1383,14 +1383,31 @@ imetype=mime +, headers=headers )%0A
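Decoded, the commit imports werkzeug's Headers (plus a generate_random_key helper the visible hunks never use), pins ext to '.mp3' when transcoding, and attaches two extra headers to the response. The reconstructed additions:

    from werkzeug.datastructures import Headers

    # when transcoding is required:
    mime = "audio/mpeg"
    ext = '.mp3'

    # Send some extra headers
    headers = Headers()
    headers.add('Content-Transfer-Encoding', 'binary')
    headers.add('Content-Length', os.path.getsize(path))

    # and the response now carries them:
    return Response(generate_audio(), mimetype=mime, headers=headers)

Note that Content-Length is the on-disk size of the source file, which will not match a transcoded stream — faithful to the commit as decoded.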
ecb3779a653791b0f5f172fea52ae49326e705e1
Update auth.py to print a message at the start of authentication.

app/lib/twitter/auth.py
app/lib/twitter/auth.py
# -*- coding: utf-8 -*- """ Setup authentication so tweepy package can access Twitter API. Usage: $ python -m lib.twitter.auth --help Based on https://github.com/tweepy/tweepy/blob/master/examples/oauth.py http://docs.tweepy.org/en/latest/code_snippet.html """ import webbrowser import tweepy from lib.config import AppConf appConf = AppConf() # Setup configured authentication values as global variables. CONSUMER_KEY = appConf.get('TwitterAuth', 'consumerKey') CONSUMER_SECRET = appConf.get('TwitterAuth', 'consumerSecret') ACCESS_KEY = appConf.get('TwitterAuth', 'accessKey') ACCESS_SECRET = appConf.get('TwitterAuth', 'accessSecret') # Raise an error for consumer values, but access keys may still be blank # if only user tokens will be used using user flow. msg = ('Invalid Twitter auth details. Register your own Twitter app at ' 'dev.twitter.com, then paste your credentials in a `app.local.conf`' ' file using headings as in `app.conf`.') assert CONSUMER_KEY and CONSUMER_SECRET and \ CONSUMER_KEY != 'YOUR_CONSUMER_KEY', msg def generateAppToken(): """ Read configured details for twitter account app and generate auth object with an access token set. """ auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(ACCESS_KEY, ACCESS_SECRET) return auth def generateUserToken(): """ Generate a Twitter API access token using configured Twitter app credentials. """ auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) print 'You need to authorise the application. Opening page in browser.' authURL = auth.get_authorization_url() webbrowser.open(authURL) # This is fixed to command line input for now. userPin = raw_input('Generate a pin and enter it here or enter `quit`. /> ') if not userPin or userPin.lower() in ('q', 'quit', 'exit'): print 'Exiting.' exit(0) print 'Authenticating...' auth.get_access_token(userPin) return auth def getAPIConnection(userFlow=False): """ Return tweepy API object for API requests. IMPORTANT: When testing the user flow functionality, do not sign in to Twitter in the browser the same user you use to create Twitter app credentials. Otherwise your access token and secret will be regenerated and you will have to get new values from dev.twitter.com and add them to app conf. @param userFlow: Default False so that access token is set for configured app. Set to True to use OAuth flow where user directed to sign in with a browser and return a pin number back to the application. """ if userFlow: auth = generateUserToken() else: auth = generateAppToken() # Construct the API instance. Set tweepy to automatically wait if rate # limit is exceeded and to print out a notification. api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True) me = api.me() print 'Authenticated with Twitter API as `{0}`.\n'.format(me.name) return api def main(args): if not args or set(args) & set(('-h', '--help')): print 'Usage: python -m lib.twitter.auth [-t|--test] [-u|--user]'\ ' [-h|--help]' print 'Options and arguments:' print '--test : Run test to get Twitter API connection and print out ' print ' authenticated user name. Defaults to builtin app token'\ ' method' print ' which uses configured app credentials.' print '--user : Use in conjunction to --test flag to make' print ' authentication method follow the user flow where the'\ ' user is' print ' prompted to authorise in the browser, get a pin number'\ ' and' print ' paste it back into the application.' 
else: if set(args) & set(('-t', '--test')): userFlow = set(args) & set(('-u', '--user')) getAPIConnection(userFlow) if __name__ == '__main__': import sys main(sys.argv[1:])
Python
0
@@ -2675,44 +2675,133 @@ -auth = generateUserToken()%0A else: +print 'Generating user API token...'%0A auth = generateUserToken()%0A else:%0A print 'Generating app API token...' %0A
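Decoded, the change is two Python 2 print statements announcing which token flow is starting:

    if userFlow:
        print 'Generating user API token...'
        auth = generateUserToken()
    else:
        print 'Generating app API token...'
        auth = generateAppToken()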
e6487a2c623638b540b707c895a97eac1fc31979
Update connection_test.py to work with Python3.7
server/integration-tests/connection_test.py
server/integration-tests/connection_test.py
#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket # 1. Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put('http://localhost:3000/so/cool', json.dumps(new_data)) # 2. Get the data back over a websocket conn = websocket.create_connection("ws://localhost:3000/so/cool") result = conn.recv() parsed_result = json.loads(result) assert new_data == parsed_result, 'Input data: {} is different from output data: {}'.format( new_data, parsed_result) print 'Initial data was successfully sent to client!'
Python
0.000001
@@ -18,79 +18,215 @@ thon -2.7%0Afrom __future__ import absolute_import, division, unicode_literals%0A +3.7%0A%22%22%22%0ATest PUTing some data into Icepeak and getting it back over a websocket.%0A%0ARequires a running Icepeak instance.%0A%0ARequirements can be installed with: pip install requests websockets%0A%22%22%22%0A%0Aimport asyncio %0Aimp @@ -266,16 +266,17 @@ ebsocket +s %0A%0A# 1. P @@ -462,95 +462,132 @@ ket%0A -conn = websocket.create_connection(%22ws://localhost:3000/so/cool%22)%0Aresult = conn.recv()%0A +async def hello(uri):%0A async with websockets.connect(uri) as websocket:%0A result = await websocket.recv()%0A%0A pars @@ -618,16 +618,24 @@ esult)%0A%0A + assert n @@ -727,16 +727,20 @@ + new_data @@ -762,17 +762,17 @@ )%0A%0Aprint - +( 'Initial @@ -810,9 +810,98 @@ client!' +)%0A%0Aasyncio.get_event_loop().run_until_complete(%0A hello('ws://localhost:3000/so/cool')) %0A
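This hunk rewrites nearly the whole script, so the decoded post-commit file is worth reconstructing in full — it swaps the synchronous websocket client for websockets + asyncio (note that, as committed, the success message prints before the coroutine actually runs):

    #!/usr/bin/env python3.7
    """
    Test PUTing some data into Icepeak and getting it back over a websocket.

    Requires a running Icepeak instance.

    Requirements can be installed with: pip install requests websockets
    """

    import asyncio
    import json
    import requests
    import websockets

    # 1. Put some data into icepeak over HTTP
    new_data = {'status': 'freezing'}
    requests.put('http://localhost:3000/so/cool', json.dumps(new_data))

    # 2. Get the data back over a websocket
    async def hello(uri):
        async with websockets.connect(uri) as websocket:
            result = await websocket.recv()

            parsed_result = json.loads(result)

            assert new_data == parsed_result, 'Input data: {} is different from output data: {}'.format(
                new_data, parsed_result)

    print('Initial data was successfully sent to client!')

    asyncio.get_event_loop().run_until_complete(
        hello('ws://localhost:3000/so/cool'))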
2fe5fc8c53142b5661bf176441e246d48cdb0799
fix a typo
Functions/Sed.py
Functions/Sed.py
''' Created on Feb 14, 2014 @author: Tyranic-Moron ''' from IRCMessage import IRCMessage from IRCResponse import IRCResponse, ResponseType from Function import Function from GlobalVars import * import re # matches a sed-style regex pattern (taken from https://github.com/mossblaser/BeardBot/blob/master/modules/sed.py) # I stripped the unnecessary escapes by using a raw string instead sedRegex = re.compile(r"s/(?P<search>(\\\\|(\\[^\\])|[^\\/])+)/(?P<replace>(\\\\|(\\[^\\])|[^\\/])*)((/(?P<flags>.*))?)") class Instantiate(Function): Help = 's/search/replacement/flags - matches sed-like regex replacement patterns and attempts to execute them on the latest matching line from the last 10\n'\ 'flags are g (global), i (case-insensitive), o (only user messages). Example usage: "I\'d eat some tacos" -> s/some/all the/ -> "I\'d eat all the tacos"' messages = [] unmodifiedMessages = [] def GetResponse(self, message): if message.Type != 'PRIVMSG' and message.Type != 'ACTION': return match = sedRegex.match(message.MessageString) if match: search = match.group('search') replace = match.group('replace') flags = match.group('flags') if flags is None: flags = '' response = self.substitute(search, replace, flags) if response is not None: responseType = ResponseType.Say if response.Type == 'ACTION': responseType = ResponseType.Do return IRCResponse(responseType, response.MessageString, message.ReplyTo) else: self.storeMessage(message) def substitute(self, search, replace, flags): messages = self.unmodifiedMessages if 'o' in flags else self.messages for message in reversed(messages): if 'g' in flags: count = 0 else: count = 1 if 'i' in flags: subFlags = re.IGNORECASE else subFlags = 0 new = re.sub(search, replace, message.MessageString, count, subFlags) new = new[:300] if new != message.MessageString: newMessage = message newMessage.MessageString = new self.storeMessage(newMessage, False) return newMessage return None def storeMessage(self, message, unmodified=True): self.messages.append(message) self.messages = self.messages[-10:] if unmodified: self.unmodifiedMessages.append(message) self.unmodifiedMessages = self.unmodifiedMessages[-10:]
Python
0.999999
@@ -2065,16 +2065,17 @@ else +: %0A
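The "typo" is a missing colon on the bare else — a SyntaxError, so the module would not even import. The corrected block:

    if 'i' in flags:
        subFlags = re.IGNORECASE
    else:
        subFlags = 0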
69b9c641f144633b94aca47212af446971286454
add tests
server_common/test_modules/test_autosave.py
server_common/test_modules/test_autosave.py
import unittest class TestAutosave(unittest.TestCase): def setUp(self): pass
Python
0
@@ -1,92 +1,1395 @@ -import unittest%0A%0A%0Aclass TestAutosave(unittest.TestCase):%0A def setUp(self):%0A pass%0A +from __future__ import unicode_literals, absolute_import, print_function, division%0Aimport unittest%0Aimport shutil%0Aimport os%0A%0Afrom server_common.autosave import AutosaveFile%0A%0A%0ATEMP_FOLDER = os.path.join(%22C:%5C%5C%22, %22instrument%22, %22var%22, %22tmp%22, %22autosave_tests%22)%0A%0A%0Aclass TestAutosave(unittest.TestCase):%0A def setUp(self):%0A self.autosave = AutosaveFile(service_name=%22unittests%22, file_name=%22test_file%22, folder=TEMP_FOLDER)%0A try:%0A os.makedirs(TEMP_FOLDER)%0A except:%0A pass%0A%0A def test_GIVEN_no_existing_file_WHEN_get_parameter_from_autosave_THEN_default_returned(self):%0A default = object()%0A self.assertEqual(self.autosave.read_parameter(%22some_random_parameter%22, default), default)%0A%0A def test_GIVEN_parameter_saved_WHEN_get_parameter_from_autosave_THEN_saved_value_returned(self):%0A value = %22test_value%22%0A self.autosave.write_parameter(%22parameter%22, value)%0A self.assertEqual(self.autosave.read_parameter(%22parameter%22, None), value)%0A%0A def test_GIVEN_different_parameter_saved_WHEN_get_parameter_from_autosave_THEN_saved_value_returned(self):%0A value = %22test_value%22%0A self.autosave.write_parameter(%22other_parameter%22, value)%0A self.assertEqual(self.autosave.read_parameter(%22parameter%22, None), None)%0A%0A def tearDown(self):%0A try:%0A shutil.rmtree(TEMP_FOLDER)%0A except:%0A pass %0A
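Decoded, the commit replaces the empty stub with a real test module (three GIVEN/WHEN/THEN cases against AutosaveFile). An abridged reconstruction — setUp, one representative test, and tearDown; the path and AutosaveFile signature are taken verbatim from the hunk:

    from __future__ import unicode_literals, absolute_import, print_function, division
    import unittest
    import shutil
    import os

    from server_common.autosave import AutosaveFile

    TEMP_FOLDER = os.path.join("C:\\", "instrument", "var", "tmp", "autosave_tests")

    class TestAutosave(unittest.TestCase):
        def setUp(self):
            self.autosave = AutosaveFile(service_name="unittests", file_name="test_file", folder=TEMP_FOLDER)
            try:
                os.makedirs(TEMP_FOLDER)
            except:
                pass

        def test_GIVEN_parameter_saved_WHEN_get_parameter_from_autosave_THEN_saved_value_returned(self):
            value = "test_value"
            self.autosave.write_parameter("parameter", value)
            self.assertEqual(self.autosave.read_parameter("parameter", None), value)

        def tearDown(self):
            try:
                shutil.rmtree(TEMP_FOLDER)
            except:
                pass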
a79b7d2f0a712bfbc60e4ea3b34aa0331675519d
Add OpenSlideError exception class
openslide/lowlevel.py
openslide/lowlevel.py
# # openslide-python - Python bindings for the OpenSlide library # # Copyright (c) 2010-2011 Carnegie Mellon University # # This library is free software; you can redistribute it and/or modify it # under the terms of version 2.1 of the GNU Lesser General Public License # as published by the Free Software Foundation. # # This library is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this library; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from ctypes import * from itertools import count import PIL.Image import struct import sys _lib = cdll.LoadLibrary('libopenslide.so.0') PROPERTY_NAME_COMMENT = 'openslide.comment' PROPERTY_NAME_VENDOR = 'openslide.vendor' PROPERTY_NAME_QUICKHASH1 = 'openslide.quickhash-1' PROPERTY_NAME_BACKGROUND_COLOR = 'openslide.background-color' # validating class to make sure we correctly pass an OpenSlide handle class _OpenSlide(c_void_p): @classmethod def from_param(cls, obj): if not obj: raise ValueError("Passing undefined slide object") if obj.__class__ != cls: raise ValueError("Not an OpenSlide reference") return super(_OpenSlide, cls).from_param(obj) # check if the library got into an error state after each library call def _errcheck(result, func, args): err = get_error(args[0]) if err is not None: raise RuntimeError(err) return result can_open = _lib.openslide_can_open can_open.restype = c_int # c_bool can_open.argtypes = [ c_char_p ] open = _lib.openslide_open open.restype = _OpenSlide open.argtypes = [ c_char_p ] close = _lib.openslide_close close.restype = None close.argtypes = [ _OpenSlide ] get_layer_count = _lib.openslide_get_layer_count get_layer_count.restype = c_int32 get_layer_count.argtypes = [ _OpenSlide ] get_layer_count.errcheck = _errcheck _get_layer_dimensions = _lib.openslide_get_layer_dimensions _get_layer_dimensions.restype = None _get_layer_dimensions.argtypes = [ _OpenSlide, c_int32, POINTER(c_int64), POINTER(c_int64) ] _get_layer_dimensions.errcheck = _errcheck def get_layer_dimensions(slide, layer): w, h = c_int64(), c_int64() _get_layer_dimensions(slide, layer, byref(w), byref(h)) return w.value, h.value get_layer_downsample = _lib.openslide_get_layer_downsample get_layer_downsample.restype = c_double get_layer_downsample.argtypes = [ _OpenSlide, c_int32 ] get_layer_downsample.errcheck = _errcheck get_best_layer_for_downsample = _lib.openslide_get_best_layer_for_downsample get_best_layer_for_downsample.restype = c_int32 get_best_layer_for_downsample.argtypes = [ _OpenSlide, c_double ] get_best_layer_for_downsample.errcheck = _errcheck _read_region = _lib.openslide_read_region _read_region.restype = None _read_region.argtypes = [ _OpenSlide, POINTER(c_uint32), c_int64, c_int64, c_int32, c_int64, c_int64 ] _read_region.errcheck = _errcheck def read_region(slide, x, y, layer, w, h): buf = create_string_buffer(w * h * 4) dest = cast(buf, POINTER(c_uint32)) _read_region(slide, dest, x, y, layer, w, h) return _aRGB_to_RGBa(buf, (w, h)) get_error = _lib.openslide_get_error get_error.restype = c_char_p get_error.argtypes = [ _OpenSlide ] def _checknamelist(result, func, args): _errcheck(result, func, args) names = [] for i in count(): name = result[i] if not name: break names.append(name) return names 
get_property_names = _lib.openslide_get_property_names get_property_names.restype = POINTER(c_char_p) get_property_names.argtypes = [ _OpenSlide ] get_property_names.errcheck = _checknamelist get_property_value = _lib.openslide_get_property_value get_property_value.restype = c_char_p get_property_value.argtypes = [ _OpenSlide, c_char_p ] get_property_value.errcheck = _errcheck get_comment = _lib.openslide_get_comment get_comment.restype = c_char_p get_comment.argtypes = [ _OpenSlide ] get_comment.errcheck = _errcheck get_associated_image_names = _lib.openslide_get_associated_image_names get_associated_image_names.restype = POINTER(c_char_p) get_associated_image_names.argtypes = [ _OpenSlide ] get_associated_image_names.errcheck = _checknamelist _get_associated_image_dimensions = _lib.openslide_get_associated_image_dimensions _get_associated_image_dimensions.restype = None _get_associated_image_dimensions.argtypes = [ _OpenSlide, c_char_p, POINTER(c_int64), POINTER(c_int64) ] _get_associated_image_dimensions.errcheck = _errcheck def get_associated_image_dimensions(slide, name): w, h = c_int64(), c_int64() _get_associated_image_dimensions(slide, name, byref(w), byref(h)) return w.value, h.value _read_associated_image = _lib.openslide_read_associated_image _read_associated_image.restype = None _read_associated_image.argtypes = [ _OpenSlide, c_char_p, POINTER(c_uint32) ] _read_associated_image.errcheck = _errcheck def read_associated_image(slide, name): w, h = c_int64(), c_int64() _get_associated_image_dimensions(slide, name, byref(w), byref(h)) buf = create_string_buffer(w.value * h.value * 4) dest = cast(buf, POINTER(c_uint32)) _read_associated_image(slide, name, dest) return _aRGB_to_RGBa(buf, (w.value, h.value)) # repack buffer from native-endian aRGB to big-endian RGBa and return PIL.Image _rawmode = (sys.byteorder == 'little') and 'BGRA' or 'ARGB' def _aRGB_to_RGBa(buf, size): i = PIL.Image.frombuffer('RGBA', size, buf.raw, 'raw', _rawmode, 0, 1) return PIL.Image.frombuffer('RGBA', size, i.tostring(), 'raw', 'RGBa', 0, 1)
Python
0
@@ -1147,16 +1147,59 @@ color'%0A%0A +class OpenSlideError(Exception):%0A pass%0A%0A # valida @@ -1742,14 +1742,16 @@ ise -Runtim +OpenSlid eErr
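Decoded, the commit defines the new exception type near the top of the module and switches _errcheck from RuntimeError to it, so callers can catch OpenSlide failures specifically:

    class OpenSlideError(Exception):
        pass

    # the per-call error check now raises the library-specific type
    def _errcheck(result, func, args):
        err = get_error(args[0])
        if err is not None:
            raise OpenSlideError(err)
        return result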
6a4f4031b0aac1c8859424703088df903746a6c8
change command doc string
dvc/command/get.py
dvc/command/get.py
import argparse import logging from .base import append_doc_link from .base import CmdBaseNoRepo from dvc.exceptions import DvcException logger = logging.getLogger(__name__) class CmdGet(CmdBaseNoRepo): def run(self): from dvc.repo import Repo try: Repo.get( self.args.url, path=self.args.path, out=self.args.out, rev=self.args.rev, ) return 0 except DvcException: logger.exception( "failed to get '{}' from '{}'".format( self.args.path, self.args.url ) ) return 1 def add_parser(subparsers, parent_parser): GET_HELP = "Download/copy files or directories from git repository." get_parser = subparsers.add_parser( "get", parents=[parent_parser], description=append_doc_link(GET_HELP, "get"), help=GET_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) get_parser.add_argument( "url", help="URL of Git repository to download from." ) get_parser.add_argument( "path", help="Path to a file or directory within the repository." ) get_parser.add_argument( "-o", "--out", nargs="?", help="Destination path to copy/download files to.", ) get_parser.add_argument( "--rev", nargs="?", help="Repository git revision." ) get_parser.set_defaults(func=CmdGet)
Python
0.000002
@@ -781,17 +781,17 @@ es from -g +G it repos
b92c1b5a6a814761d018f9e4d715fb1c2eca4f8b
fix z projection
spindle_tracker/tracking/tracker.py
spindle_tracker/tracking/tracker.py
import logging import os from dateutil import parser from sktracker.io import TiffFile from sktracker.io import StackIO from sktracker.io import ObjectsIO from sktracker.detection import peak_detector from sktracker.io.trackmate import trackmate_peak_import from ..utils.path import check_extension log = logging.getLogger(__name__) class Tracker(): """ Generic container for particle tracking """ MINIMUM_METADATA = [] HDF5_EXTENSION = "h5" TIF_EXTENSION = "tif" XML_EXTENSION = "xml" def __init__(self, sample_path, base_dir, verbose=True, force_metadata=False, json_discovery=True): """ Parameters: ----------- sample_path: string path to TIF file or to HDF5 file """ if not verbose: log.disabled = True else: log.disabled = False # Init paths self.sample_path = sample_path self.base_dir = base_dir self.full_path = os.path.join(self.base_dir, self.sample_path) self.tif_path = self.has_tif() self.h5_path = self.has_h5() self.st = None self.oio = None if self.h5_path: self.oio = ObjectsIO.from_h5(self.h5_path, base_dir=self.base_dir, minimum_metadata_keys=self.__class__.MINIMUM_METADATA) elif self.tif_path: self.st = StackIO(image_path=self.tif_path, base_dir=self.base_dir, json_discovery=json_discovery) self.h5_path = self.has_h5(force=True) self.oio = ObjectsIO(self.st.metadata, store_path=self.h5_path, base_dir=self.base_dir, minimum_metadata_keys=self.__class__.MINIMUM_METADATA) else: raise IOError("h5 or/and tif file does not exist.") self.load_oio() @property def full_tif_path(self): if self.tif_path: return os.path.join(self.base_dir, self.tif_path) else: return None @property def full_h5_path(self): if self.h5_path: return os.path.join(self.base_dir, self.h5_path) else: return None @property def full_xml_path(self): if self.has_xml(): return os.path.join(self.base_dir, self.has_xml()) else: return None @property def stored_data(self): if not hasattr(self, '_stored_data'): self._stored_data = [] else: self._stored_data = list(set(self._stored_data)) return self._stored_data def has_h5(self, force=False): """ """ extension = self.__class__.HDF5_EXTENSION full_path = check_extension(self.full_path, extension=extension, force=force) if full_path: return os.path.relpath(full_path, self.base_dir) else: return None def has_tif(self, force=False): """ """ extension = self.__class__.TIF_EXTENSION full_path = check_extension(self.full_path, extension=extension, force=force) if full_path: return os.path.relpath(full_path, self.base_dir) else: return None def has_xml(self, force=False): """ """ extension = self.__class__.XML_EXTENSION full_path = check_extension(self.full_path, extension=extension, force=force) if full_path: return os.path.relpath(full_path, self.base_dir) else: return None def load_oio(self): """ """ for key in self.oio.keys(): key = key.replace('/', '') self.stored_data.append(key) setattr(self, key, self.oio[key]) log.info("Correctly loaded '{}'".format(key)) def save_oio(self): """ """ for key in self.stored_data: key = key.replace('/', '') self.oio[key] = getattr(self, key) log.info("Correctly saved '{}'".format(key)) log.info("Data has been correctly saved to {}".format(self.h5_path)) def get_tif(self): """ """ if self.full_tif_path: return TiffFile(self.full_tif_path) else: raise IOError("Tif path does not exist.") def __repr__(self): """ """ if self.has_tif(): return self.tif_path elif self.has_hdf5(): return self.h5_path else: return 'Error: No file found !' 
def __lt__(self, other): try: date_self = parser.parse(self.metadata['acquisition_date']) date_other = parser.parse(other.metadata['acquisition_date']) except: log.error("Can't parse or find acquisition date") return None return date_self < date_other def __gt__(self, other): return not self.__lt__(other) def detect_peaks(self, detection_parameters, channel=0, z_projection=False, show_progress=False, parallel=True, erase=False): """ """ if hasattr(self, 'raw') and not erase: log.info("Peaks already detected") return None if not self.full_tif_path or not os.path.isfile(self.full_tif_path): raise IOError("Tif path does not exist.") self.st = StackIO(image_path=self.tif_path, base_dir=self.base_dir, json_discovery=False) data_iterator = self.st.image_iterator(channel_index=channel, z_projection=z_projection) if z_projection and 'Z' in self.metadata['DimensionOrder']: z_position = self.metadata['DimensionOrder'].index('Z') metadata = self.metadata.copy() metadata['Shape'][z_position] = 1 metadata['SizeZ'] = 1 else: metadata = self.metadata peaks = peak_detector(data_iterator(), metadata, parallel=parallel, show_progress=show_progress, parameters=detection_parameters) self.stored_data.append('raw') self.raw = peaks self.save_oio() def get_peaks_from_trackmate(self, erase=False): """ """ if hasattr(self, 'raw') and not erase: log.info("Peaks already detected") return None xml_file = self.has_xml() if not xml_file: log.error("No XML file detected") self.raw = trackmate_peak_import(self.full_xml_path) self.stored_data.append('raw') self.save_oio()
Python
0.000001
@@ -6405,16 +6405,72 @@ .copy()%0A + metadata%5B'Shape'%5D = list(metadata%5B'Shape'%5D)%0A
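Decoded, the fix is a single inserted line: metadata['Shape'] presumably arrives as a tuple, so the item assignment below it raised TypeError; converting it to a list first makes the z-projection branch work:

    if z_projection and 'Z' in self.metadata['DimensionOrder']:
        z_position = self.metadata['DimensionOrder'].index('Z')
        metadata = self.metadata.copy()
        metadata['Shape'] = list(metadata['Shape'])  # make the shape mutable before editing it
        metadata['Shape'][z_position] = 1
        metadata['SizeZ'] = 1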
5ab7d4af67abb20a87f9b963e9ae53df65eea42f
fix self deleter
cogs/fun.py
cogs/fun.py
import asyncio import discord from discord.ext import commands class Fun: """ Fun and useful stuff """ def __init__(self, bot): self.bot = bot @commands.command(pass_context=True) async def marco(self, ctx): """ Says "polo" """ await self.bot.say(self.bot.msg_prefix + "polo") @commands.command(pass_context=True) async def soon(self, ctx, *, message: str = ""): """ Makes a soon tm """ await self.bot.delete_message(ctx.message) await self.bot.say("soon\u2122" + message) @commands.command(pass_context=True) async def give(self, ctx, *, message: str = ""): """ Gives stuff """ await self.bot.delete_message(ctx.message) await self.bot.say("༼ つ ◕\\_◕ ༽つ " + message + " ༼ つ ◕\\_◕ ༽つ") @commands.command(pass_context=True) async def shrug(self, ctx, *, message: str = ""): """ Makes a shrug """ await self.bot.delete_message(ctx.message) await self.bot.say("¯\_(ツ)_/¯ " + message) @commands.command(pass_context=True) async def lenny(self, ctx, *, message: str = ""): """ Makes a lenny face """ await self.bot.delete_message(ctx.message) await self.bot.say("( ͡° ͜ʖ ͡°) " + message) @commands.command(pass_context=True, aliases=["d"]) async def justdeleteme(self, ctx, count: int): """ Deletes 'count' number of message you have sent in the channel But only if they are in the first 1000 messages """ count += 1 iterator = self.bot.logs_from(ctx.channel, limit=1000) async for m in iterator: if isinstance(m, discord.Message): if (m.author == ctx.author): await self.bot.delete_message(m) count -= 1 if count <= 0: return @commands.command(pass_context=True, hidden=True) async def whois(self, ctx, *, ingnore: str = ""): """ Let's just ingore that """ to_del = await self.bot.say(self.bot.msg_prefix + "Use debug...") await asyncio.sleep(5) await self.bot.delete_message(to_del) def _calculate_mutual_servers(self, member: discord.Member): # Calculates mutual servers. serverlist = [] for server in self.bot.servers: assert isinstance(server, discord.Server) if server.get_member(member.id): serverlist += [server.name] return serverlist def _safe_roles(self, roles: list): names = [] for role in roles: if role.name == "@everyone": names.append("@\u200beveryone") # u200b is invisible space else: names.append(role.name) return names def setup(bot): bot.add_cog(Fun(bot))
Python
0.000003
@@ -1662,16 +1662,24 @@ rom(ctx. +message. channel,
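Decoded: in this pre-rewrite discord.py API the context object exposes the invoking message rather than the channel directly, so the deleter has to go through ctx.message.channel. The fixed line:

    iterator = self.bot.logs_from(ctx.message.channel, limit=1000)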
f5d6bc2ff2abf53c6fcf7de5d74ffc50b5d7673d
Fix python environment in test (#1305)
frameworks/helloworld/tests/test_soak.py
frameworks/helloworld/tests/test_soak.py
import pytest import shakedown import time import json import os import sdk_cmd as cmd import sdk_install import sdk_plan import sdk_tasks import sdk_marathon import sdk_test_upgrade from tests.config import ( PACKAGE_NAME, DEFAULT_TASK_COUNT ) FRAMEWORK_NAME = "secrets/hello-world" NUM_HELLO = 2 NUM_WORLD = 3 if "FRAMEWORK_NAME" in os.environ: FRAMEWORK_NAME = os.environ("FRAMEWORK_NAME") if "NUM_HELLO" in os.environ: NUM_HELLO = os.environ("NUM_HELLO") if "NUM_WORLD" in os.environ: NUM_WORLD = os.environ("NUM_WORLD") @pytest.mark.soak_upgrade def test_soak_upgrade_downgrade(): sdk_test_upgrade.soak_upgrade_downgrade(PACKAGE_NAME, PACKAGE_NAME, DEFAULT_TASK_COUNT) @pytest.mark.soak_secrets_update @pytest.mark.skipif('shakedown.dcos_version_less_than("1.10")') def test_soak_secrets_update(): secret_content_alternative = "hello-world-secret-data-alternative" test_soak_secrets_framework_alive() cmd.run_cli("security secrets update --value={} secrets/secret1".format(secret_content_alternative)) cmd.run_cli("security secrets update --value={} secrets/secret2".format(secret_content_alternative)) cmd.run_cli("security secrets update --value={} secrets/secret3".format(secret_content_alternative)) test_soak_secrets_restart_hello0() # get new task ids - only first pod hello_tasks = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "hello-0") world_tasks = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "world-0") # make sure content is changed assert secret_content_alternative == task_exec(world_tasks[0], "bash -c 'echo $WORLD_SECRET1_ENV'") assert secret_content_alternative == task_exec(world_tasks[0], "cat WORLD_SECRET2_FILE") assert secret_content_alternative == task_exec(world_tasks[0], "cat secrets/secret3") # make sure content is changed assert secret_content_alternative == task_exec(hello_tasks[0], "bash -c 'echo $HELLO_SECRET1_ENV'") assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET1_FILE") assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET2_FILE") # revert back to some other value cmd.run_cli("security secrets update --value=SECRET1 secrets/secret1") cmd.run_cli("security secrets update --value=SECRET2 secrets/secret2") cmd.run_cli("security secrets update --value=SECRET3 secrets/secret3") test_soak_secrets_restart_hello0() @pytest.mark.soak_secrets_alive @pytest.mark.skipif('shakedown.dcos_version_less_than("1.10")') def test_soak_secrets_framework_alive(): sdk_plan.wait_for_completed_deployment(FRAMEWORK_NAME) sdk_tasks.check_running(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD) def test_soak_secrets_restart_hello0(): hello_tasks_old = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "hello-0") world_tasks_old = sdk_tasks.get_task_ids(FRAMEWORK_NAME, "world-0") # restart pods to retrieve new secret's content cmd.run_cli('hello-world --name={} pods restart hello-0'.format(FRAMEWORK_NAME)) cmd.run_cli('hello-world --name={} pods restart world-0'.format(FRAMEWORK_NAME)) # wait pod restart to complete sdk_tasks.check_tasks_updated(FRAMEWORK_NAME, "hello-0", hello_tasks_old) sdk_tasks.check_tasks_updated(FRAMEWORK_NAME, 'world-0', world_tasks_old) # wait till it all running sdk_tasks.check_running(FRAMEWORK_NAME, NUM_HELLO + NUM_WORLD) def task_exec(task_name, command): lines = cmd.run_cli("task exec {} {}".format(task_name, command)).split('\n') print(lines) for i in lines: # ignore text starting with: # Overwriting Environment Variable .... # Overwriting PATH ...... if not i.isspace() and not i.startswith("Overwriting"): return i return ""
Python
0.000005
@@ -316,16 +316,45 @@ LD = 3%0A%0A +# check environment first...%0A if %22FRAM @@ -411,17 +411,17 @@ .environ -( +%5B %22FRAMEWO @@ -428,17 +428,17 @@ RK_NAME%22 -) +%5D %0Aif %22NUM @@ -482,25 +482,25 @@ = os.environ -( +%5B %22NUM_HELLO%22) @@ -498,17 +498,17 @@ M_HELLO%22 -) +%5D %0Aif %22NUM @@ -556,17 +556,17 @@ .environ -( +%5B %22NUM_WOR @@ -568,17 +568,17 @@ M_WORLD%22 -) +%5D %0A%0A%0A@pyte
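Decoded, the fix replaces call syntax with subscript syntax — os.environ is a mapping, not a callable, so os.environ("FRAMEWORK_NAME") raised TypeError — and adds a "# check environment first..." comment. A runnable sketch of the corrected pattern:

    import os

    FRAMEWORK_NAME = "secrets/hello-world"

    # check environment first...
    if "FRAMEWORK_NAME" in os.environ:
        FRAMEWORK_NAME = os.environ["FRAMEWORK_NAME"]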
7e6d2ab61667bd3be246a2efe2a31212dbdef079
Fix how rangelist is generated in orgviz.randomnodes
orgviz/randomnodes.py
orgviz/randomnodes.py
import random import datetime from .utils.date import timedeltastr class RandomDatetime(object): def __init__(self, pre_days=30, post_days=30, hour_min=6, hour_max=21): self.pre_days = pre_days self.post_days = post_days self.hour_min = hour_min self.hour_max = hour_max self.now = datetime.datetime.now() self.zero = datetime.datetime(*self.now.timetuple()[:3]) def datetime(self, pre=None, post=None): pre = self.pre_days if pre is None else pre post = self.post_days if post is None else post delta = datetime.timedelta( random.randrange(- pre, post + 1), random.randrange(self.hour_min, self.hour_max) * 60 * 60) return self.zero + delta def date(self, **kwds): return datetime.date(*self.datetime(**kwds).timetuple()[:3]) def node(level, heading, todo=None, scheduled=None, deadline=None, closed=None, clock=[], tags=[], datelist=[], rangelist=[]): active_datestr = lambda x: x.strftime('<%Y-%m-%d %a>') inactive_datestr = lambda x: x.strftime('[%Y-%m-%d %a %H:%M]') yield '*' * level yield ' ' if todo: yield todo yield ' ' yield heading if tags: yield ' :{0}:'.format(':'.join(tags)) yield '\n' if scheduled or deadline or closed: yield ' ' * level for (name, date, datestr) in [('CLOSED', closed, inactive_datestr), ('DEADLINE', deadline, active_datestr), ('SCHEDULED', scheduled, active_datestr)]: if date: yield ' ' yield name yield ': ' yield datestr(date) if scheduled or deadline or closed: yield '\n' for (clock_start, clock_end) in clock: yield ' ' * (level + 1) yield 'CLOCK: ' yield inactive_datestr(clock_start) yield '--' yield inactive_datestr(clock_end) yield ' => ' yield timedeltastr(clock_end - clock_start) yield '\n' for date in datelist: yield inactive_datestr(date) yield '\n' for (start, end) in rangelist: yield inactive_datestr(start) yield '--' yield inactive_datestr(end) yield '\n' def makeorg(num, **kwds): heading_pops = ['aaa', 'bbb', 'ccc'] tags_pops = ['work', 'boss', 'notes', 'action', '@home', '@work'] true_or_false = [True, False] rd = RandomDatetime(**kwds) for i in range(num): kwds = {} if i == 0: kwds['level'] = 1 else: kwds['level'] = random.randrange(1, 4) kwds['heading'] = random.choice(heading_pops) if random.choice(true_or_false): if random.choice(true_or_false): kwds['todo'] = 'TODO' else: kwds['closed'] = rd.datetime(post=0) kwds['todo'] = 'DONE' for sdc in ['scheduled', 'deadline']: if random.choice(true_or_false): kwds[sdc] = rd.date() if random.choice(true_or_false): kwds['clock'] = clock = [] for _ in range(random.randrange(1, 5)): start = rd.datetime(post=0) end = start + datetime.timedelta( 0, random.randrange(30, 180) * 60) clock.append((start, end)) if random.choice(true_or_false): kwds['tags'] = [random.choice(tags_pops)] if random.choice(true_or_false): if random.choice(true_or_false): kwds['datelist'] = [ rd.datetime() for _ in range(random.randrange(1, 5))] else: kwds['rangelist'] = [ (rd.datetime(), rd.datetime()) for _ in range(random.randrange(1, 5))] for s in node(**kwds): yield s def writeorg(file, *args, **kwds): file.writelines(makeorg(*args, **kwds)) def run(num): import sys writeorg(sys.stdout, num)
Python
0.001507
@@ -60,16 +60,31 @@ deltastr +, total_seconds %0A%0A%0Aclass @@ -866,16 +866,398 @@ )%5B:3%5D)%0A%0A + def datetimerange(self, **kwds):%0A return self._start_end(self.datetime(**kwds), self.datetime(**kwds))%0A%0A def daterange(self, **kwds):%0A return self._start_end(self.datetime(**kwds), self.datetime(**kwds))%0A%0A @staticmethod%0A def _start_end(d1, d2):%0A if total_seconds(d1 - d2) %3C 0:%0A return (d1, d2)%0A else:%0A return (d2, d1)%0A%0A %0Adef nod @@ -4170,17 +4170,16 @@ -( rd.datet @@ -4185,26 +4185,15 @@ time -(), rd.datetim +rang e() -) %0A
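Decoded, the commit imports total_seconds alongside timedeltastr, gives RandomDatetime two range helpers built on an ordering helper, and has makeorg draw rangelist entries from rd.datetimerange() so start always precedes end. (As committed, daterange also delegates to datetime(), not date().) The reconstructed methods:

    def datetimerange(self, **kwds):
        return self._start_end(self.datetime(**kwds), self.datetime(**kwds))

    def daterange(self, **kwds):
        return self._start_end(self.datetime(**kwds), self.datetime(**kwds))

    @staticmethod
    def _start_end(d1, d2):
        if total_seconds(d1 - d2) < 0:
            return (d1, d2)
        else:
            return (d2, d1)

    # and in makeorg():
    kwds['rangelist'] = [
        rd.datetimerange() for _ in range(random.randrange(1, 5))]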
b0c83624004ebda4ea23f597d109d89ec319f1cf
fix tut2.py
doc/source/code/tut2.py
doc/source/code/tut2.py
from netpyne import specs, sim # Network parameters netParams = specs.NetParams() # object of class NetParams to store the network parameters ## Population parameters netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'} netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'} ## Cell property rules cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}} # cell rule dict cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism netParams.cellParams['PYRrule'] = cellRule # add dict to list of cell params ## Synaptic mechanism parameters netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 5.0, 'e': 0} # excitatory synaptic mechanism # Stimulation parameters netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 10, 'noise': 0.5} netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'} ## Cell connectivity rules netParams.connParams['S->M'] = { # S -> M label 'preConds': {'pop': 'S'}, # conditions of presyn cells 'postConds': {'pop': 'M'}, # conditions of postsyn cells 'divergence': 9, # probability of connection 'weight': 0.01, # synaptic weight 'delay': 5, # transmission delay (ms) 'synMech': 'exc'} # synaptic mechanism # Simulation options simConfig = specs.SimConfig() # object of class SimConfig to store simulation configuration simConfig.duration = 1*1e3 # Duration of the simulation, in ms simConfig.dt = 0.025 # Internal integration timestep to use simConfig.verbose = False # Show detailed messages simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record simConfig.recordStep = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc) simConfig.filename = 'model_output' # Set file output name simConfig.savePickle = False # Save params, network and sim output to pickle file simConfig.saveJson = True simConfig.analysis['plotRaster'] = True # Plot a raster simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections # Create network and run simulation sim.createSimulateAnalyze(netParams = netParams, simConfig = simConfig) # import pylab; pylab.show() # this line is only necessary in certain systems where figures appear empty # check model output sim.checkOutput('tut2')
Python
0.000001
@@ -1418,22 +1418,25 @@ s%0A%09' -divergence': 9 +probability': 0.5 , %09%09
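The final hunk (its tail looks truncated in this dump) renames the mislabeled connection parameter: the original line read 'divergence': 9 next to a '# probability of connection' comment, and the visible part of the fix makes the key agree with the comment:

    'probability': 0.5,        # probability of connection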