commit | subject | old_file | new_file | old_contents | lang | proba | diff |
---|---|---|---|---|---|---|---|
8006d142a00a6dae70850b3c9d816f745f252260 | create settings file with parent_separator setting | cms/settings.py | cms/settings.py | Python | 0 | @@ -0,0 +1,105 @@
+from django.conf import settings
+
+
+PARENT_SEPARATOR = getattr(settings, 'MINICMS_PARENT_SEPARATOR', '/')
|
7c8d43b16d6b47555caeb00234590bc8d335ed71 | test markup | tests/test_markup.py | tests/test_markup.py | Python | 0.000001 | @@ -0,0 +1,843 @@
+import pytest
+
+from rich.markup import MarkupError, _parse, render
+from rich.text import Span
+
+
+def test_parse():
+    result = list(_parse("[foo]hello[/foo][bar]world[/][[escaped]]"))
+    expected = [
+        (None, "[foo]"),
+        ("hello", None),
+        (None, "[/foo]"),
+        (None, "[bar]"),
+        ("world", None),
+        (None, "[/]"),
+        ("[", None),
+        ("escaped", None),
+        ("]", None),
+    ]
+    assert result == expected
+
+
+def test_render():
+    result = render("[bold]FOO[/bold]")
+    assert str(result) == "FOO"
+    assert result.spans == [Span(0, 3, "bold")]
+
+
+def test_markup_error():
+    with pytest.raises(MarkupError):
+        assert render("foo[/]")
+    with pytest.raises(MarkupError):
+        assert render("foo[/bar]")
+    with pytest.raises(MarkupError):
+        assert render("[foo]hello[/bar]")
|
93b2972c41855511cddf57029ab8fce0dccd9265 | add hashtable using open addressing | ds/hash.py | ds/hash.py | Python | 0.000001 | @@ -0,0 +1,1670 @@
+'''HashTable using open addressing'''
+
+
+class HashTable(object):
+    def __init__(self):
+        self.size = 11
+        self.keys = [None] * self.size
+        self.data = [None] * self.size
+
+    def hash(self, key):
+        return key % self.size
+
+    def rehash(self, key):
+        return (key + 1) % self.size
+
+    def put(self, key, data):
+        slot = self.hash(key)
+
+        if self.keys[slot] is None:
+            self.keys[slot] = key
+            self.data[slot] = data
+        else:
+            while self.keys[slot] is not None:
+                slot = self.rehash(slot)
+                if self.keys[slot] == key:
+                    self.data[slot] = data  # replace
+                    break
+            else:
+                self.keys[slot] = key
+                self.data[slot] = data
+
+    def get(self, key):
+        slot = self.hash(key)
+        if self.keys[slot] == key:
+            return self.data[slot]
+        else:
+            start_slot = slot
+            while self.keys[slot] != key:
+                slot = self.rehash(slot)
+                if slot == start_slot:
+                    return None
+            else:
+                return self.data[slot]
+
+    def __setitem__(self, key, data):
+        self.put(key, data)
+
+    def __getitem__(self, key):
+        return self.get(key)
+
+    def __str__(self):
+        return ', '.join(map(str, enumerate(self.data)))
+
+
+if __name__ == '__main__':
+    H = HashTable()
+    H[54] = "cat"
+    H[26] = "dog"
+    H[93] = "lion"
+    H[17] = "tiger"
+    H[77] = "bird"
+    H[31] = "cow"
+    H[44] = "goat"
+    H[55] = "pig"
+    H[20] = "chicken"
+    print(H)
+
+    H[9] = "duck"
+    print(H[9])
+    print(H)
|
256e1bb8dd543051fe51b3b669ab4a10c0556f40 | add back pytext | tests/test_pytext.py | tests/test_pytext.py | Python | 0.000001 | @@ -0,0 +1,468 @@
+import unittest
+
+from pytext.config.field_config import FeatureConfig
+from pytext.data.featurizer import InputRecord, SimpleFeaturizer
+
+class TestPyText(unittest.TestCase):
+
+    def test_tokenize(self):
+        featurizer = SimpleFeaturizer.from_config(
+            SimpleFeaturizer.Config(), FeatureConfig()
+        )
+
+        tokens = featurizer.featurize(InputRecord(raw_text="At eight o'clock")).tokens
+        self.assertEqual(['at', 'eight', "o'clock"], tokens)
|
ea0b0e3b3ca2b3ad51ae9640f7f58d9f2737f64c | Split out runner | dox/runner.py | dox/runner.py | Python | 0.015951 | @@ -0,0 +1,913 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+    'Runner',
+]
+
+import sh
+
+
+class Runner(object):
+
+    def __init__(self, args):
+        self.args = args
+
+    def run(self, image, command):
+        print("Going to run {0} in {1}".format(command, image))
+        if self.args.rebuild:
+            print("Need to rebuild")
+        sh.ls()
|
af75f727e5ec22020c8d91af6a0302ea0e4bda74 | Support for http://docs.oasis-open.org/security/saml/Post2.0/sstc-request-initiation-cd-01.html in the metadata. | src/saml2/extension/reqinit.py | src/saml2/extension/reqinit.py | Python | 0 | @@ -0,0 +1,960 @@
+#!/usr/bin/env python
+
+#
+# Generated Thu May 15 13:58:36 2014 by parse_xsd.py version 0.5.
+#
+
+import saml2
+
+from saml2 import md
+
+NAMESPACE = 'urn:oasis:names:tc:SAML:profiles:SSO:request-init'
+
+
+class RequestInitiator(md.EndpointType_):
+    """The urn:oasis:names:tc:SAML:profiles:SSO:request-init:RequestInitiator
+    element """
+
+    c_tag = 'RequestInitiator'
+    c_namespace = NAMESPACE
+    c_children = md.EndpointType_.c_children.copy()
+    c_attributes = md.EndpointType_.c_attributes.copy()
+    c_child_order = md.EndpointType_.c_child_order[:]
+    c_cardinality = md.EndpointType_.c_cardinality.copy()
+
+
+def request_initiator_from_string(xml_string):
+    return saml2.create_class_from_xml_string(RequestInitiator, xml_string)
+
+
+ELEMENT_FROM_STRING = {
+    RequestInitiator.c_tag: request_initiator_from_string,
+}
+
+ELEMENT_BY_TAG = {
+    'RequestInitiator': RequestInitiator,
+}
+
+
+def factory(tag, **kwargs):
+    return ELEMENT_BY_TAG[tag](**kwargs)
|
cfa5b544c3d44a7440feca006c01bbd72ecc0286 | Test arena constants | test/test_arena.py | test/test_arena.py | Python | 0.000001 | @@ -0,0 +1,451 @@
+from support import lib,ffi
+from qcgc_test import QCGCTest
+
+class ArenaTestCase(QCGCTest):
+    def test_size_calculations(self):
+        exp = lib.QCGC_ARENA_SIZE_EXP
+        size = 2**exp
+        bitmap = size / 128
+        effective_cells = (size - 2 * bitmap) / 16
+        self.assertEqual(size, lib.qcgc_arena_size)
+        self.assertEqual(bitmap, lib.qcgc_arena_bitmap_size)
+        self.assertEqual(effective_cells, lib.qcgc_arena_cells_count)
|
12270bc14b44343b4babef3b6445074685b59bd7 | Create histogram.py | python/histogram.py | python/histogram.py | Python | 0.00286 | @@ -0,0 +1,623 @@
+import sys
+
+histogram = dict()
+
+bin_width = 5
+max_index = 0
+
+for line in sys.stdin:
+    if not line:
+        continue
+
+    number = int(line)
+    bin_index = number / bin_width
+    if bin_index not in histogram:
+        histogram[bin_index] = 0
+    histogram[bin_index] = histogram[bin_index] + 1
+    if bin_index > max_index:
+        max_index = bin_index
+
+for index in range(max_index) + [max_index + 1]:
+    if index not in histogram:
+        histogram[index] = 0
+    count = histogram[index]
+    if count == None:
+        count = 0
+    print "[{0}, {1}> : {2}".format(index * bin_width, (index + 1) * bin_width, count)
|
8b6b30997816bae1255c3e035851b8e6edb5e4c7 | add a test | python/test/test.py | python/test/test.py | Python | 0.000002 | @@ -0,0 +1,897 @@
+import unittest
+import os
+
+import couchapp.utils
+
+class CouchAppTest(unittest.TestCase):
+
+    def testInCouchApp(self):
+        dir_, file_ = os.path.split(__file__)
+        if dir_:
+            os.chdir(dir_)
+
+        startdir = os.getcwd()
+        try:
+            os.chdir("in_couchapp")
+            os.chdir("installed")
+            cwd = os.getcwd()
+            self.assertEquals(couchapp.utils.in_couchapp(), cwd,
+                              "in_couchapp() returns %s" %
+                              couchapp.utils.in_couchapp())
+            os.chdir(os.path.pardir)
+            os.chdir("no_install")
+            self.assert_(not couchapp.utils.in_couchapp(),
+                         "Found a couchapp at %s but didn't expect one!"
+                         % couchapp.utils.in_couchapp())
+        finally:
+            os.chdir(startdir)
+
+if __name__ == "__main__":
+    unittest.main()
|
952438d97fc0c96afaf505469cc7b9cb0c9f287d | Add config file with the list of relays availables | relay_api/conf/config.py | relay_api/conf/config.py | Python | 0 | @@ -0,0 +1,191 @@
+# List of available relays
+relays = [
+    {
+        "id": 1,
+        "gpio": 20,
+        "name": "relay 1"
+    },
+    {
+        "id": 2,
+        "gpio": 21,
+        "name": "relay 2"
+    }
+]
|
b5083af1cce5fb5b9c7bb764b18edce8640bd3a1 | add utilLogger.py from toLearn/ and update to v0.4 | utilLogger.py | utilLogger.py | Python | 0 | @@ -0,0 +1,1194 @@
+import os.path
+import datetime
+
+'''
+v0.4 2015/11/30
+	- comment out test run
+	- add from sentence to import CUtilLogger
+v0.3 2015/11/30
+	- change array declaration to those using range()
+	- __init__() does not take saveto arg
+	- automatically get file name based on the date
+v0.2 2015/11/30
+	- update add() to handle auto save feature
+v0.1 2015/11/30
+	- add save()
+	- add add()
+	- add __init__()
+'''
+
+class CUtilLogger:
+	def __init__(self):
+		self.idx = 0
+		self.bufferNum = 5
+		self.strs = [ 0 for idx in range(10)]
+		return
+
+	def clear(self):
+		for idx in range(0, self.idx):
+			self.strs[idx] = ""
+		self.idx = 0
+
+	def add(self,str):
+		self.strs[self.idx] = str
+		self.idx = self.idx + 1
+#		print self.idx
+		if self.idx >= self.bufferNum:
+			self.save()
+			self.clear()
+
+	def save(self):
+		today = datetime.date.today()
+		yymmdd = today.strftime("%y%m%d")
+		filename = yymmdd + ".log"
+		with open(filename, "a") as logfd:
+			for idx in range(0, self.idx):
+				text = self.strs[idx] + "\r\n"
+				logfd.write(text)
+
+# Usage
+
+'''
+from utilLogger import CUtilLogger
+logger = CUtilLogger()
+
+for loop in range(0, 31):
+	logger.add("test")
+logger.save() # to save the rest
+logger = None
+'''
|
99f5d264ab88573e0541c529eca905b8a1d16873 | Bump to 0.5.3 dev. | rbtools/__init__.py | rbtools/__init__.py | #
# __init__.py -- Basic version and package information
#
# Copyright (c) 2007-2009 Christian Hammond
# Copyright (c) 2007-2009 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The version of RBTools
#
# This is in the format of:
#
# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (0, 5, 2, 'final', 0, True)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
if VERSION[3] == 'rc':
version += ' RC%s' % VERSION[4]
else:
version += ' %s %s' % (VERSION[3], VERSION[4])
if not is_release():
version += " (dev)"
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
| Python | 0 | @@ -1357,26 +1357,27 @@
-VERSION = (0, 5, 2, 'final', 0, True)
+VERSION = (0, 5, 3, 'alpha', 0, False)
|
698d9868ccab154f5f945710a237d8aeca2090aa | Add some more tests | tests/main_test.py | tests/main_test.py | #!/usr/bin/env python3
"""Test suite for stats.py
Runs:
doctests from the stats module
tests from the examples text file (if any)
unit tests in this module
a limited test for uncollectable objects
"""
import doctest
import gc
import itertools
import os
import random
import sys
import unittest
# Module being tested.
import stats
# Reminder to myself that this has to be run under Python3.
if sys.version < "3.0":
raise RuntimeError("run this under Python3")
# === Helper functions ===
# === Data sets for testing ===
# === Test suites ===
class GlobalTest(unittest.TestCase):
def testState(self):
"""Test the state of globals."""
self.assert_(stats._sum is sum)
def testMeta(self):
"""Test existence of metadata."""
attrs = ("__doc__ __version__ __date__ __author__"
" __author_email__ __all__").split()
for meta in attrs:
self.failUnless(hasattr(stats, meta), "missing %s" % meta)
class MinmaxTest(unittest.TestCase):
"""Tests for minmax function."""
data = list(range(100))
expected = (0, 99)
def key(self, n):
# Tests assume this is a monotomically increasing function.
return n*33 - 11
def setUp(self):
random.shuffle(self.data)
def testArgsNoKey(self):
"""Test minmax works with multiple arguments and no key."""
self.assertEquals(stats.minmax(*self.data), self.expected)
def testSequenceNoKey(self):
"""Test minmax works with a single sequence argument and no key."""
self.assertEquals(stats.minmax(self.data), self.expected)
def testIterNoKey(self):
"""Test minmax works with a single iterator argument and no key."""
self.assertEquals(stats.minmax(iter(self.data)), self.expected)
def testArgsKey(self):
"""Test minmax works with multiple arguments and a key function."""
result = stats.minmax(*self.data, key=self.key)
self.assertEquals(result, self.expected)
def testSequenceKey(self):
"""Test minmax works with a single sequence argument and a key."""
result = stats.minmax(self.data, key=self.key)
self.assertEquals(result, self.expected)
def testIterKey(self):
"""Test minmax works with a single iterator argument and a key."""
it = iter(self.data)
self.assertEquals(stats.minmax(it, key=self.key), self.expected)
def testCompareNoKey(self):
"""Test minmax directly against min and max built-ins."""
data = random.sample(range(-5000, 5000), 300)
expected = (min(data), max(data))
result = stats.minmax(data)
self.assertEquals(result, expected)
random.shuffle(data)
result = stats.minmax(iter(data))
self.assertEquals(result, expected)
def testCompareKey(self):
"""Test minmax directly against min and max built-ins with a key."""
letters = list('abcdefghij')
random.shuffle(letters)
assert len(letters) == 10
data = [count*letter for (count, letter) in enumerate(letters)]
random.shuffle(data)
expected = (min(data, key=len), max(data, key=len))
result = stats.minmax(data, key=len)
self.assertEquals(result, expected)
random.shuffle(data)
result = stats.minmax(iter(data), key=len)
self.assertEquals(result, expected)
# ============================================================================
if __name__ == '__main__':
# Define a function that prints, or doesn't, according to whether or not
# we're in (slightly) quiet mode. Note that we always print "skip" and
# failure messages.
if '-q' in sys.argv[1:]:
def pr(s):
pass
else:
def pr(s):
print(s)
#
# Now run the tests.
#
gc.collect()
assert not gc.garbage
#
# Run doctests in the stats module.
#
failures, tests = doctest.testmod(stats)
if failures:
print("Skipping further tests while doctests failing.")
sys.exit(1)
else:
pr("Module doc tests: failed %d, attempted %d" % (failures, tests))
#
# Run doctests in the example text file.
#
if os.path.exists('examples.txt'):
failures, tests = doctest.testfile('examples.txt')
if failures:
print("Skipping further tests while doctests failing.")
sys.exit(1)
else:
pr("Example doc tests: failed %d, attempted %d" % (failures, tests))
else:
pr('WARNING: No example text file found.')
#
# Run unit tests.
#
pr("Running unit tests:")
try:
unittest.main()
except SystemExit:
pass
#
# Check for reference leaks.
#
gc.collect()
if gc.garbage:
print("List of uncollectable garbage:")
print(gc.garbage)
else:
pr("No garbage found.")
| Python | 0 | @@ -609,69 +609,80 @@
-    def testState(self):
-        """Test the state of globals."""
+    """Test the state and/or existence of globals."""
+    def testSum(self):
@@ -3415,16 +3415,360 @@
+class SortedDataDecoratorTest(unittest.TestCase):
+    """Test that the sorted_data decorator works correctly."""
+    def testDecorator(self):
+        @stats.sorted_data
+        def f(data):
+            return data
+
+        values = random.sample(range(1000), 100)
+        result = f(values)
+        self.assertEquals(result, sorted(values))
@@ -4043,16 +4043,87 @@
+    # FIX ME can we make unittest run silently if there are no errors?
|
90948c62d1d01800c6a75dd5f15d7fef334dc66f | Add python unittests | noticeboard/test_noticeboard.py | noticeboard/test_noticeboard.py | Python | 0.000003 | @@ -0,0 +1,1846 @@
+import os
+import json
+import tempfile
+import unittest
+
+from noticeboard import noticeboard
+
+
+class TestNoticeboard(unittest.TestCase):
+    def setUp(self):
+        self.fd, noticeboard.app.config["DATABASE"] = tempfile.mkstemp()
+        noticeboard.app.config["TESTING"] = True
+        self.app = noticeboard.app.test_client()
+        noticeboard.init_db()
+
+    def tearDown(self):
+        os.close(self.fd)
+        os.unlink(noticeboard.app.config["DATABASE"])
+
+    def decode_json(self, resp):
+        return json.loads(resp.data.decode('utf-8'))
+
+    def test_no_note_by_default(self):
+        resp = self.app.get("/api/v1/notes")
+        data = self.decode_json(resp)
+        self.assertEqual(data["notes"], [])
+
+    def test_creating_note_with_text(self):
+        text = "Foo Bar Baz"
+        resp = self.app.get("/api/v1/notes/create/{}".format(text))
+        data = self.decode_json(resp)
+        self.assertEqual(data["note"]["text"], text)
+
+    def test_created_note_can_be_retrieved(self):
+        text = "Hello World!"
+        resp = self.app.get("/api/v1/notes/create/{}".format(text))
+        created_note = self.decode_json(resp)["note"]
+
+        resp = self.app.get("/api/v1/notes/{}".format(created_note["id"]))
+        retrieved_note = self.decode_json(resp)["note"]
+        self.assertEqual(retrieved_note, created_note)
+
+    def test_created_note_shows_up_in_notes(self):
+        text = "Hello, 世界!"
+        resp = self.app.get("/api/v1/notes/create/{}".format(text))
+        note1 = self.decode_json(resp)["note"]
+
+        text = "This is fun!"
+        resp = self.app.get("/api/v1/notes/create/{}".format(text))
+        note2 = self.decode_json(resp)["note"]
+
+        resp = self.app.get("/api/v1/notes")
+        notes = self.decode_json(resp)["notes"]
+        self.assertIn(note1, notes)
+        self.assertIn(note2, notes)
|
17fcdd9a01be24ad9562e5a558e2dd65a84d1a19 | Add missing tests/queuemock.py | tests/queuemock.py | tests/queuemock.py | Python | 0.000003 | @@ -0,0 +1,2923 @@
+# -*- coding: utf-8 -*-
+#
+# 2019-01-07 Friedrich Weber <[email protected]>
+#            Implement queue mock
+#
+# License:  AGPLv3
+# contact:  http://www.privacyidea.org
+#
+# This code is free software; you can redistribute it and/or
+# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
+# License as published by the Free Software Foundation; either
+# version 3 of the License, or any later version.
+#
+# This code is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNE7SS FOR A PARTICULAR PURPOSE.  See the
+# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
+#
+# You should have received a copy of the GNU Affero General Public
+# License along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+import functools
+import mock
+from privacyidea.lib.queue import get_job_queue
+
+from privacyidea.config import TestingConfig
+from privacyidea.lib.queue.promise import ImmediatePromise
+
+from privacyidea.lib.queue.base import BaseQueue, QueueError
+from tests.base import OverrideConfigTestCase
+
+
+class FakeQueue(BaseQueue):
+    """
+    A queue class that keeps track of enqueued jobs, for usage in unit tests.
+    """
+    def __init__(self, options):
+        BaseQueue.__init__(self, options)
+        self._jobs = {}
+        self.reset()
+
+    @property
+    def jobs(self):
+        return self._jobs
+
+    def reset(self):
+        self.enqueued_jobs = []
+
+    def add_job(self, name, func, fire_and_forget=False):
+        if name in self._jobs:
+            raise QueueError(u"Job {!r} already exists".format(name))
+
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            result = func(*args, **kwargs)
+            if fire_and_forget:
+                return None
+            else:
+                return result
+
+        self._jobs[name] = wrapper
+
+    def enqueue(self, name, args, kwargs):
+        if name not in self._jobs:
+            raise QueueError(u"Unknown job: {!r}".format(name))
+        self.enqueued_jobs.append((name, args, kwargs))
+        return ImmediatePromise(self._jobs[name](*args, **kwargs))
+
+
+class MockQueueTestCase(OverrideConfigTestCase):
+    """
+    A test case class which has a mock job queue set up.
+    You can check the enqueued jobs with::
+
+        queue = get_job_queue()
+        self.assertEqual(queue.enqueued_jobs, ...)
+
+    The ``enqueued_jobs`` attribute is reset for each test case.
+    """
+    class Config(TestingConfig):
+        PI_JOB_QUEUE_CLASS = "fake"
+
+    @classmethod
+    def setUpClass(cls):
+        """ override privacyidea.config.config["testing"] with the inner config class """
+        with mock.patch.dict("privacyidea.lib.queue.QUEUE_CLASSES", {"fake": FakeQueue}):
+            super(MockQueueTestCase, cls).setUpClass()
+
+    def setUp(self):
+        get_job_queue().reset()
+        OverrideConfigTestCase.setUp(self)
|
6083124c110e0ce657b78f6178cd7464996a042b | add tests I want to pass | tests/test_geometries.py | tests/test_geometries.py | Python | 0 | @@ -0,0 +1,1958 @@
+"""This contains a set of tests for ParaTemp.geometries"""
+
+########################################################################
+#                                                                      #
+#   This script was written by Thomas Heavey in 2017.                  #
+#        [email protected]     [email protected]                       #
+#                                                                      #
+#   Copyright 2017 Thomas J. Heavey IV                                 #
+#                                                                      #
+#   Licensed under the Apache License, Version 2.0 (the "License");    #
+#   you may not use this file except in compliance with the License.   #
+#   You may obtain a copy of the License at                            #
+#                                                                      #
+#       http://www.apache.org/licenses/LICENSE-2.0                     #
+#                                                                      #
+#   Unless required by applicable law or agreed to in writing,         #
+#   software distributed under the License is distributed on an        #
+#   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,       #
+#   either express or implied.                                         #
+#   See the License for the specific language governing permissions    #
+#   and limitations under the License.                                 #
+#                                                                      #
+########################################################################
+
+from __future__ import absolute_import
+
+import pytest
+
+
+class TestXYZ(object):
+
+    @pytest.fixture
+    def xyz(self):
+        from ..ParaTemp.geometries import XYZ
+        return XYZ('tests/test-data/stil-3htmf.xyz')
+
+    def test_n_atoms(self, xyz):
+        assert xyz.n_atoms == 66
+
+    def test_energy(self, xyz):
+        assert xyz.energy == -1058630.8496721
|
8c9034e91d82487ae34c592b369a3283b577acc8 | Add a new test for the latest RegexLexer change, multiple new states including '#pop'. | tests/test_regexlexer.py | tests/test_regexlexer.py | Python | 0 | @@ -0,0 +1,965 @@
+# -*- coding: utf-8 -*-
+"""
+    Pygments regex lexer tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    :copyright: 2007 by Georg Brandl.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import unittest
+
+from pygments.token import Text
+from pygments.lexer import RegexLexer
+
+class TestLexer(RegexLexer):
+    """Test tuple state transitions including #pop."""
+    tokens = {
+        'root': [
+            ('a', Text.Root, 'rag'),
+            ('e', Text.Root),
+        ],
+        'beer': [
+            ('d', Text.Beer, ('#pop', '#pop')),
+        ],
+        'rag': [
+            ('b', Text.Rag, '#push'),
+            ('c', Text.Rag, ('#pop', 'beer')),
+        ],
+    }
+
+class TupleTransTest(unittest.TestCase):
+    def test(self):
+        lx = TestLexer()
+        toks = list(lx.get_tokens_unprocessed('abcde'))
+        self.assertEquals(toks,
+           [(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
+            (3, Text.Beer, 'd'), (4, Text.Root, 'e')])
|
09598f2635cf946be08ed8529ba6fec1938a5581 | test jobq push/startjob flow | tests/test_jobq.py | tests/test_jobq.py | """
Test JobQ
"""
from hstestcase import HSTestCase
class JobqTest(HSTestCase):
def test_push(self):
jobq = self.project.jobq
qjob = jobq.push(self.spidername)
self.assertTrue('key' in qjob, qjob)
self.assertTrue('auth' in qjob, qjob)
job = self.hsclient.get_job(qjob['key'])
self.assertEqual(job.metadata.get('state'), u'pending')
self.assertEqual(job.metadata.get('spider'), self.spidername)
self.assertEqual(job.metadata.get('auth'), qjob['auth'])
jobq.start(job)
job.metadata.expire()
self.assertEqual(job.metadata.get('state'), u'running')
jobq.finish(job)
job.metadata.expire()
self.assertEqual(job.metadata.get('state'), u'finished')
jobq.delete(job)
job.metadata.expire()
self.assertEqual(job.metadata.get('state'), u'deleted')
def test_push_with_extras(self):
qjob = self.project.jobq.push(self.spidername, foo='bar', baz='fuu')
job = self.hsclient.get_job(qjob['key'])
self.assertEqual(job.metadata.get('foo'), u'bar')
self.assertEqual(job.metadata.get('baz'), u'fuu')
def test_push_with_priority(self):
jobq = self.project.jobq
qjob = jobq.push(self.spidername, priority=jobq.PRIO_HIGHEST)
self.assertTrue('key' in qjob, qjob)
self.assertTrue('auth' in qjob, qjob)
def test_push_with_state(self):
qjob = self.project.jobq.push(self.spidername, state='running')
self.assertTrue('key' in qjob, qjob)
self.assertTrue('auth' in qjob, qjob)
job = self.hsclient.get_job(qjob['key'])
self.assertEqual(job.metadata.get('state'), u'running')
def test_summary(self):
jobq = self.project.jobq
# push at least one job per state
jobq.push(self.spidername)
jobq.push(self.spidername, state='running')
jobq.push(self.spidername, state='finished')
summaries = dict((s['name'], s) for s in jobq.summary())
self.assertEqual(set(summaries), set(['pending', 'running', 'finished']))
self.assertTrue(jobq.summary('pending'))
self.assertTrue(jobq.summary('running'))
self.assertTrue(jobq.summary('finished'))
def test_summaries_and_state_changes(self):
jobq = self.project.jobq
j1 = jobq.push(self.spidername)
j2 = jobq.push(self.spidername)
j3 = jobq.push(self.spidername)
j4 = jobq.push(self.spidername, state='running')
# check queue summaries
self._assert_queue('pending', [j3, j2, j1])
self._assert_queue('running', [j4])
self._assert_queue('finished', [])
# change job states
jobq.start(j1)
jobq.finish(j2)
jobq.finish(j4)
# check summaries again
self._assert_queue('pending', [j3])
self._assert_queue('running', [j1])
self._assert_queue('finished', [j4, j2])
# delete all jobs and check for empty summaries
jobq.delete(j1)
jobq.delete(j2)
jobq.delete(j3)
jobq.delete(j4)
self._assert_queue('pending', [])
self._assert_queue('running', [])
self._assert_queue('finished', [])
def _assert_queue(self, qname, jobs):
summary = self.project.jobq.summary(qname, spiderid=self.spiderid)
self.assertEqual(summary['name'], qname)
self.assertEqual(summary['count'], len(jobs))
self.assertEqual(len(summary['summary']), len(jobs))
# Most recent jobs first
self.assertEqual([s['key'] for s in summary['summary']],
[j['key'] for j in jobs])
| Python | 0 | @@ -1700,24 +1700,794 @@
         self.assertEqual(job.metadata.get('state'), u'running')
 
+    def test_startjob(self):
+        jobq = self.project.jobq
+        qj = jobq.push(self.spidername)
+        nj = jobq.start()
+        self.assertTrue(nj.pop('pending_time', None), nj)
+        self.assertEqual(nj, {
+            u'auth': qj['auth'],
+            u'key': qj['key'],
+            u'priority': jobq.PRIO_NORMAL,
+            u'spider': self.spidername,
+            u'state': u'running',
+        })
+
+    def test_startjob_order(self):
+        jobq = self.project.jobq
+        q1 = jobq.push(self.spidername)
+        q2 = jobq.push(self.spidername)
+        q3 = jobq.push(self.spidername)
+        self.assertEqual(jobq.start()['key'], q1['key'])
+        self.assertEqual(jobq.start()['key'], q2['key'])
+        self.assertEqual(jobq.start()['key'], q3['key'])
+
     def test_summary(self):
|
ac9b7e5a50f4748a8a536feb8e0a89edc6342866 | fix pep8 | libtree/tree.py | libtree/tree.py | from libtree.node import Node
def print_tree(per, node=None, intend=0):
if node is None:
node = get_root_node(per)
print('{} - {} {}'.format(' '*intend, node.id, node.type))
for child in list(get_children(per, node)):
print_tree(per, child, intend=intend+2)
def get_root_node(per):
sql = """
SELECT
*
FROM
nodes
WHERE
parent IS NULL;
"""
per.execute(sql)
result = per.fetchone()
if result is None:
raise ValueError('No root node.')
else:
return Node(**result)
def get_node(per, id):
if type(id) != int:
raise TypeError('Need numerical id.')
sql = """
SELECT
*
FROM
nodes
WHERE
id = %s;
"""
per.execute(sql, (id, ))
result = per.fetchone()
if result is None:
raise ValueError('Node does not exist.')
else:
return Node(**result)
def create_node(per, parent, type, position=0, description=''):
""" non-atomic """
parent_id = None
if parent is not None:
parent_id = int(parent)
sql = """
INSERT INTO
nodes
(parent, type, position, description)
VALUES
(%s, %s, %s, %s);
"""
per.execute(sql, (parent_id, type, position, description))
id = per.get_last_row_id()
node = Node(id, parent_id, type, position)
if parent is not None:
ancestors = [parent_id] + list(get_ancestor_ids(per, parent_id))
insert_ancestors(per, node, ancestors)
return node
def insert_ancestors(per, node, ancestors):
id = int(node)
data = []
for ancestor in ancestors:
data.append((id, int(ancestor)))
sql = """
INSERT INTO
ancestor
(node, ancestor)
VALUES
(%s, %s);
"""
per.executemany(sql, data)
def delete_ancestors(per, node, ancestors):
id = int(node)
sql = """
DELETE FROM
ancestor
WHERE
node=%s
AND
ancestor=%s;
"""
per.execute(sql, (id, ','.join(map(str, ancestors))))
def get_ancestor_ids(per, node):
sql = """
SELECT
ancestor
FROM
ancestor
WHERE
node=%s;
"""
per.execute(sql, (int(node), ))
for result in per:
yield int(result['ancestor'])
def get_ancestors(per, node):
sql = """
SELECT
nodes.*
FROM
ancestor
INNER JOIN
nodes
ON
ancestor.ancestor=nodes.id
WHERE
ancestor.node=%s;
"""
per.execute(sql, (int(node), ))
for result in per:
yield Node(**result)
def get_descendant_ids(per, node):
sql = """
SELECT
node
FROM
ancestor
WHERE
ancestor=%s;
"""
per.execute(sql, (int(node), ))
# TODO: check if fetchmany() is fast and not uses more memory
for result in per:
yield int(result['node'])
def get_descendants(per, node):
raise NotImplementedError("could be billions of objects")
def get_children(per, node):
sql = """
SELECT
*
FROM
nodes
WHERE
parent=%s;
"""
per.execute(sql, (int(node), ))
for result in per:
yield Node(**result)
def get_child_ids(per, node):
sql = """
SELECT
id
FROM
nodes
WHERE
parent=%s;
"""
per.execute(sql, (int(node), ))
for result in per:
yield int(result['id'])
def delete_node(per, node):
""" non-atomic """
id = int(node)
old_objects = set(get_descendant_ids(per, id))
old_objects.add(id)
old_object_ids = ','.join(map(str, old_objects))
sql = """
DELETE FROM
ancestor
WHERE
node IN ({})
OR
ancestor IN ({});
""".format(old_object_ids, old_object_ids)
per.execute(sql)
sql = """
DELETE FROM
nodes
WHERE
id IN ({});
""".format(old_object_ids)
per.execute(sql)
def move_node(per, node, new_parent):
""" non-atomic """
id = int(node)
parent_id = int(new_parent)
# Update ancestors by comparing the ancestor list of both the node
# and the new parent node. Delete all entries that are not in the
# parents list, add entries that are not in the nodes list.
# Also add the new parents ID and we're set.
# Hint for undestanding: Both (the nodes and the parent nodes)
# ancestor lists contain the root node, and there might be others,
# therefore we dont need to remove and re-add them to the database.
sql = """
DELETE FROM
ancestor
WHERE
ancestor
IN
(
SELECT
ancestor
FROM
ancestor
WHERE
node=%s
)
AND
node
IN
(
SELECT
node
FROM
ancestor
WHERE
ancestor=%s
OR
node=%s
);
"""
per.execute(sql, (id, id, id))
sql = """
INSERT INTO
ancestor
SELECT
sub.node, par.ancestor
FROM
ancestor AS sub
JOIN
(
SELECT
ancestor
FROM
ancestor
WHERE
node= %s
UNION SELECT %s
) AS par
ON TRUE
WHERE
sub.ancestor = %s
OR
sub.node = %s;
"""
per.execute(sql, (parent_id, parent_id, id, id))
parent_ancestors = set(get_ancestor_ids(per, parent_id))
parent_ancestors.add(parent_id)
insert_ancestors(per, node, parent_ancestors)
# change parent in nodes
sql = """
UPDATE
nodes
SET
parent=%s
WHERE
id=%s;
"""
per.execute(sql, (int(new_parent), int(node)))
| Python | 0.000001 | @@ -185,16 +185,24 @@
-    print('{} - {} {}'.format(' '*intend, node.id, node.type))
+    print('{} - {} {}'.format(' '*intend, node.id, node.type))  # noqa
|
ba0c292753355e5ff7e8e131c61e8086f31b3b76 | Create src/task_2_0.py | src/task_2_0.py | src/task_2_0.py | Python | 0.000039 | @@ -0,0 +1,394 @@
+# Section 1. Problem 2. Variant 0.
+# Write a program that prints to the screen the quotation you like best whose author is F. M. Dostoevsky. Do not forget that the author must be mentioned on a separate line.
+
+print("Life, life is everywhere, life is in ourselves, not in what is outside us.")
+
+print("\n\t\t\t\t\tF. M. Dostoevsky")
+
+input("\n\nPress Enter to exit.")
|
6f00204ae2603063eafbd74a369e9da0864854ca | Create new monthly violence polls | poll/management/commands/create_new_violence_polls.py | poll/management/commands/create_new_violence_polls.py | Python | 0.000002 | @@ -0,0 +1,2371 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0Afrom django.core.management.base import BaseCommand%0Aimport traceback%0A%0Afrom poll.models import Poll%0Afrom unregister.models import Blacklist%0Afrom django.conf import settings%0A%0Afrom optparse import make_option%0Afrom poll.forms import NewPollForm%0Afrom django.contrib.sites.models import Site%0Afrom django.contrib.auth.models import User%0Afrom rapidsms.models import Contact%0Afrom django.db.models import Q%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Create new violence polls%22%0A%0A option_list = BaseCommand.option_list + (%0A make_option('-n', '--name', dest='n'),%0A make_option('-t', '--poll_type', dest='t'),%0A make_option('-q', '--question', dest='q'),%0A make_option('-r', '--default_response', dest='r'),%0A make_option('-c', '--contacts', dest='c'),%0A make_option('-u', '--user', dest='u'),%0A make_option('-s', '--start_immediately', dest='s'),%0A make_option('-e', '--response_type', dest='e'),%0A make_option('-g', '--groups', dest='g'),%0A )%0A%0A def handle(self, **options):%0A edtrac_violence_girls = Poll.objects.create(%0A name=%22edtrac_violence_girls%22,%0A type=%22n%22,%0A question=%22How many cases of violence against girls were recorded this month? Answer in figures e.g. 5%22,%0A default_response='',%0A user=User.objects.get(username='admin'),%0A )%0A edtrac_violence_girls.sites.add(Site.objects.get_current())%0A %0A edtrac_violence_boys = Poll.objects.create(%0A name=%22edtrac_violence_boys%22,%0A type=%22n%22,%0A question=%22How many cases of violence against boys were recorded this month? Answer in figures e.g. 4%22,%0A default_response='',%0A user = User.objects.get(username='admin'),%0A )%0A edtrac_violence_boys.sites.add(Site.objects.get_current())%0A %0A edtrac_violence_reported = Poll.objects.create(%0A name='edtrac_violence_reported',%0A type='n',%0A question='How many cases of violence were referred to the Police this month? Answer in figures e.g. 6',%0A default_response='',%0A user=User.objects.get(username='admin'),%0A )%0A edtrac_violence_reported.sites.add(Site.objects.get_current())%0A
|
23c09555221b3f7500a4c658452c9c0cb223799c | Add evaluation using random forest | Train_SDAE/tools/evaluate_model.py | Train_SDAE/tools/evaluate_model.py | Python | 0 | @@ -0,0 +1,1146 @@
+import numpy as np%0A# import pandas as pd%0A# import sys%0Afrom scipy.special import expit%0Afrom sklearn import ensemble%0A%0Adef get_activations(exp_data, w, b):%0A exp_data = np.transpose(exp_data)%0A prod = exp_data.dot(w)%0A prod_with_bias = prod + b%0A return( expit(prod_with_bias) )%0A%0A# Order of *args: first all the weights and then all the biases%0Adef run_random_forest(nHLayers, exp_data, labels, *args):%0A print len(args%5B0%5D), len(args%5B0%5D%5B0%5D), len(args%5B0%5D%5B1%5D)%0A print len(args%5B0%5D%5B2%5D)%0A print %22NewLine!%5Cn%22, len(args%5B0%5D%5B3%5D)%0A print %22NewLine!%5Cn%22, len(args%5B0%5D%5B4%5D)%0A assert len(exp_data) == len(labels)%0A %0A # I think they should be already transposed when running the code. Will see%0A act = exp_data#.T%0A %0A for i in range(nHLayers):%0A print('Weights and biases for layer: ' + str(i+1))%0A print np.asarray(args%5B0%5D%5Bi%5D).shape, np.asarray(args%5B0%5D%5BnHLayers + i%5D).shape%0A act = get_activations(act.T, args%5B0%5D%5Bi%5D, args%5B0%5D%5BnHLayers + i%5D)%0A %0A rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5)%0A rfit = rf.fit(act, labels)%0A print('OOB score: %25.2f%5Cn' %25 rfit.oob_score_)%0A%0A
|
009df3372804fa946b7e1bd4c0827e887b964b38 | Convert blogger to simple xml | convert.py | convert.py | Python | 0.999999 | @@ -0,0 +1,1311 @@
+from bs4 import BeautifulSoup%0Aimport io%0Aimport markdown2%0Aimport time%0Aimport codecs%0A%0Afile = io.open(%22Import/blog-03-03-2013.xml%22)%0Afile_contents = file.read(-1)%0A%0A#lxml xpath doesn't seem to understand blogger export%0Asoup = BeautifulSoup(file_contents)%0A%0Aentries = soup(%22entry%22)%0Acount = 0%0A%0Adef formatTime(timefield):%0A time_obj = time.strptime(entry(timefield)%5B0%5D.string%5B0:16%5D, %22%25Y-%25m-%25dT%25H:%25M%22)%0A return time.strftime(%22%25Y%25m%25d%25H%25M%25S%22, time_obj)%0A %0Afor entry in entries:%0A categories = entry(%22category%22)%0A tags = %5B%5D%0A post = False%0A for category in categories:%0A if category%5B%22term%22%5D == %22http://schemas.google.com/blogger/2008/kind#post%22:%0A post = True%0A if category%5B%22scheme%22%5D == %22http://www.blogger.com/atom/ns#%22 and category%5B%22term%22%5D:%0A tags.append(category%5B%22term%22%5D)%0A%0A if post:%0A pub = formatTime(%22published%22)%0A updated = formatTime(%22updated%22)%0A filename_xml = %22%25s.blogger.xml%22 %25 pub%0A title = entry(%22title%22)%5B0%5D.string%0A content = entry(%22content%22)%5B0%5D.string%0A%0A blog_file = io.open(%22Export/%22 + filename_xml, %22w%22)%0A blog_file.write(%22%3Cblog%3E%5Cn%5Ct%3Ctitle%3E%25s%3C/title%3E%5Cn%5Ct%3Ccontent%3E%3C!%5BCDATA%5B%25s%5D%5D%3E%3C/content%3E%5Cn%3C/blog%3E%22 %25 (title, content))%0A blog_file.close()%0A %0A count += 1%0A%0Aprint %22Found %25d posts%22 %25 count%0Aprint %22done!%22
|
8348ce87a68592e7108c43687ebfdf12684a1914 | Add elementTypes.py file | elementTypes.py | elementTypes.py | Python | 0.000001 | @@ -0,0 +1,1722 @@
+%0Aclass elementC3D10():%0A def __init__(self):%0A self.name = 'C3D10'%0A self.desc = 'Quadratic tetrahedral element'%0A self.numNodes = 10%0A self.numIntPnts = 4%0A self.N = array(self.numNodes)%0A self.setIpcs()%0A def setIpcs(self):%0A alpha = 0.1770833333%0A beta = 0.4687500000%0A self.ipcs = numpy.array(%5B%5Balpha,alpha,alpha%5D,%0A %5Bbeta, alpha,alpha%5D,%0A %5Balpha,beta, alpha%5D,%0A %5Balpha,alpha,beta %5D%5D)%0A def shapeFunctionMatrix(self,ipc):%0A g,h,r=ipc%0A self.N%5B0%5D = (2.0*(1.0-g-h-r)-1.0)*(1.0-g-h-r)%0A self.N%5B1%5D = (2.0*g-1.0)*g%0A self.N%5B2%5D = (2.0*h-1.0)*h%0A self.N%5B3%5D = (2.0*r-1.0)*r%0A self.N%5B4%5D = 4.0*(1.0-g-h-r)*g%0A self.N%5B5%5D = 4.0*g*h%0A self.N%5B6%5D = 4.0*(1.0-g-h-r)*h%0A self.N%5B7%5D = 4.0*(1.0-g-h-r)*r%0A self.N%5B8%5D = 4.0*g*r%0A self.N%5B9%5D = 4.0*h*r%0A def interpFunc(self,nv):%0A return np.dot(self.N,nv)%0A %0Aclass elementC3D4():%0A def __init__(self):%0A self.name = 'C3D4'%0A self.desc = 'Linear tetrahedral element'%0A self.numNodes = 4%0A self.numIntPnts = 1%0A self.N = np.array(self.numNodes)%0A self.setIpcs()%0A def setIpcs(self):%0A alpha = 0.33333 # CHECK THESE VALUES%0A beta = 0.33333 # CHECK THESE VALUES%0A self.ipcs = np.array(%5B%5B%5D,%5B%5D,%5B%5D%5D)%0A def shapeFuncMatrix(self,ipc):%0A g,h,r=ipc%0A self.N%5B0%5D = (1.0-g-h-r)%0A self.N%5B1%5D = g%0A self.N%5B2%5D = h%0A self.N%5B3%5D = r%0A def interpFunc(self,nv):%0A return np.dot(self.N,nv) %0A %0A%0A%0A %0A %0A %0A
|
e789fb7246e7b926841f2d2912896fd0a0d14518 | Create login_portal.py | login_portal.py | login_portal.py | Python | 0.000001 | @@ -0,0 +1,335 @@
+from splinter import Browser%0A%0A%0Aprint 'Starting...'%0A%0Abrowser = Browser('firefox') # using firefox%0Abrowser.visit(%22http://portal.ku.edu.kw/sisapp/faces/login.jspx%22)%0Abrowser.fill('username','xxxxx') # enter student ID%0Abrowser.fill('password','yyyyy') # enter password%0A%0Abrowser.find_by_id('loginBtn').click() # click login%0A
|
82acd4827b2f3f426a6b97f474c54886758cfab7 | add code to update fields | obztak/scratch/update-fields.py | obztak/scratch/update-fields.py | Python | 0.000001 | @@ -0,0 +1,1731 @@
+#!/usr/bin/env python%0A%22%22%22%0AUpdate survey fields%0A%22%22%22%0A__author__ = %22Alex Drlica-Wagner%22%0Aimport copy%0A%0Aimport fitsio%0Aimport numpy as np%0Aimport pylab as plt%0A%0Aimport skymap%0A%0Afrom obztak.utils import fileio%0Aimport obztak.delve%0Afrom obztak.delve import DelveFieldArray%0A%0Aimport argparse%0Aparser = argparse.ArgumentParser(description=__doc__)%0Aparser.add_argument('old')%0Aparser.add_argument('new')%0Aparser.add_argument('-o','--outfile',default='update_target_fields.csv')%0Aargs = parser.parse_args()%0A%0Adb = DelveFieldArray.load_database()%0Aold = DelveFieldArray.load(args.old)%0Anew = DelveFieldArray.load(args.new)%0A%0Aprint(%22Running comparing to new fields...%22)%0A%0Aif len(old) != len(new):%0A print(%22Different number of fields%22)%0A%0Adelve = np.in1d(new.unique_id,db.unique_id)%0A%0A#done = (new%5B'PRIORITY'%5D %3C 0) & (old%5B'PRIORITY'%5D %3E= 0)%0Adone = (new%5B'PRIORITY'%5D %3C 0) & np.in1d(new.unique_id, old.unique_id%5Bold%5B'PRIORITY'%5D %3E= 0%5D)%0A%0Aplt.figure()%0Asmap = skymap.SurveyMcBryde()%0Asmap.draw_fields(new%5Bdone & ~delve%5D)%0Asmap.draw_des()%0Aplt.title('New')%0Aplt.show()%0A%0A# Write here%0Aout = DelveFieldArray.load(args.old)%0A%0A### There are two ways of doing this that should give the same answers...%0Aprint(%22Running DelveSurvey.update_covered_fields...%22)%0Aupdate = obztak.delve.DelveSurvey.update_covered_fields(old)%0Adone = (update%5B'PRIORITY'%5D %3C 0) & (old%5B'PRIORITY'%5D %3E= 0)%0Adelve = np.in1d(update.unique_id,db.unique_id)%0A%0Aplt.figure()%0Asmap = skymap.SurveyMcBryde()%0Asmap.draw_fields(update%5Bdone & ~delve%5D)%0A#smap.draw_fields(update%5Bdone%5D)%0Aplt.title('Update')%0A%0Aprint(%22Writing %25s...%22%25args.outfile)%0Aupdate.write(args.outfile)%0A%0A# double check%0Aassert len(fileio.read_csv(args.old)) == len(fileio.read_csv(args.outfile))%0A%0Aprint(%22REMINDER: gzip the output file and move to data directory.%22)%0A
|
7d5dcaa0a72dbdd78e192f082bbdf261de1d8963 | Delete occurrences of an element if it occurs more than n times | Codewars/DeleteOccurrencesOfElementOverNTimes.py | Codewars/DeleteOccurrencesOfElementOverNTimes.py | Python | 0.000001 | @@ -0,0 +1,646 @@
+# implemented with list comprehension with side-effects and a global variable%0A# there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead%0A%0Afrom collections import Counter%0A%0Ac = Counter()%0A%0A# for use in list comprehensions with side effects! Naughty...%0Adef count_and_return(x):%0A c%5Bx%5D += 1%0A return x%0A%0Adef delete_nth(arr,max_e):%0A if max_e %3C= 0:%0A return %5B%5D%0A global c%0A c = Counter()%0A return %5Bcount_and_return(x) for x in arr if c%5Bx%5D %3C max_e%5D # note: condition is evaluated before the function is applied to x, hence %3C instead of %3C=%0A
|
c7c7281fc964ac25aea291f18bbf29013f3f3d58 | question 7.1 | crack_7_1.py | crack_7_1.py | Python | 0.99996 | @@ -0,0 +1,469 @@
+def fib_slow(number):%0A%09if number == 0: return 1%0A%09elif number == 1: return 1%0A%09else:%0A%09%09return fib_slow(number-1) + fib_slow(number-2)%0A%0Adef fib_fast(number):%0A%09if numbers%5Bnumber%5D == 0:%0A%09%09if number == 0 or number == 1:%0A%09%09%09numbers%5Bnumber%5D = 1%0A%09%09%09return numbers%5Bnumber%5D%0A%09%09else:%0A%09%09%09temp = fib_fast(number-1) + fib_fast(number-2)%0A%09%09%09numbers%5Bnumber%5D = temp%0A%09%09%09return numbers%5Bnumber%5D%0A%09else:%0A%09%09return numbers%5Bnumber%5D%0A%0Anumbers = %5B0%5D * 6%0Aif __name__ == '__main__':%0A%09print fib_fast(5)
|
1c4adbe07892d95ca6254dcc2e48e11eb2141fa7 | Create pixelconversor.py | Art-2D/pixelconversor.py | Art-2D/pixelconversor.py | Python | 0.000003 | @@ -0,0 +1,59 @@
+//This program rake a image an convert it in 2D pixel art.%0A
|
096c8165ec2beacbc4897285b8fed439765d3e01 | Add test on update document title | test/integration/ggrc/models/test_document.py | test/integration/ggrc/models/test_document.py | Python | 0 | @@ -0,0 +1,886 @@
+# Copyright (C) 2017 Google Inc.%0A# Licensed under http://www.apache.org/licenses/LICENSE-2.0 %3Csee LICENSE file%3E%0A%0A%22%22%22Integration tests for Document%22%22%22%0A%0Afrom ggrc.models import all_models%0Afrom integration.ggrc import TestCase%0Afrom integration.ggrc.api_helper import Api%0Afrom integration.ggrc.models import factories%0A%0A%0Aclass TestDocument(TestCase):%0A %22%22%22Document test cases%22%22%22%0A # pylint: disable=invalid-name%0A%0A def setUp(self):%0A super(TestDocument, self).setUp()%0A self.api = Api()%0A%0A def test_update_title(self):%0A %22%22%22Test update document title.%22%22%22%0A create_title = %22test_title%22%0A update_title = %22update_test_title%22%0A document = factories.DocumentFactory(title=create_title)%0A response = self.api.put(document, %7B%22title%22: update_title%7D)%0A self.assert200(response)%0A self.assertEqual(all_models.Document.query.get(document.id).title,%0A update_title)%0A
|
|
6e0b02c660d20fe2beca96eeab8d3108fd4be2ea | add FIXME | crosscat/tests/test_log_likelihood.py | crosscat/tests/test_log_likelihood.py | import argparse
import random
from functools import partial
#
import numpy
import pylab
pylab.ion()
pylab.show()
#
from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as du
import crosscat.utils.geweke_utils as gu
import crosscat.utils.timing_test_utils as ttu
import crosscat.utils.convergence_test_utils as ctu
import experiment_runner.experiment_utils as eu
result_filename = 'result.pkl'
directory_prefix='test_log_likelihood'
noneify = set(['n_test'])
base_config = dict(
gen_seed=0,
num_rows=100, num_cols=4,
num_clusters=5, num_views=1,
n_steps=10, n_test=10,
)
def arbitrate_args(args):
if args.n_test is None:
args.n_test = args.num_rows / 10
return args
def test_log_likelihood_quality_test(config):
gen_seed = config['gen_seed']
num_rows = config['num_rows']
num_cols = config['num_cols']
num_clusters = config['num_clusters']
num_views = config['num_views']
n_steps = config['n_steps']
n_test = config['n_test']
# generate data
T, M_c, M_r, gen_X_L, gen_X_D = ttu.generate_clean_state(gen_seed, num_clusters,
num_cols, num_rows, num_views)
engine = LocalEngine()
sampled_T = gu.sample_T(engine, M_c, T, gen_X_L, gen_X_D)
T_test = random.sample(sampled_T, n_test)
gen_data_ll = ctu.calc_mean_test_log_likelihood(M_c, T, gen_X_L, gen_X_D, T)
gen_test_set_ll = ctu.calc_mean_test_log_likelihood(M_c, T, gen_X_L, gen_X_D, T_test)
# run inference
def calc_ll(T, p_State):
log_likelihoods = map(p_State.calc_row_predictive_logp, T)
mean_log_likelihood = numpy.mean(log_likelihoods)
return mean_log_likelihood
calc_data_ll = partial(calc_ll, T)
calc_test_set_ll = partial(calc_ll, T_test)
diagnostic_func_dict = dict(
data_ll=calc_data_ll,
test_set_ll=calc_test_set_ll,
)
X_L, X_D = engine.initialize(M_c, M_r, T)
X_L, X_D, diagnostics_dict = engine.analyze(M_c, T, X_L, X_D,
do_diagnostics=diagnostic_func_dict, n_steps=n_steps)
result = dict(
config=config,
diagnostics_dict=diagnostics_dict,
gen_data_ll=gen_data_ll,
gen_test_set_ll=gen_test_set_ll,
)
return result
def plot_result(result):
pylab.figure()
diagnostics_dict = result['diagnostics_dict']
gen_data_ll = result['gen_data_ll']
gen_test_set_ll = result['gen_test_set_ll']
#
pylab.plot(diagnostics_dict['data_ll'], 'g')
pylab.plot(diagnostics_dict['test_set_ll'], 'r')
pylab.axhline(gen_data_ll, color='g', linestyle='--')
pylab.axhline(gen_test_set_ll, color='r', linestyle='--')
return
if __name__ == '__main__':
from crosscat.utils.general_utils import Timer, MapperContext, NoDaemonPool
# do single experiment
parser = eu.generate_parser(base_config, noneify)
args = parser.parse_args()
args = arbitrate_args(args)
config = args.__dict__
result = test_log_likelihood_quality_test(config)
plot_result(result)
# demonstrate use of experiment runner
do_experiments = eu.do_experiments
is_result_filepath, generate_dirname, config_to_filepath = \
eu.get_fs_helper_funcs(result_filename, directory_prefix)
writer = eu.get_fs_writer(config_to_filepath)
read_all_configs, reader, read_results = eu.get_fs_reader_funcs(
is_result_filepath, config_to_filepath)
gen_configs_kwargs = dict(
num_clusters=[1, 2, 4],
num_rows = [20, 40, 100]
)
config_list = eu.gen_configs(base_config, **gen_configs_kwargs)
dirname = 'test_log_likelihood'
runner = test_log_likelihood_quality_test
with Timer('experiments') as timer:
with MapperContext(Pool=NoDaemonPool) as mapper:
# use non-daemonic mapper since run_geweke spawns daemonic processes
do_experiments(config_list, runner, writer, dirname, mapper)
pass
pass
all_configs = read_all_configs(dirname)
all_results = read_results(all_configs, dirname)
map(plot_result, all_results)
| Python | 0.000001 | @@ -2680,24 +2680,53 @@
style='--')%0A
+ # FIXME: save the result%0A
return%0A%0A
|
5172dcb5edd09afce992d237bd31700251fca4bd | Remove useless optional style argument to notify() | Bindings/python/Growl.py | Bindings/python/Growl.py | """
A Python module that enables posting notifications to the Growl daemon.
See <http://sourceforge.net/projects/growl/> for more information.
Requires PyObjC 1.1 <http://pyobjc.sourceforge.net/> and Python 2.3
<http://www.python.org/>.
Copyright 2003 Mark Rowe <[email protected]>
Released under the BSD license.
"""
from Foundation import NSArray, NSDistributedNotificationCenter, NSDictionary, NSNumber
from AppKit import NSWorkspace
class GrowlNotifier(object):
"""
A class that abstracts the process of registering and posting
notifications to the Growl daemon.
You can either pass `applicationName', `notifications',
`defaultNotifications' and `applicationIcon' to the constructor
or you may define them as class-level variables in a sub-class.
`defaultNotifications' is optional, and defaults to the value of
`notifications'. `applicationIcon' is also optional but defaults
to a pointless icon so is better to be specified.
"""
applicationName = 'GrowlNotifier'
notifications = []
defaultNotifications = None
applicationIcon = None
def __init__(self, applicationName=None, notifications=None, defaultNotifications=None, applicationIcon=None):
if applicationName is not None:
self.applicationName = applicationName
if notifications is not None:
self.notifications = notifications
if defaultNotifications is not None:
self.defaultNotifications = defaultNotifications
if applicationIcon is not None:
self.applicationIcon = applicationIcon
def register(self):
"""
Register this application with the Growl daemon.
"""
if not self.applicationIcon:
self.applicationIcon = NSWorkspace.sharedWorkspace().iconForFileType_("txt")
if self.defaultNotifications is None:
self.defaultNotifications = self.notifications
regInfo = {'ApplicationName': self.applicationName,
'AllNotifications': NSArray.arrayWithArray_(self.notifications),
'DefaultNotifications': NSArray.arrayWithArray_(self.defaultNotifications),
'ApplicationIcon': self.applicationIcon.TIFFRepresentation()}
d = NSDictionary.dictionaryWithDictionary_(regInfo)
notCenter = NSDistributedNotificationCenter.defaultCenter()
notCenter.postNotificationName_object_userInfo_deliverImmediately_("GrowlApplicationRegistrationNotification", None, d, True)
def notify(self, noteType, title, description, icon=None, appicon=None, style=None, sticky=False):
"""
Post a notification to the Growl daemon.
`noteType' is the name of the notification that is being posted.
`title' is the user-visible title for this notification.
`description' is the user-visible description of this notification.
`icon' is an optional icon for this notification. It defaults to
`self.applicationIcon'.
`appicon' is an optional icon for the sending application.
`sticky' is a boolean controlling whether the notification is sticky.
"""
assert noteType in self.notifications
if icon is None:
icon = self.applicationIcon
n = {'NotificationName': noteType,
'ApplicationName': self.applicationName,
'NotificationTitle': title,
'NotificationDescription': description,
'NotificationDefault': NSNumber.numberWithBool_(True),
'NotificationIcon': icon.TIFFRepresentation()}
if style is not None:
n['NotificationDefault'] = NSNumber.numberWithBool_(False)
if appicon is not None:
n['NotificationAppIcon'] = appicon.TIFFRepresentation()
if sticky:
n['NotificationSticky'] = NSNumber.numberWithBool_(True)
d = NSDictionary.dictionaryWithDictionary_(n)
notCenter = NSDistributedNotificationCenter.defaultCenter()
notCenter.postNotificationName_object_userInfo_deliverImmediately_('GrowlNotification', None, d, True)
def main():
from Foundation import NSRunLoop, NSDate
class TestGrowlNotifier(GrowlNotifier):
applicationName = 'Test Growl Notifier'
notifications = ['Foo']
n = TestGrowlNotifier(applicationIcon=NSWorkspace.sharedWorkspace().iconForFileType_('unknown'))
n.register()
# A small delay to ensure our notification will be shown.
NSRunLoop.currentRunLoop().runUntilDate_(NSDate.dateWithTimeIntervalSinceNow_(0.1))
n.notify('Foo', 'Test Notification', 'Blah blah blah')
if __name__ == '__main__':
main()
| Python | 0 | @@ -2630,20 +2630,8 @@
-    def notify(self, noteType, title, description, icon=None, appicon=None, style=None, sticky=False):
+    def notify(self, noteType, title, description, icon=None, appicon=None, sticky=False):
@@ -3534,225 +3534,41 @@
-             'NotificationDefault': NSNumber.numberWithBool_(True),
-             'NotificationIcon': icon.TIFFRepresentation()}
-
-        if style is not None:
-            n['NotificationDefault'] = NSNumber.numberWithBool_(False)
+             'NotificationIcon': icon.TIFFRepresentation()}
|
41752bfcbc0a1afdf7a0f3caa52285af08d131dd | Create get_var.py | get_var.py | get_var.py | Python | 0.000002 | @@ -0,0 +1,210 @@
+import parse_expr%0A%0Avariables = %7B%7D%0A%0Adef getVar(key):%0A if key%5B0%5D == '%25':%0A return variables%5Bkey%5B1:%5D%5D%0A elif key%5B-1%5D in ('+', '-', '/', '*'):%0A return parse_expr(key)%0A else:%0A return key%0A
|
|
e42142498f2ef2b3e78d1becb024441500902a79 | add corruptor | test/corrupt.py | test/corrupt.py | Python | 0.999262 | @@ -0,0 +1,613 @@
+#!/usr/bin/env python%0A%0Afrom __future__ import print_function%0A%0Aimport os%0Aimport sys%0Aimport random%0A%0Aif len(sys.argv) != 3 and not sys.argv%5B2%5D:%0A print('''%0A Usage: corrupt.py filename magic_string%0A%0A magic_string is what you want to write to the file%0A it can not be empty and will be randomly placed %5Cn%5Cn''')%0A%0A sys.exit(1)%0A%0Asize = 0%0Aindex = 0%0Atry:%0A size = os.stat(sys.argv%5B1%5D).st_size%0Aexcept Exception as e:%0A print(e)%0A sys.exit(1)%0A%0Awith open(sys.argv%5B1%5D, %22rb+%22) as f:%0A index = random.randint(0, size)%0A f.seek(index)%0A f.write(sys.argv%5B2%5D)%0A%0Aprint(%22Corrupted file offset: %25s%5Cn%22 %25 index)%0A%0A
|
|
d2f18cc0992d4d7217583cd2601bc90afaa93a04 | add grain that detects SSDs | salt/grains/ssds.py | salt/grains/ssds.py | Python | 0.000002 | @@ -0,0 +1,1019 @@
+# -*- coding: utf-8 -*-%0A'''%0A Detect SSDs%0A'''%0Aimport os%0Aimport salt.utils%0Aimport logging%0A%0Alog = logging.getLogger(__name__)%0A%0Adef ssds():%0A '''%0A Return list of disk devices that are SSD (non-rotational)%0A '''%0A%0A SSDs = %5B%5D%0A for subdir, dirs, files in os.walk('/sys/block'):%0A for dir in dirs:%0A flagfile = subdir + '/' + dir + '/queue/rotational'%0A if os.path.isfile(flagfile):%0A with salt.utils.fopen(flagfile, 'r') as _fp:%0A flag = _fp.read(1)%0A if flag == '0':%0A SSDs.append(dir)%0A log.info(dir + ' is a SSD')%0A elif flag == '1':%0A log.info(dir + ' is no SSD')%0A else:%0A log.warning(flagfile + ' does not report 0 or 1')%0A log.debug(flagfile + ' reports ' + flag)%0A else:%0A log.warning(flagfile + ' does not exist for ' + dir)%0A %0A return %7B'SSDs': SSDs%7D%0A
|
|
936c2327d6be9da48dfbef47c17167510e9c2262 | Create bzip2.py | wigs/bzip2.py | wigs/bzip2.py | Python | 0.000007 | @@ -0,0 +1,125 @@
+class bzip2(Wig):%0A%09tarball_uri = 'http://www.bzip.org/1.0.6/bzip2-$RELEASE_VERSION$.tar.gz'%0A%09last_release_version = 'v1.0.6'%0A
|
|
c2ca8328835d544440fd3b87813e2768ece58685 | Add new package: audacious (#16121) | var/spack/repos/builtin/packages/audacious/package.py | var/spack/repos/builtin/packages/audacious/package.py | Python | 0.00002 | @@ -0,0 +1,1255 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Audacious(AutotoolsPackage):%0A %22%22%22A lightweight and versatile audio player.%22%22%22%0A%0A homepage = %22https://audacious-media-player.org/%22%0A url = %22https://github.com/audacious-media-player/audacious/archive/audacious-4.0.2.tar.gz%22%0A%0A version('4.0.2', sha256='92f30a78353c50f99b536061b9d94b6b9128760d546fddbf863e3591c4ac5a8d')%0A version('4.0.1', sha256='203195cf0d3c2e40d23c9895269ca0ace639c4a2b4dceb624169d75337059985')%0A version('4.0', sha256='cdfffd0eb966856980328ebb0fff9cbce57f99db9bda15e7e839d26c89e953e6')%0A version('3.10.1', sha256='c478939b4bcf6704c26eee87d48cab26547e92a83741f437711178c433373fa1')%0A version('3.10', sha256='82710d6ac90931c2cc4a0f0fcb6380ac21ed42a7a50856d16a67d3179a96e9ae')%0A%0A depends_on('m4', type='build')%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('glib')%0A depends_on('qt')%0A%0A def autoreconf(self, spec, prefix):%0A bash = which('bash')%0A bash('./autogen.sh')%0A
|
|
4287d2290c581b907b08efabc1e6bccea4019ac6 | add new package (#15743) | var/spack/repos/builtin/packages/py-pyface/package.py | var/spack/repos/builtin/packages/py-pyface/package.py | Python | 0 | @@ -0,0 +1,1673 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0A%0Aclass PyPyface(PythonPackage):%0A %22%22%22The pyface project contains a toolkit-independent GUI abstraction layer,%0A which is used to support the %22visualization%22 features of the Traits%0A package. Thus, you can write code in terms of the Traits API (views, items,%0A editors, etc.), and let pyface and your selected toolkit and back-end take%0A care of the details of displaying them.%22%22%22%0A%0A homepage = %22https://docs.enthought.com/pyface%22%0A url = %22https://pypi.io/packages/source/p/pyface/pyface-6.1.2.tar.gz%22%0A%0A version('6.1.2', sha256='7c2ac3d5cbec85e8504b3b0b63e9307be12c6d710b46bae372ce6562d41f4fbc')%0A%0A variant('backend', default='pyqt5', description='Default backend',%0A values=('wx', 'pyqt', 'pyqt5', 'pyside'), multi=False)%0A%0A depends_on('py-setuptools', type='build')%0A depends_on('py-traits', type=('build', 'run'))%0A%0A # Backends%0A depends_on('[email protected]:', when='backend=wx', type=('build', 'run'))%0A depends_on('py-numpy', when='backend=wx', type=('build', 'run'))%0A depends_on('[email protected]:', when='backend=pyqt', type=('build', 'run'))%0A depends_on('py-pygments', when='backend=pyqt', type=('build', 'run'))%0A depends_on('py-pyqt5@5:', when='backend=pyqt5', type=('build', 'run'))%0A depends_on('py-pygments', when='backend=pyqt5', type=('build', 'run'))%0A depends_on('[email protected]:', when='backend=pyside', type=('build', 'run'))%0A depends_on('py-pygments', when='backend=pyside', type=('build', 'run'))%0A
|
|
be0033ac91c28f3e45eff34c84b7da59d7fcefe2 | add py-ranger package (#3258) | var/spack/repos/builtin/packages/py-ranger/package.py | var/spack/repos/builtin/packages/py-ranger/package.py | Python | 0 | @@ -0,0 +1,1535 @@
+##############################################################################%0A# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the LICENSE file for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0A#%0Afrom spack import *%0A%0A%0Aclass PyRanger(PythonPackage):%0A %22%22%22A VIM-inspired filemanager for the console%22%22%22%0A%0A homepage = %22http://ranger.nongnu.org/%22%0A url = %22https://github.com/ranger/ranger/archive/v1.7.2.tar.gz%22%0A%0A version('1.7.2', '27805c3ab7ec4b129e1b93249506d925')%0A%0A depends_on('[email protected]:')%0A
|
|
7e4a62aa483fbadc7089144191e48948f419903b | add setup.py | py/setup.py | py/setup.py | Python | 0 | @@ -0,0 +1,332 @@
+#!/usr/bin/env python%0A# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :%0A# Copyright (c) 2010, Kou Man Tong. All rights reserved.%0A# For licensing, see LICENSE file included in the package.%0A%0Afrom distutils.core import setup%0A%0Asetup(name = %22vtdb%22,%0A%09%09packages=%5B%22vtdb%22, %22net%22%5D,%0A%09%09platforms = %22Any%22,%0A%09%09)%0A
|
|
a8f1529f6c077c0d70ccb326da6e63f3dd78ec76 | move kernel sanitization to separate script | sanitize_kernels.py | sanitize_kernels.py | Python | 0.000001 | @@ -0,0 +1,450 @@
+import glob%0Aimport nbformat%0A%0A#sanitize kernelspec%0Anotebooks = glob.glob(%22notebooks/*.ipynb%22)%0Aold_envs = %7B%7D%0Afor nb in notebooks:%0A tmp = nbformat.read(nb,4)%0A old_envs%5Bnb%5D = tmp%5B'metadata'%5D%5B'kernelspec'%5D%5B'name'%5D%0A tmp%5B'metadata'%5D%5B'kernelspec'%5D%5B'name'%5D = 'python2'%0A nbformat.write(tmp,nb)%0A%0A#revert kernelspec%0A#for k in old_envs:%0A# tmp = nbformat.read(k,4)%0A# tmp%5B'metadata'%5D%5B'kernelspec'%5D%5B'name'%5D = old_envs%5Bk%5D%0A# nbformat.write(tmp,k)%0A
|
|
9b4f18dbf63a76bd2c0723677fb0d0215831324a | Create __init__.py | ext/__init__.py | ext/__init__.py | Python | 0.000429 | @@ -0,0 +1 @@
+%0A
|
|
52e282b8c51c71db61cb0163df02caf2dce63b45 | add pretty function repr extension | extensions/pretty_func_repr.py | extensions/pretty_func_repr.py | Python | 0.000001 | @@ -0,0 +1,1143 @@
+%22%22%22%0ATrigger pinfo (??) to compute text reprs of functions, etc.%0A%0ARequested by @katyhuff%0A%22%22%22%0A%0Aimport types%0A%0Afrom IPython import get_ipython%0A%0A%0Adef pinfo_function(obj, p, cycle):%0A %22%22%22Call the same code as %60foo?%60 to compute reprs of functions%0A %0A Parameters%0A ----------%0A obj:%0A The object being formatted%0A p:%0A The pretty formatter instance%0A cycle: %0A Whether a cycle has been detected (unused)%0A %22%22%22%0A text = get_ipython().inspector._format_info(obj, detail_level=1)%0A p.text(text)%0A%0A%0A_save_types = %7B%7D%0A%0A%0Adef load_ipython_extension(ip):%0A %22%22%22register pinfo_function as the custom plain-text repr for funtion types%22%22%22%0A pprinter = ip.display_formatter.formatters%5B'text/plain'%5D%0A%0A for t in (types.FunctionType,%0A types.BuiltinMethodType,%0A types.BuiltinFunctionType):%0A f = pprinter.for_type(t, pinfo_function)%0A _save_types%5Bt%5D = f%0A%0A%0Adef unload_ipython_extension(ip):%0A %22%22%22unregister pinfo_function%22%22%22%0A pprinter = ip.display_formatter.formatters%5B'text/plain'%5D%0A for t, f in _save_types.items():%0A pprinter.for_type(t, f)%0A %0A _save_types.clear()%0A%0A
|
|
864bf2bb3bdb731d0725cc33891145f2a7da17d3 | Add initialization functions for database connection | db/common.py | db/common.py | Python | 0.000001 | @@ -0,0 +1,835 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport os%0A%0Afrom contextlib import contextmanager%0A%0Afrom sqlalchemy import create_engine%0Afrom sqlalchemy.orm.session import sessionmaker%0Afrom sqlalchemy.schema import MetaData%0Afrom sqlalchemy.ext.declarative import declarative_base%0A%0Afrom utils import get_connection_string_from_config_file%0A%0Acfg_src = os.path.join(os.path.dirname(__file__), %22..%22, r%22_config.ini%22)%0Aconn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3')%0A%0AEngine = create_engine(conn_string, echo=False, pool_size=5)%0ASession = sessionmaker(bind=Engine)%0ABase = declarative_base(metadata=MetaData(schema='nhl', bind=Engine))%0A%0A%0A@contextmanager%0Adef session_scope():%0A session = Session()%0A try:%0A yield session%0A except:%0A session.rollback()%0A raise%0A finally:%0A session.close()%0A
|
|
ba0e1d90f5f33ed63c56c2788873624731a7a0b5 | add file | regxtest.py | regxtest.py | Python | 0.000001 | @@ -0,0 +1,1980 @@
+'''%0A((abc)%7B4%7D)%0A%5B1-5%5D%7B5%7D%0A5+%0A5*%0A5?%0A'''%0AEQUL = 1%0ACOUNT = 2%0AANY = 3%0ATREE = 4%0A%0Aclass Node %0A def __init__(self, ntype, parent = None):%0A self.type = ntype %0A self.c = None%0A self.children = %5B%5D%0A self.parent = parent%0A %0Aclass RegX: %0A def __init__(self, regstr):%0A self.curnode = Node(TREE)%0A self.tokens = self.curnode.children%0A self.parseregx(regstr) %0A%0A def parseany(self, regstr):%0A %0A def parseregx(self, regstr, idx = 0):%0A regstr_len = len(regstr)%0A while True:%0A if regstr%5Bidx%5D == '%5B':%0A newnode = Node(ANY, self.curnode)%0A self.tokens.append(newnode)%0A idx = self.parseany(regstr, idx)%0A elif regstr%5Bidx%5D == '%7B':%0A newnode = Node(COUNT, self.curnode)%0A self.tokens.insert(-1, newnode)%0A idx+=1%0A elif regstr%5Bidx%5D == '(':%0A newnode = Node(TREE, self.curnode)%0A self.curnode = newnode%0A self.tokens = newnode.children%0A parseregx(regstr, idx)%0A elif regstr%5Bidx%5D == ')':%0A self.curnode = self.curnode.parent%0A self.tokens = self.curnode.children%0A idx+=1%0A elif regstr%5Bidx%5D == '?':%0A newnode = Node(COUNT, self.curnode)%0A newnode.c = regstr%5Bidx%5D%0A self.tokens.insert(-1, newnode)%0A idx+=1%0A elif regstr%5Bidx%5D == '+':%0A newnode = Node(COUNT, self.curnode)%0A newnode.c = regstr%5Bidx%5D%0A self.tokens.insert(-1, newnode)%0A idx+=1%0A elif regstr%5Bidx%5D == '*':%0A newnode = Node(COUNT, self.curnode)%0A newnode.c = regstr%5Bidx%5D%0A self.tokens.insert(-1, newnode)%0A idx+=1%0A elif regstr%5Bidx%5D == '.': %0A pass%0A elif:%0A pass
|
|
0100a3468dbada1e7ec3cbeaebda7ee11874ab8b | find similarly related words | relation.py | relation.py | Python | 0.999999 | @@ -0,0 +1,2328 @@
+#!/usr/bin/env python%0A%0A%22%22%22Given phrases p1 and p2, find nearest neighbors to both and rank%0Apairs of neighbors by similarity to vec(p2)-vec(p1) in given word%0Arepresentation.%0A%0AThe basic idea is a straightforward combination of nearest neighbors%0Aand analogy as in word2vec (https://code.google.com/p/word2vec/).%0A%22%22%22%0A%0Aimport sys%0Aimport os%0A%0Aimport numpy%0A%0Aimport wvlib%0A%0Afrom distance import process_options, get_query%0A%0Adef process_query(wv, query, options=None):%0A try: %0A vectors = %5Bwv.words_to_vector(q) for q in query%5D%0A except KeyError, e:%0A print %3E%3E sys.stderr, 'Out of dictionary word: %25s' %25 str(e)%0A return False%0A%0A words = %5Bw for q in query for w in q%5D%0A if not options.quiet:%0A for w in words:%0A print '%5CnWord: %25s Position in vocabulary: %25d' %25 (w, wv.rank(w))%0A%0A nncount = 100 # TODO: add CLI parameter%0A nearest = %5Bwv.nearest(v, n=nncount, exclude=words) for v in vectors%5D%0A nearest = %5B%5B(n%5B0%5D, n%5B1%5D, wv%5Bn%5B0%5D%5D) for n in l%5D for l in nearest%5D%0A assert len(nearest) == 2, 'internal error'%0A pairs = %5B(n1, n2, %0A numpy.dot(wvlib.unit_vector(vectors%5B1%5D-vectors%5B0%5D+n2%5B2%5D), n1%5B2%5D))%0A for n1 in nearest%5B0%5D for n2 in nearest%5B1%5D if n1%5B0%5D != n2%5B0%5D%5D%0A pairs.sort(lambda a, b: cmp(b%5B2%5D, a%5B2%5D))%0A%0A nncount = options.number if options else 10%0A for p in pairs%5B:nncount%5D:%0A print '%25s%5Ct---%5Ct%25s%5Ct%25f' %25 (p%5B1%5D%5B0%5D, p%5B0%5D%5B0%5D, p%5B2%5D)%0A%0A return True%0A%0Adef query_loop(wv, options):%0A while True:%0A try:%0A query = get_query(options.prompt, options.multiword, %0A options.exit_word, 3)%0A except EOFError:%0A return 0%0A if not query:%0A continue%0A if options.echo:%0A print query%0A if len(query) %3C 2:%0A print %3E%3E sys.stderr, 'Enter two words/phrases'%0A continue%0A if len(query) %3E 2:%0A print %3E%3E sys.stderr, 'Ignoring words/phrases after the second'%0A query = query%5B:3%5D%0A process_query(wv, query, options)%0A%0Adef main(argv=None):%0A if argv is None:%0A argv = sys.argv%0A try:%0A wv, options = process_options(argv%5B1:%5D)%0A except Exception, e:%0A print %3E%3E sys.stderr, 'Error: %25s' %25 str(e)%0A return 1%0A return query_loop(wv, options)%0A%0Aif __name__ == '__main__':%0A sys.exit(main(sys.argv))%0A
|
|
11380e7db081960757cbde2c4d2e69b695648782 | Add routine to calculate density. | density.py | density.py | Python | 0 | @@ -0,0 +1,765 @@
+#!/usr/bin/env python%0A# -----------------------------------------------------------------------------%0A# GENHERNQUIST.DENSITY%0A# Laura L Watkins %[email protected]%5D%0A# -----------------------------------------------------------------------------%0A%0A%0Adef density(r, norm, rs, alpha, beta, gamma):%0A %0A %22%22%22%0A Density profile of a generalised Hernquist model.%0A %0A INPUTS%0A r : radial variable (requires unit)%0A norm : normalisation (requires unit)%0A rs : scale radius of model (requires unit)%0A alpha : sharpness of transition between inner and outer%0A beta : outer logarithmic slope%0A gamma : inner logarithmic slope%0A %22%22%22%0A %0A rho = norm*(r/rs)**(-gamma)*(1+(r/rs)**alpha)**((gamma-beta)/alpha)%0A %0A return rho%0A
|
|
3fb3662e58e35ccb283074c1078e1c9e7aaf88ed | Add live test for session | LendingClub/tests/live_session_test.py | LendingClub/tests/live_session_test.py | Python | 0 | @@ -0,0 +1,1160 @@
+#!/usr/bin/env python%0A%0Aimport sys%0Aimport unittest%0Aimport getpass%0Afrom logger import TestLogger%0A%0Asys.path.insert(0, '.')%0Asys.path.insert(0, '../')%0Asys.path.insert(0, '../../')%0A%0Afrom LendingClub import session%0A%0A%0Aclass LiveTestSession(unittest.TestCase):%0A http = None%0A session = None%0A logger = None%0A%0A def setUp(self):%0A self.logger = TestLogger()%0A self.session = session.Session(logger=self.logger)%0A%0A def tearDown(self):%0A pass%0A%0A def test_login(self):%0A %22%22%22 test_valid_login%0A Test login with credentials from the user%0A %22%22%22%0A%0A print '%5Cn%5CnEnter a valid LendingClub account information...'%0A email = raw_input('Email:')%0A password = getpass.getpass()%0A%0A self.assertTrue(self.session.authenticate(email, password))%0A print 'Authentication successful'%0A%0A def test_invalid_login(self):%0A %22%22%22 test_invalid_password%0A Test login with the wrong password%0A %22%22%22%0A self.assertRaises(%0A session.AuthenticationError,%0A lambda: self.session.authenticate('[email protected]', 'wrongsecret')%0A )%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
9e4858e652fba57f767a9c6d921853a6487301bd | Add a test for the version string parsing code | epsilon/test/test_version.py | epsilon/test/test_version.py | Python | 0.00002 | @@ -0,0 +1,597 @@
+%22%22%22%0ATests for turning simple version strings into twisted.python.versions.Version%0Aobjects.%0A%0A%22%22%22%0Afrom epsilon import asTwistedVersion%0Afrom twisted.trial.unittest import SynchronousTestCase%0A%0A%0Aclass AsTwistedVersionTests(SynchronousTestCase):%0A def test_simple(self):%0A %22%22%22%0A A simple version string can be turned into a Version object.%0A %22%22%22%0A version = asTwistedVersion(%22package%22, %221.2.3%22)%0A self.assertEqual(version.package, %22package%22)%0A self.assertEqual(version.major, 1)%0A self.assertEqual(version.minor, 2)%0A self.assertEqual(version.micro, 3)%0A
|
|
dff8d43edd0e831605f1b1c3b2d261fcf05dca9a | Add wordpress guid replace script | script/wordpress/guid.py | script/wordpress/guid.py | Python | 0 | @@ -0,0 +1,434 @@
+import MySQLdb%0Aimport urlparse%0A%0Apoe = %22https://wordpress.wordpress%22%0A%0Adb = MySQLdb.connect(db=%22wordpress%22,user=%22%22,passwd=%22%22) %0Ac = db.cursor() %0Asql = %22SELECT ID,guid from wp_posts;%22%0Ac.execute(sql)%0Arecords = c.fetchall()%0Afor record in records:%0A o = urlparse.urlparse(record%5B1%5D)%0A url = poe + o.path%0A if o.query:%0A url = url + %22?%22 + o.query%0A print %22UPDATE wp_posts SET guid ='%22 + url + %22' where ID = '%22 + str(record%5B0%5D) + %22';%22%0A%0A
|
|
c48ec87b3e1c672864fc8c5bfe1aa551c01846ee | add basic tcp server | Server.py | Server.py | Python | 0.000001 | @@ -0,0 +1,1670 @@
+%22%22%22%0AFile: Server.py%0AAuthor: Daniel Schauenberg %[email protected]%3E%0ADescription: class for implementing a search engine web server%0A%22%22%22%0Aimport socket%0Afrom operator import itemgetter%0A%0Aclass Webserver:%0A %22%22%22 class for implementing a web server, serving the%0A inverted index search engine to the outside%0A (or inside) world%0A %22%22%22%0A def __init__(self, host='', port=3366):%0A %22%22%22 constructor method to set the webserver basic settings%0A %22%22%22%0A self.host = host%0A self.port = port%0A self.socket = None%0A%0A def bind_to_port(self):%0A %22%22%22 simple method to make the port binding easier%0A %22%22%22%0A self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)%0A self.socket.bind((self.host,self.port))%0A # number of queued connections%0A self.socket.listen(3)%0A # create endless loop waiting for connections%0A # can be interrupted via CTRL-C%0A try:%0A while True:%0A # get socket object and client address%0A connection, clientsock = self.socket.accept()%0A print %22Client %25s connected with port %25s.%22 %25 (itemgetter(0)(clientsock),itemgetter(1)(clientsock))%0A while True:%0A data = connection.recv(8192)%0A if not data: break%0A #connection.sendall(data)%0A print data%0A connection.close()%0A print clientaddr%0A finally:%0A # don't leave socket open when going home%0A self.socket.close()%0A%0Adef main():%0A foo = Webserver()%0A foo.bind_to_port()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
94403aedd21947c30b5d8159fcd42288050afc3a | Create 6kyu_personalized_brand_list.py | Solutions/6kyu/6kyu_personalized_brand_list.py | Solutions/6kyu/6kyu_personalized_brand_list.py | Python | 0 | @@ -0,0 +1,251 @@
+from collections import OrderedDict%0A%0Adef sorted_brands(history):%0A poplr=OrderedDict()%0A for i in history:%0A try: poplr%5Bi%5B'brand'%5D%5D+=1%0A except: poplr%5Bi%5B'brand'%5D%5D=1%0A return sorted(poplr.keys(), key=lambda x: poplr%5Bx%5D, reverse=1)%0A
|
|
2f6720e5f31c6e1753e2595867a5ef690b79bda7 | Fix snapdiff arg parsing. | scripts/snapdiff.py | scripts/snapdiff.py | #!/usr/bin/env python
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
import sys
import os.path
import subprocess
import optparse
import Image
thumb_size = 320, 320
def compare(im, ref):
import ImageMath
# See http://www.pythonware.com/library/pil/handbook/imagemath.htm
mask = ImageMath.eval("min(abs(a - b), 1)", a=im, b=ref)
gray = ref.convert('L')
# TODO
def surface(html, image):
if False:
html.write(' <td><a href="%s"><img src="%s"/></a></td>\n' % (image, image))
else:
name, ext = os.path.splitext(image)
thumb = name + '_thumb' + ext
if not os.path.exists(thumb) and os.path.exists(image):
im = Image.open(image)
im.thumbnail(thumb_size)
im.save(thumb)
html.write(' <td><a href="%s"><img src="%s"/></a></td>\n' % (image, thumb))
def main():
optparser = optparse.OptionParser(
usage="\n\t%prog [options] [file] ...",
version="%%prog")
optparser.add_option(
'-o', '--output', metavar='FILE',
type="string", dest="output",
help="output filename [stdout]")
optparser.add_option(
'--start', metavar='FRAME',
type="int", dest="start", default=1,
help="start frame [default: %default]")
optparser.add_option(
'--stop', metavar='FRAME',
type="int", dest="stop", default=9999,
help="stop frame [default: %default]")
optparser.add_option(
'-f', '--fuzz',
type="string", dest="fuzz", default='5%',
help="fuzz [default: %default]")
(options, args) = optparser.parse_args(sys.argv[1:])
if len(args) != 2:
optparser.error('incorrect number of arguments')
ref_prefix = sys.argv[1]
src_prefix = sys.argv[2]
if options.output:
html = open(options.output, 'wt')
else:
html = sys.stdout
html.write('<html>\n')
html.write(' <body>\n')
html.write(' <table border="1">\n')
html.write(' <tr><th><th>ref</th><th>src</th><th>Δ</th></tr>\n')
for frame_no in range(options.start, options.stop + 1):
ref_image = "%s%04u.png" % (ref_prefix, frame_no)
src_image = "%s%04u.png" % (src_prefix, frame_no)
delta_image = "%s%04u_diff.png" % (src_prefix, frame_no)
if os.path.exists(ref_image) and os.path.exists(src_image):
html.write(' <tr>\n')
subprocess.call(["compare", '-metric', 'AE', '-fuzz', options.fuzz, ref_image, src_image, delta_image])
surface(html, ref_image)
surface(html, src_image)
surface(html, delta_image)
html.write(' </tr>\n')
html.flush()
html.write(' </table>\n')
html.write(' </body>\n')
html.write('</html>\n')
if __name__ == '__main__':
main()
| Python | 0 | @@ -2962,26 +2962,22 @@
refix =
-sys.argv%5B1
+args%5B0
%5D%0A sr
@@ -2991,18 +2991,14 @@
x =
-sys.argv%5B2
+args%5B1
%5D%0A%0A
|
48cac034e7b402e2d4b3cb52d2cae51b44928e0b | add Faster R-CNN | examples/faster_rcnn/eval.py | examples/faster_rcnn/eval.py | Python | 0.000384 | @@ -0,0 +1,1995 @@
+from __future__ import division%0A%0Aimport argparse%0Aimport sys%0Aimport time%0A%0Aimport chainer%0Afrom chainer import iterators%0A%0Afrom chainercv.datasets import voc_detection_label_names%0Afrom chainercv.datasets import VOCDetectionDataset%0Afrom chainercv.evaluations import eval_detection_voc%0Afrom chainercv.links import FasterRCNNVGG16%0Afrom chainercv.utils import apply_detection_link%0A%0A%0Adef main():%0A parser = argparse.ArgumentParser()%0A parser.add_argument('--gpu', type=int, default=-1)%0A parser.add_argument('--batchsize', type=int, default=32)%0A args = parser.parse_args()%0A%0A model = FasterRCNNVGG16(pretrained_model='voc07')%0A%0A if args.gpu %3E= 0:%0A chainer.cuda.get_device(args.gpu).use()%0A model.to_gpu()%0A%0A model.use_preset('evaluate')%0A%0A dataset = VOCDetectionDataset(%0A year='2007', split='test', use_difficult=True, return_difficult=True)%0A iterator = iterators.SerialIterator(%0A dataset, args.batchsize, repeat=False, shuffle=False)%0A%0A start_time = time.time()%0A processed = 0%0A%0A def hook(%0A pred_bboxes, pred_labels, pred_scores, gt_values):%0A global processed%0A processed += len(pred_bboxes)%0A fps = len(processed) / (time.time() - start_time)%0A sys.stdout.write(%0A '%5Cr%7B:d%7D of %7B:d%7D images, %7B:.2f%7D FPS'.format(%0A len(processed), len(dataset), fps))%0A sys.stdout.flush()%0A%0A pred_bboxes, pred_labels, pred_scores, gt_values = %5C%0A apply_detection_link(model, iterator, hook=hook)%0A gt_bboxes, gt_labels, gt_difficults = gt_values%0A%0A eval_ = eval_detection_voc(%0A pred_bboxes, pred_labels, pred_scores,%0A gt_bboxes, gt_labels, gt_difficults,%0A use_07_metric=True)%0A%0A print()%0A print('mAP: %7B:f%7D'.format(eval_%5B'map'%5D))%0A for l, name in enumerate(voc_detection_label_names):%0A if l in eval_:%0A print('%7B:s%7D: %7B:f%7D'.format(name, eval_%5Bl%5D%5B'ap'%5D))%0A else:%0A print('%7B:s%7D: -'.format(name))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
b098f2ad30339c0efb9728741b796fe9f2db7f74 | Make sure mopidy startup doesn't block | mycroft/skills/playback_control/mopidy_service.py | mycroft/skills/playback_control/mopidy_service.py | from mycroft.messagebus.message import Message
from mycroft.util.log import getLogger
from mycroft.skills.audioservice import AudioBackend
from os.path import dirname, abspath, basename
import sys
import time
logger = getLogger(abspath(__file__).split('/')[-2])
__author__ = 'forslund'
sys.path.append(abspath(dirname(__file__)))
Mopidy = __import__('mopidypost').Mopidy
class MopidyService(AudioBackend):
def _connect(self, message):
logger.debug('Could not connect to server, will retry quietly')
url = 'http://localhost:6680'
if self.config is not None:
url = self.config.get('url', url)
try:
self.mopidy = Mopidy(url)
except:
if self.connection_attempts < 1:
logger.debug('Could not connect to server, will retry quietly')
self.connection_attempts += 1
time.sleep(10)
self.emitter.emit(Message('MopidyServiceConnect'))
return
logger.info('Connected to mopidy server')
def __init__(self, config, emitter, name='mopidy'):
self.connection_attempts = 0
self.emitter = emitter
self.config = config
self.name = name
self.mopidy = None
self.emitter.on('MopidyServiceConnect', self._connect)
self._connect(None)
def supported_uris(self):
if self.mopidy:
return ['file', 'http', 'https', 'local', 'spotify', 'gmusic']
else:
return []
def clear_list(self):
self.mopidy.clear_list()
def add_list(self, tracks):
self.mopidy.add_list(tracks)
def play(self):
self.mopidy.play()
def stop(self):
self.mopidy.clear_list()
self.mopidy.stop()
def pause(self):
self.mopidy.pause()
def resume(self):
self.mopidy.resume()
def next(self):
self.mopidy.next()
def previous(self):
self.mopidy.previous()
def lower_volume(self):
self.mopidy.lower_volume()
def restore_volume(self):
self.mopidy.restore_volume()
def track_info(self):
info = self.mopidy.currently_playing()
ret = {}
ret['name'] = info.get('name', '')
if 'album' in info:
ret['artist'] = info['album']['artists'][0]['name']
ret['album'] = info['album'].get('name', '')
else:
ret['artist'] = ''
ret['album'] = ''
return ret
| Python | 0 | @@ -439,80 +439,8 @@
e):%0A
- logger.debug('Could not connect to server, will retry quietly')%0A
@@ -1240,21 +1240,52 @@
elf.
-_c
+emitter.emit(Message('MopidyServiceC
onnect
-(None
+')
)%0A%0A
|
874e2c35bb0aea38a1161d96b8af484a69336ea6 | Add htpasswd.py to the contrib tree as it may be useful more generally than just for the Testing branch | contrib/htpasswd.py | contrib/htpasswd.py | Python | 0.000003 | @@ -0,0 +1,2742 @@
+#!/usr/bin/python%0A%22%22%22Replacement for htpasswd%22%22%22%0A%0Aimport os%0Aimport random%0Atry:%0A import crypt%0Aexcept ImportError:%0A import fcrypt as crypt%0Afrom optparse import OptionParser%0A%0A%0Adef salt():%0A %22%22%22Returns a string of 2 randome letters%22%22%22%0A # FIXME: Additional characters may be legal here.%0A letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'%0A return random.choice(letters) + random.choice(letters)%0A%0A%0Aclass HtpasswdFile:%0A def __init__(self, filename, create=False):%0A self.entries = %5B%5D%0A self.filename = filename%0A if not create:%0A if os.path.exists(self.filename):%0A self.load()%0A else:%0A raise Exception(%22%25s does not exist%22 %25 self.filename)%0A%0A def load(self):%0A lines = open(self.filename, 'r').readlines()%0A self.entries = %5B%5D%0A for line in lines:%0A username, hash = line.split(':')%0A entry = %5Busername, hash.rstrip()%5D%0A self.entries.append(entry)%0A%0A def save(self):%0A open(self.filename, 'w').writelines(%5B%22%25s:%25s%5Cn%22 %25 (entry%5B0%5D, entry%5B1%5D) for entry in self.entries%5D)%0A%0A def update(self, username, password):%0A hash = crypt.crypt(password, salt())%0A matching_entries = %5Bentry for entry in self.entries if entry%5B0%5D == username%5D%0A if matching_entries:%0A matching_entries%5B0%5D%5B1%5D = hash%0A else:%0A self.entries.append(%5Busername, hash%5D)%0A%0A def delete(self, username):%0A self.entries = %5Bentry for entry in self.entries if entry%5B0%5D != username%5D%0A%0Adef main():%0A %22%22%22%25prog %5B-c%5D -b filename username password%0A Create or update an htpasswd file%22%22%22%0A # For now, we only care about the use cases that affect tests/functional.py%0A parser = OptionParser(usage=main.__doc__)%0A parser.add_option('-b', action='store_true', dest='batch', default=False,%0A help='Batch mode; password is passed on the command line IN THE CLEAR.')%0A parser.add_option('-c', action='store_true', dest='create', default=False,%0A help='Create a new htpasswd file, overwriting any existing file.')%0A parser.add_option('-D', action='store_true', dest='delete_user', default=False,%0A help='Remove the given user from the password file.')%0A%0A options, args = parser.parse_args()%0A%0A assert(options.batch) # We only support batch mode for now.%0A%0A # Non-option arguments%0A filename, username = args%5B:2%5D%0A if options.delete_user:%0A password = None%0A else:%0A password = args%5B2%5D%0A%0A passwdfile = HtpasswdFile(filename, create=options.create)%0A%0A if options.delete_user:%0A passwdfile.delete(username)%0A else:%0A passwdfile.update(username, password)%0A%0A passwdfile.save()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
88eb8887bd71702fbf0c5095d8c2d637876de4b8 | Add the upload_file_test | examples/upload_file_test.py | examples/upload_file_test.py | Python | 0.000018 | @@ -0,0 +1,809 @@
+from seleniumbase import BaseCase%0A%0A%0Aclass FileUploadButtonTests(BaseCase):%0A%0A %22%22%22 The main purpose of this is to test the self.choose_file() method. %22%22%22%0A%0A def test_file_upload_button(self):%0A self.open(%22https://www.w3schools.com/jsref/tryit.asp%22%0A %22?filename=tryjsref_fileupload_get%22)%0A self.wait_for_element('%5Bid*=%22google_ads%22%5D')%0A self.remove_elements('%5Bid*=%22google_ads%22%5D')%0A self.switch_to_frame('iframeResult')%0A self.add_css_style(%0A 'input%5Btype=%22file%22%5D%7Bzoom: 1.5;-moz-transform: scale(1.5);%7D')%0A self.highlight('input%5Btype=%22file%22%5D')%0A self.choose_file('input%5Btype=%22file%22%5D', %22example_logs/screenshot.png%22)%0A self.demo_mode = True # Adds highlighting to the assert statement%0A self.assert_element('input%5Btype=%22file%22%5D')%0A
|
|
a98ba6efa109383ecc1dfeb07691dc0a4a4e2a5b | Update migrations | django_afip/migrations/0002_auto_20150909_1837.py | django_afip/migrations/0002_auto_20150909_1837.py | Python | 0.000001 | @@ -0,0 +1,627 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('afip', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='tax',%0A name='amount',%0A field=models.DecimalField(decimal_places=2, max_digits=15, verbose_name='cantidad'),%0A ),%0A migrations.AlterField(%0A model_name='vat',%0A name='amount',%0A field=models.DecimalField(decimal_places=2, max_digits=15, verbose_name='cantidad'),%0A ),%0A %5D%0A
|
|
9668580633a1a8baaa59030e5a52d2478222cbd2 | Add cost tracking file to openstack | nodeconductor/openstack/cost_tracking.py | nodeconductor/openstack/cost_tracking.py | Python | 0 | @@ -0,0 +1,300 @@
+from . import models%0Afrom nodeconductor.cost_tracking import CostTrackingBackend%0A%0A%0Aclass OpenStackCostTrackingBackend(CostTrackingBackend):%0A%0A @classmethod%0A def get_monthly_cost_estimate(cls, resource):%0A backend = resource.get_backend()%0A return backend.get_monthly_cost_estimate()%0A
|
|
2dfa68eb458cfc7d6166ede8a222b1d11b9577a0 | Create grabscreen.py | grabscreen.py | grabscreen.py | Python | 0 | @@ -0,0 +1,1214 @@
+# Done by Frannecklp%0A%0Aimport cv2%0Aimport numpy as np%0Aimport win32gui, win32ui, win32con, win32api%0A%0Adef grab_screen(region=None):%0A%0A hwin = win32gui.GetDesktopWindow()%0A%0A if region:%0A left,top,x2,y2 = region%0A width = x2 - left + 1%0A height = y2 - top + 1%0A else:%0A width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)%0A height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)%0A left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)%0A top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)%0A%0A%0A hwindc = win32gui.GetWindowDC(hwin)%0A srcdc = win32ui.CreateDCFromHandle(hwindc)%0A memdc = srcdc.CreateCompatibleDC()%0A bmp = win32ui.CreateBitmap()%0A bmp.CreateCompatibleBitmap(srcdc, width, height)%0A memdc.SelectObject(bmp)%0A memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)%0A %0A signedIntsArray = bmp.GetBitmapBits(True)%0A img = np.fromstring(signedIntsArray, dtype='uint8')%0A img.shape = (height,width,4)%0A%0A srcdc.DeleteDC()%0A memdc.DeleteDC()%0A win32gui.ReleaseDC(hwin, hwindc)%0A win32gui.DeleteObject(bmp.GetHandle())%0A%0A return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)%0A%0A
|
|
0486e02bbaefea63a2dff9983be51623a184dc66 | test python interpreter | test/test_interpreter_layer.py | test/test_interpreter_layer.py | Python | 0.000062 | @@ -0,0 +1,422 @@
+# This code is so you can run the samples without installing the package%0Aimport sys%0Aimport os%0Asys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))%0A#%0A%0A%0Aimport cocos%0Afrom cocos.director import director%0Aimport pyglet%0A %0A%0Aif __name__ == %22__main__%22:%0A director.init()%0A interpreter_layer = cocos.layer.InterpreterLayer()%0A main_scene = cocos.scene.Scene(interpreter_layer)%0A director.run(main_scene)%0A
|
|
76baf574ba5a4ff9e835412e27fd2ebc634a9992 | add Cython register test | new_pymtl/translation_tools/verilator_sim_test.py | new_pymtl/translation_tools/verilator_sim_test.py | Python | 0 | @@ -0,0 +1,471 @@
+from verilator_sim import get_verilated%0Afrom new_pmlib.regs import Reg%0Afrom new_pymtl import SimulationTool%0A%0Adef test_reg():%0A model = Reg(16)%0A print %22BEGIN%22%0A vmodel = get_verilated( model )%0A print %22END%22%0A%0A vmodel.elaborate()%0A%0A sim = SimulationTool( vmodel )%0A%0A sim.reset()%0A assert vmodel.out == 0%0A%0A vmodel.in_.value = 10%0A sim.cycle()%0A assert vmodel.out == 10%0A%0A vmodel.in_.value = 12%0A assert vmodel.out == 10%0A sim.cycle()%0A assert vmodel.out == 12%0A
|
|
214aa96b5e816ad6386fc20fed684152ac8181d1 | add migration for ip to generic ip field change | newsletters/migrations/0003_auto_20150701_1840.py | newsletters/migrations/0003_auto_20150701_1840.py | Python | 0 | @@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('newsletters', '0002_auto_20150630_0009'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='subscription',%0A name='ip',%0A field=models.GenericIPAddressField(),%0A ),%0A %5D%0A
|
|
f90a9e585b5de36b3abc11cf454cde75a44a1a6b | Include Overlay Utils | evaluation/overlay_utils.py | evaluation/overlay_utils.py | Python | 0 | @@ -0,0 +1,1892 @@
+#!/usr/bin/env python%0A%0A%22%22%22Utility functions for segmentation tasks.%22%22%22%0A%0Afrom PIL import Image%0Aimport scipy.ndimage%0Aimport numpy as np%0A%0A%0Adef replace_colors(segmentation, color_changes):%0A %22%22%22%0A Replace the values in segmentation to the values defined in color_changes.%0A%0A Parameters%0A ----------%0A segmentation : numpy array%0A Two dimensional%0A color_changes : dict%0A The key is the original color, the value is the color to change to.%0A The key 'default' is used when the color is not in the dict.%0A If default is not defined, no replacement is done.%0A Each color has to be a tuple (r, g, b) with r, g, b in %7B0, 1, ..., 255%7D%0A Returns%0A -------%0A np.array%0A The new colored segmentation%0A %22%22%22%0A width, height = segmentation.shape%0A output = scipy.misc.toimage(segmentation)%0A output = output.convert('RGBA')%0A for x in range(0, width):%0A for y in range(0, height):%0A if segmentation%5Bx, y%5D in color_changes:%0A output.putpixel((y, x), color_changes%5Bsegmentation%5Bx, y%5D%5D)%0A elif 'default' in color_changes:%0A output.putpixel((y, x), color_changes%5B'default'%5D)%0A return output%0A%0A%0Adef overlay_segmentation(image, segmentation, color_dict):%0A %22%22%22%0A Overlay original_image with segmentation_image.%0A%0A Parameters%0A ----------%0A %22%22%22%0A width, height = segmentation.shape%0A output = scipy.misc.toimage(segmentation)%0A output = output.convert('RGBA')%0A for x in range(0, width):%0A for y in range(0, height):%0A if segmentation%5Bx, y%5D in color_dict:%0A output.putpixel((y, x), color_dict%5Bsegmentation%5Bx, y%5D%5D)%0A elif 'default' in color_dict:%0A output.putpixel((y, x), color_dict%5B'default'%5D)%0A%0A background = scipy.misc.toimage(image)%0A background.paste(output, box=None, mask=output)%0A%0A return np.array(background)%0A
|
|
a12dd320df30404df8c8ec196e21067376cc1e2c | Add tests of table and column pickling | astropy/table/tests/test_pickle.py | astropy/table/tests/test_pickle.py | Python | 0 | @@ -0,0 +1,2155 @@
+import cPickle as pickle%0A%0Aimport numpy as np%0Aimport pytest%0A%0Afrom ...table import Table, Column, MaskedColumn%0A%0A%[email protected](params=%5B0, 1, -1%5D)%0Adef protocol(request):%0A %22%22%22%0A Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).%0A %22%22%22%0A return request.param%0A%0A%0Adef test_pickle_column(protocol):%0A c = Column(data=%5B1, 2%5D, name='a', format='%2505d', description='col a', unit='cm', meta=%7B'a': 1%7D)%0A cs = pickle.dumps(c)%0A cp = pickle.loads(cs)%0A assert np.all(cp == c)%0A assert cp.attrs_equal(c)%0A%0A%0Adef test_pickle_masked_column(protocol):%0A c = MaskedColumn(data=%5B1, 2%5D, name='a', format='%2505d', description='col a', unit='cm',%0A meta=%7B'a': 1%7D)%0A c.mask%5B1%5D = True%0A c.fill_value = -99%0A%0A cs = pickle.dumps(c)%0A cp = pickle.loads(cs)%0A%0A assert np.all(cp._data == c._data)%0A assert np.all(cp.mask == c.mask)%0A assert cp.attrs_equal(c)%0A assert cp.fill_value == -99%0A%0A%0Adef test_pickle_table(protocol):%0A a = Column(data=%5B1, 2%5D, name='a', format='%2505d', description='col a', unit='cm', meta=%7B'a': 1%7D)%0A b = Column(data=%5B3.0, 4.0%5D, name='b', format='%2505d', description='col b', unit='cm',%0A meta=%7B'b': 1%7D)%0A t = Table(%5Ba, b%5D, meta=%7B'a': 1%7D)%0A ts = pickle.dumps(t)%0A tp = pickle.loads(ts)%0A%0A assert np.all(tp%5B'a'%5D == t%5B'a'%5D)%0A assert np.all(tp%5B'b'%5D == t%5B'b'%5D)%0A assert tp%5B'a'%5D.attrs_equal(t%5B'a'%5D)%0A assert tp%5B'b'%5D.attrs_equal(t%5B'b'%5D)%0A assert tp.meta == t.meta%0A%0A%0Adef test_pickle_masked_table(protocol):%0A a = Column(data=%5B1, 2%5D, name='a', format='%2505d', description='col a', unit='cm', meta=%7B'a': 1%7D)%0A b = Column(data=%5B3.0, 4.0%5D, name='b', format='%2505d', description='col b', unit='cm',%0A meta=%7B'b': 1%7D)%0A t = Table(%5Ba, b%5D, meta=%7B'a': 1%7D, masked=True)%0A t%5B'a'%5D.mask%5B1%5D = True%0A t%5B'a'%5D.fill_value = -99%0A%0A ts = pickle.dumps(t)%0A tp = pickle.loads(ts)%0A%0A for colname in ('a', 'b'):%0A for attr in ('_data', 'mask', 'fill_value'):%0A assert np.all(getattr(tp%5Bcolname%5D, attr) == getattr(tp%5Bcolname%5D, attr))%0A%0A assert tp%5B'a'%5D.attrs_equal(t%5B'a'%5D)%0A assert tp%5B'b'%5D.attrs_equal(t%5B'b'%5D)%0A assert tp.meta == t.meta%0A
|
|
fb5f6b5db2e2701692dd0a35dfad36d7b6dd4f2d | Create example file | example.py | example.py | Python | 0.000001 | @@ -0,0 +1,537 @@
+from blender_wrapper.api import Scene%0Afrom blender_wrapper.api import Camera%0Afrom blender_wrapper.api import SunLamp%0Afrom blender_wrapper.api import ORIGIN%0A%0A%0Adef main():%0A scene = Scene(1500, 1000, filepath=%22~/Desktop/%22)%0A scene.setup()%0A%0A camera = Camera((1, 0, 1), (90, 0, 0), view_align=True)%0A camera.add_to_scene()%0A%0A lamp = SunLamp(10, (0, 0, 3), ORIGIN)%0A lamp.add_to_scene()%0A%0A scene.render(resolution_percentage=100)%0A%0A%0A# Execute running:%0A# blender --background -P ./test.py%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
68e16ca50bec3802184e098548aa2c2584c352b2 | Add main example code | signal_decorator.py | signal_decorator.py | Python | 0.000033 | @@ -0,0 +1,791 @@
+#!/usr/bin/python%0A__author__ = 'Neil Parley'%0A%0Afrom functools import wraps%0Aimport signal%0Aimport sys%0A%0A%0Adef catch_sig(f):%0A %22%22%22%0A Adds the signal handling as a decorator, define the signals and functions that handle them. Then wrap the functions%0A with your decorator.%0A :param f: Function%0A :return: Function wrapped with registered signal handling%0A %22%22%22%0A @wraps(f)%0A def reg_signal(*args, **kwargs):%0A def signal_handler(*args):%0A print('Got killed')%0A sys.exit(0)%0A%0A signal.signal(signal.SIGTERM, signal_handler)%0A signal.signal(signal.SIGINT, signal_handler)%0A return f(*args, **kwargs)%0A%0A return reg_signal%0A%0A%0A@catch_sig%0Adef test():%0A import time%0A print(%22Waiting%22)%0A time.sleep(60)%0A%0Aif __name__ == %22__main__%22:%0A test()
|
|
b4eec76e7e0d4ff28cf6e2678c45bf63c5f22874 | revert db settings | example_project/settings.py | example_project/settings.py | import os.path
import sys
# Django settings for example_project project.
DEBUG = True
TEMPLATE_DEBUG = True
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(PROJECT_ROOT, '..')))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'disqus', # Or path to database file if using sqlite3.
'USER': 'dzhou', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = ')*)&8a36)6%74e@-ne5(-!8a(vv#tkv)(eyg&@0=zd^pl!7=y@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example_project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'sentry',
'paging',
'south',
'indexer',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
)
import logging
logging.basicConfig(level=logging.DEBUG)
SENTRY_THRASHING_TIMEOUT = 0
SENTRY_FILTERS = (
'sentry.filters.LoggerFilter',
'sentry.filters.LevelFilter',
'sentry.filters.ServerNameFilter',
'example_project.filters.IPFilter',
)
| Python | 0 | @@ -482,14 +482,14 @@
': '
-disqus
+sentry
',
@@ -574,13 +574,16 @@
': '
-dzhou
+postgres
',
|
e0f985b912acbd0c5d922c4c1c0f25cf9ef17200 | raise an exception if fact table can not be determined | cubes/sql/mapper.py | cubes/sql/mapper.py | # -*- encoding: utf-8 -*-
"""Logical to Physical Mappers"""
from __future__ import absolute_import
from collections import namedtuple
from ..logging import get_logger
from ..errors import BackendError
from ..mapper import Mapper
from ..model import AttributeBase
from .. import compat
from .schema import to_column
__all__ = (
"SnowflakeMapper",
"DenormalizedMapper",
"DEFAULT_KEY_FIELD"
)
DEFAULT_KEY_FIELD = "id"
class SnowflakeMapper(Mapper):
"""Mapper is core clas for translating logical model to physical
database schema.
"""
# WARNING: do not put any SQL/engine/connection related stuff into this
# class yet. It might be moved to the cubes as one of top-level modules
# and subclassed here.
def __init__(self, cube, mappings=None, locale=None, schema=None,
fact_name=None, dimension_prefix=None, dimension_suffix=None,
joins=None, dimension_schema=None, **options):
"""A snowflake schema mapper for a cube. The mapper creates required
joins, resolves table names and maps logical references to tables and
respective columns.
Attributes:
* `cube` - mapped cube
* `mappings` – dictionary containing mappings
* `simplify_dimension_references` – references for flat dimensions
(with one level and no details) will be just dimension names, no
attribute name. Might be useful when using single-table schema, for
example, with couple of one-column dimensions.
* `dimension_prefix` – default prefix of dimension tables, if
default table name is used in physical reference construction
* `dimension_suffix` – default suffix of dimension tables, if
default table name is used in physical reference construction
* `fact_name` – fact name, if not specified then `cube.name` is used
* `schema` – default database schema
* `dimension_schema` – schema whre dimension tables are stored (if
different than fact table schema)
`mappings` is a dictionary where keys are logical attribute references
and values are table column references. The keys are mostly in the
form:
* ``attribute`` for measures and fact details
* ``attribute.locale`` for localized fact details
* ``dimension.attribute`` for dimension attributes
* ``dimension.attribute.locale`` for localized dimension attributes
The values might be specified as strings in the form ``table.column``
(covering most of the cases) or as a dictionary with keys ``schema``,
``table`` and ``column`` for more customized references.
.. In the future it might support automatic join detection.
"""
super(SnowflakeMapper, self).__init__(cube, locale=locale, **options)
self.mappings = mappings or cube.mappings
self.dimension_prefix = dimension_prefix or ""
self.dimension_suffix = dimension_suffix or ""
self.dimension_schema = dimension_schema
fact_prefix = options.get("fact_prefix") or ""
fact_suffix = options.get("fact_suffix") or ""
self.fact_name = fact_name or self.cube.fact or "%s%s%s" % \
(fact_prefix, self.cube.basename, fact_suffix)
self.schema = schema
def physical(self, attribute, locale=None):
"""Returns physical reference as tuple for `attribute`, which should
be an instance of :class:`cubes.model.Attribute`. If there is no
dimension specified in attribute, then fact table is assumed. The
returned tuple has structure: (`schema`, `table`, `column`).
The algorithm to find physical reference is as follows:
1. if there is mapping for `dimension.attribute`, use the mapping
2. if there is no mapping or no mapping was found, then use table
`dimension` or fact table, if attribute does not belong to a
dimension and column `attribute`
If table prefixes and suffixes are used, then they are
prepended/appended to the table tame in the implicit mapping.
If localization is requested and the attribute is localizable, then
suffix `_LOCALE` whre `LOCALE` is the locale name will be added to
search for mapping or for implicit attribute creation.
"""
if attribute.expression:
raise ModelError("Attribute '{}' has an expression, it can not "
"have a physical representation"
.format(attribute.name))
schema = self.dimension_schema or self.schema
reference = None
# Fix locale: if attribute is not localized, use none, if it is
# localized, then use specified if exists otherwise use default
# locale of the attribute (first one specified in the list)
locale = locale or self.locale
if attribute.is_localizable():
locale = locale if locale in attribute.locales \
else attribute.locales[0]
else:
locale = None
# Try to get mapping if exists
if self.cube.mappings:
logical = self.logical(attribute, locale)
# TODO: should default to non-localized reference if no mapping
# was found?
mapped_ref = self.cube.mappings.get(logical)
if mapped_ref:
reference = to_column(mapped_ref,
default_table=self.fact_name,
default_schema=self.schema)
# No mappings exist or no mapping was found - we are going to create
# default physical reference
if not reference:
column_name = attribute.name
if locale:
column_name += "_" + locale
# TODO: temporarily preserved. it should be attribute.owner
dimension = attribute.dimension
if dimension and not (self.simplify_dimension_references \
and (dimension.is_flat
and not dimension.has_details)):
table_name = "%s%s%s" % (self.dimension_prefix, dimension, self.dimension_suffix)
else:
table_name = self.fact_name
reference = to_column((schema, table_name, column_name))
return reference
# TODO: is this still needed?
def physical_references(self, attributes, expand_locales=False):
"""Convert `attributes` to physical attributes. If `expand_locales` is
``True`` then physical reference for every attribute locale is
returned."""
if expand_locales:
physical_attrs = []
for attr in attributes:
if attr.is_localizable():
refs = [self.physical(attr, locale) for locale in attr.locales]
else:
refs = [self.physical(attr)]
physical_attrs += refs
else:
physical_attrs = [self.physical(attr) for attr in attributes]
return physical_attrs
class DenormalizedMapper(Mapper):
def __init__(self, cube, locale=None, schema=None,
fact_name=None, denormalized_view_prefix=None,
denormalized_view_schema=None,
**options):
"""Creates a mapper for a cube that has data stored in a denormalized
view/table.
Attributes:
* `denormalized_view_prefix` – default prefix used for constructing
view name from cube name
* `fact_name` – fact name, if not specified then `cube.name` is used
* `schema` – schema where the denormalized view is stored
* `fact_schema` – database schema for the original fact table
"""
super(DenormalizedMapper, self).__init__(cube, locale=locale,
schema=schema, fact_name=fact_name)
dview_prefix = denormalized_view_prefix or ""
# FIXME: this hides original fact name, we do not want that
self.fact_name = options.get("denormalized_view") or dview_prefix + \
self.cube.basename
self.fact_schema = self.schema
self.schema = self.schema or denormalized_view_schema
def physical(self, attribute, locale=None):
"""Returns same name as localized logical reference.
"""
locale = locale or self.locale
try:
if attribute.locales:
locale = locale if locale in attribute.locales \
else attribute.locales[0]
else:
locale = None
except:
locale = None
column_name = self.logical(attribute, locale)
reference = to_column((self.schema, self.fact_name, column_name))
return reference
| Python | 0.000001 | @@ -196,16 +196,28 @@
endError
+, ModelError
%0Afrom ..
@@ -3178,32 +3178,167 @@
_suffix%22) or %22%22%0A
+%0A if not (fact_name or self.cube.fact or self.cube.basename):%0A raise ModelError(%22Can not determine cube fact name%22)%0A%0A
self.fac
|
bec85af38596c2a4c38b8a53e3960a9ba375fe6f | remove sklearn.test() | sklearn/__init__.py | sklearn/__init__.py | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
__version__ = '0.14-git'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
try:
from numpy.testing import nosetester
class _NoseTester(nosetester.NoseTester):
""" Subclass numpy's NoseTester to add doctests by default
"""
def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
>>> from sklearn import test
>>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
"""
return super(_NoseTester, self).test(label=label,
verbose=verbose,
extra_argv=extra_argv,
doctests=doctests,
coverage=coverage)
try:
test = _NoseTester(raise_warnings="release").test
except TypeError:
# Older versions of numpy do not have a raise_warnings argument
test = _NoseTester().test
del nosetester
except:
pass
__all__ = ['cross_validation', 'cluster', 'covariance',
'datasets', 'decomposition', 'feature_extraction',
'feature_selection', 'semi_supervised',
'gaussian_process', 'grid_search', 'hmm', 'lda', 'linear_model',
'metrics', 'mixture', 'naive_bayes', 'neighbors', 'pipeline',
'preprocessing', 'qda', 'svm', 'test', 'clone',
'cross_decomposition',
'isotonic', 'pls']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs
"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| Python | 0 | @@ -1064,355 +1064,231 @@
-try:%0A from numpy.testing import nosetester%0A%0A class _NoseTester(nosetester.NoseTester):%0A %22%22%22 Subclass numpy's NoseTester to add doctests by default%0A %22%22%22%0A%0A def test(self, label='fast', verbose=1, extra_argv=%5B'--exe'%5D,%0A doctests=True, coverage=False):%0A %22%22%22Run the full
+def test(*args, **kwargs):%0A import warnings%0A # Not using a DeprecationWarning, as they are turned off by%0A # default%0A warnings.warn(%22%22%22sklearn.test() is no longer supported to run the%0Ascikit-learn
tes
@@ -1298,913 +1298,281 @@
uite
+.
%0A%0A
- Examples%0A --------%0A This will run the test suite and stop at the first failing%0A example%0A %3E%3E%3E from sklearn import test%0A %3E%3E%3E test(extra_argv=%5B'--exe', '-sx'%5D) #doctest: +SKIP%0A %22%22%22%0A return super(_NoseTester, self).test(label=label,%0A verbose=verbose,%0A extra_argv=extra_argv,%0A doctests=doctests,%0A coverage=coverage)%0A%0A try:%0A test = _NoseTester(raise_warnings=%22release%22).test%0A except TypeError:%0A # Older versions of numpy do not have a raise_warnings argument%0A test = _NoseTester().test%0A del nosetester%0A except:%0A pass
+After installation, you can launch the test suite from outside the%0Asource directory (you will need to have nosetests installed)::%0A%0A $ nosetests --exe sklearn%0A%0ASee the web page http://scikit-learn.org/stable/install.html#testing%0Afor more information.%0A%22%22%22, stacklevel=2)
%0A%0A
@@ -1956,16 +1956,8 @@
vm',
- 'test',
'cl
|
950bdd0f528fc61175c39dc2ade6abb9d46d767a | Change plan on book | contacts/migrations/0027_auto_20170106_0627.py | contacts/migrations/0027_auto_20170106_0627.py | Python | 0 | @@ -0,0 +1,1229 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.11 on 2017-01-06 06:27%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('contacts', '0026_auto_20161231_2045'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='book',%0A name='plan',%0A field=models.CharField(blank=True, choices=%5B('team_monthly', 'Team Monthly Subscription'), ('basic_yearly', 'Basic Yearly Subscription'), ('basic_monthly', 'Basic Monthly Subscription'), ('family_monthly', 'Family Monthly Subscription'), ('family_yearly', 'Family Yearly Subscription'), ('team_yearly', 'Team Yearly Subscription')%5D, max_length=100),%0A ),%0A migrations.AlterField(%0A model_name='historicalbook',%0A name='plan',%0A field=models.CharField(blank=True, choices=%5B('team_monthly', 'Team Monthly Subscription'), ('basic_yearly', 'Basic Yearly Subscription'), ('basic_monthly', 'Basic Monthly Subscription'), ('family_monthly', 'Family Monthly Subscription'), ('family_yearly', 'Family Yearly Subscription'), ('team_yearly', 'Team Yearly Subscription')%5D, max_length=100),%0A ),%0A %5D%0A
|
|
97c87237de87c91d66a92c1cacc362a7b831b8ef | add script to install python modules with pip | install_py_modules.py | install_py_modules.py | Python | 0 | @@ -0,0 +1,689 @@
+# this will install most necessary packages for this project%0A# that you may not already have on your system%0A%0Aimport pip%0A%0Adef install(package):%0A pip.main(%5B'install', package%5D)%0A%0A# Example%0Aif __name__ == '__main__':%0A # for scraping akc.org for a list of breed names and pics%0A install('Scrapy')%0A # for calculating Haralick textures%0A install('mahotas')%0A # image operations convenience functions%0A install('imutils')%0A # plotting package%0A install('seaborn')%0A # data operations%0A install('pandas')%0A # machine learning lib%0A install('scikit-learn')%0A # image processing%0A install('scikit-image')%0A # eta and %25 completion of tasks%0A install('progressbar')
|
|
98e822a78722e735b31817e74cc5e310fcb43c9a | add missed migration (HomeBanner verbose texts) | brasilcomvc/portal/migrations/0005_homebanner_verbose.py | brasilcomvc/portal/migrations/0005_homebanner_verbose.py | Python | 0 | @@ -0,0 +1,434 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('portal', '0004_homebanner_image_upload_to'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='homebanner',%0A options=%7B'verbose_name_plural': 'Banners da Home', 'verbose_name': 'Banner da Home'%7D,%0A ),%0A %5D%0A
|
|
35310a8fa136b5b6e094401a8289f5eabeb28cbc | Create batterylevel.py | home/hairygael/batterylevel.py | home/hairygael/batterylevel.py | Python | 0.000063 | @@ -0,0 +1,194 @@
+def batterylevel():%0A power_now = subprocess.call (%22WMIC PATH Win32_Battery Get EstimatedChargeRemaining%22, %22r%22.readline())%0A ANSWER = float(power_now) * 100 , %22%25%22%0A i01.mouth.speak(str(ANSWER))%0A
|
|
c7851b61268848cf1b02d9e5c845a846ded4c2a7 | Update __init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/Messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/Messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| Python | 0.000072 | @@ -384,33 +384,33 @@
= 'clusters/%25s/
-M
+m
essages/%25s'%0A
@@ -578,17 +578,17 @@
ters/%25s/
-M
+m
essages/
|
7fddacd1a751c095f70693bb703bb9959a706ae1 | Add an example with end to end data | example.py | example.py | Python | 0.00006 | @@ -0,0 +1,2234 @@
+%22%22%22%0AExample script for getting events over a Zaqar queue.%0A%0ATo run:%0A$ export IDENTITY_API_VERSION=3%0A$ source ~/devstack/openrc%0A$ python example.py%0A%22%22%22%0Aimport json%0Aimport os%0Aimport uuid%0A%0Aimport requests%0Aimport websocket%0A%0Afrom keystoneauth1.identity import v3%0Afrom keystoneauth1 import session%0A%0A%0Aclient_id = str(uuid.uuid4())%0A%0A%0Adef authenticate(ws, token, project_id):%0A ws.send(json.dumps(%0A %7B'action': 'authenticate',%0A 'headers': %7B'X-Auth-Token': token,%0A 'Client-ID': client_id,%0A 'X-Project-ID': project_id%7D%7D))%0A return ws.recv()%0A%0A%0Adef send_message(ws, project_id, action, body=None):%0A msg = %7B'action': action,%0A 'headers': %7B'Client-ID': client_id, 'X-Project-ID': project_id%7D%7D%0A if body:%0A msg%5B'body'%5D = body%0A ws.send(json.dumps(msg))%0A return json.loads(ws.recv())%0A%0A%0Adef main():%0A auth_url = os.environ.get('OS_AUTH_URL')%0A user = os.environ.get('OS_USERNAME')%0A password = os.environ.get('OS_PASSWORD')%0A project = os.environ.get('OS_PROJECT_NAME')%0A auth = v3.Password(auth_url=auth_url,%0A username=user,%0A user_domain_name='default',%0A password=password,%0A project_name=project,%0A project_domain_name='default')%0A sess = session.Session(auth=auth)%0A token = auth.get_token(sess)%0A project_id = auth.get_project_id(project)%0A%0A nabu_url = auth.get_endpoint(sess, service_type='subscription')%0A requests.post('%25s/v1/subscription' %25 (nabu_url,),%0A data=json.dumps(%7B'source': 'compute',%0A 'target': 'nabu_queue'%7D),%0A headers=%7B'X-Auth-Token': token,%0A 'Content-Type': 'application/json'%7D)%0A ws_url = auth.get_endpoint(sess, service_type='messaging-websocket')%0A ws = websocket.create_connection(ws_url.replace('http', 'ws'))%0A authenticate(ws, token, project_id)%0A send_message(ws, project_id, 'queue_create', %7B'queue_name': 'nabu_queue'%7D)%0A send_message(ws, project_id, 'subscription_create',%0A %7B'queue_name': 'nabu_queue', 'ttl': 3000%7D)%0A while True:%0A ws.recv()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
e1907624a143d0733cd89e5458d104ed0a4fee43 | Add simple tasks | fabfile.py | fabfile.py | Python | 0.999917 | @@ -0,0 +1,100 @@
+# Simple Tasks%0A%0Adef hello():%0A print 'Hello ThaiPy!'%0A%0A%0Adef hi(name='Kan'):%0A print 'Hi ' + name%0A
|
|
50769229ce8ef4e84f345184b0aebf036bc0e179 | add fabfile | fabfile.py | fabfile.py | Python | 0.000002 | @@ -0,0 +1,338 @@
+from fabric.api import local, put, run, cd, sudo%0A%0Adef status():%0A run(%22systemctl status web%22)%0A%0Adef restart():%0A sudo(%22systemctl restart web%22)%0A%0Adef deploy():%0A local('tar -czf cydev_web.tgz web static/')%0A put(%22cydev_web.tgz%22, %22~/cydev.ru%22)%0A with cd(%22~/cydev.ru%22):%0A run(%22tar -xvf cydev_web.tgz%22)%0A restart()%0A status()%0A
|
|
fe0d8aa2e8293a14c9f2b0ac9fe9c51a99b75f16 | Make gallery images a bit smaller. | docs/source/notebook_gen_sphinxext.py | docs/source/notebook_gen_sphinxext.py | #
# Generation of RST from notebooks
#
import glob
import os
import os.path
import warnings
warnings.simplefilter('ignore')
from nbconvert.exporters import rst
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.connect('builder-inited', generate_rst)
return dict(
version='0.1',
parallel_read_safe=True,
parallel_write_safe=True
)
notebook_source_dir = '../../examples/notebooks'
generated_source_dir = 'examples/generated'
def nb_to_rst(nb_path):
"""convert notebook to restructured text"""
exporter = rst.RSTExporter()
out, resources = exporter.from_file(open(nb_path))
basename = os.path.splitext(os.path.basename(nb_path))[0]
imgdir = basename + '_files'
img_prefix = os.path.join(imgdir, basename + '_')
resources['metadata']['basename'] = basename
resources['metadata']['name'] = basename.replace('_', ' ')
resources['metadata']['imgdir'] = imgdir
base_url = ('http://nbviewer.ipython.org/github/metpy/MetPy/blob/master/'
'examples/notebooks/')
out_lines = ['`Notebook <%s>`_' % (base_url + os.path.basename(nb_path))]
for line in out.split('\n'):
if line.startswith('.. image:: '):
line = line.replace('output_', img_prefix)
out_lines.append(line)
out = '\n'.join(out_lines)
return out, resources
def write_nb(dest, output, resources):
if not os.path.exists(dest):
os.makedirs(dest)
rst_file = os.path.join(dest,
resources['metadata']['basename'] + resources['output_extension'])
name = resources['metadata']['name']
with open(rst_file, 'w') as rst:
header = '=' * len(name)
rst.write(header.encode('utf-8') + b'\n')
rst.write(name.encode('utf-8') + b'\n')
rst.write(header.encode('utf-8') + b'\n')
rst.write(output.encode('utf-8'))
imgdir = os.path.join(dest, resources['metadata']['imgdir'])
if not os.path.exists(imgdir):
os.makedirs(imgdir)
basename = resources['metadata']['basename']
for filename in resources['outputs']:
img_file = os.path.join(imgdir, filename.replace('output_', basename + '_'))
with open(img_file, 'wb') as img:
img.write(resources['outputs'][filename])
def generate_rst(app):
for fname in glob.glob(os.path.join(app.srcdir, notebook_source_dir, '*.ipynb')):
write_nb(os.path.join(app.srcdir, generated_source_dir), *nb_to_rst(fname))
with open(os.path.join(app.srcdir, 'examples', 'index.rst'), 'w') as test:
test.write('==============\n''MetPy Examples\n''==============\n'
'.. toctree::\n :glob:\n :hidden:\n\n generated/*\n\n')
no_images = []
for fname in glob.glob(os.path.join(app.srcdir, generated_source_dir, '*.rst')):
filepath, filename = os.path.split(fname)
target = filename.replace('.rst', '.html')
dir = os.listdir(os.path.join(app.srcdir, generated_source_dir, filename.replace('.rst', '_files')))
if dir:
file = dir[0]
test.write('.. image:: generated/' + filename.replace('.rst', '_files') + '/' + file +
'\n :height: 300px'
'\n :width: 375px'
'\n :target: generated/' + target + '\n\n')
else:
no_images.append(target)
for filename in no_images:
test.write('`' + filename.replace('_', ' ').replace('.html', '') +
' <generated/' + filename + '>`_\n\n')
| Python | 0 | @@ -3247,57 +3247,8 @@
e +%0A
- '%5Cn :height: 300px'%0A
@@ -3288,11 +3288,11 @@
th:
-375
+220
px'%0A
|
2af8c695c1463c080ce8c4bff7e3d81662a49c81 | implement generic decorator and register function | dispatk.py | dispatk.py | Python | 0 | @@ -0,0 +1,1678 @@
+%22%22%22%0AThis function is inspired by singledispatch of Python 3.4+ (PEP 443),%0Abut the dispatch happens on the key extracted fro the arguments values.%0A%0Afrom dispatk import dispatk%0A%0A@dispatk(lambda n: int(n))%0Adef fib(n):%0A return fib(n-1) + fib(n-2)%0A%[email protected](0)%0Adef _(n):%0A return 0%0A%[email protected](1, 2)%0Adef _(n):%0A return 1%0A%[email protected](41)%0Adef _(n):%0A return 165580141%0A%0A%0A*register* accepts one or more keys.%0A%[email protected](1, 2)%0Adef _(n):%0A return 1%0A%0Ais equivalent to%0A%[email protected](1)%[email protected](2)%0Adef _(n):%0A return 1%0A%0A%0A%22%22%22%0Afrom functools import wraps%0A%0A__all__ = ('dispatk',)%0A%0A%0Adef dispatk(keyer):%0A %22%22%22This is the decorator for the generic function and it accepts%0A only one argument *keyer*, it'll be called with the same arguments%0A of the function call and it must return an hashable object%0A (int, tuple, etc.).%0A%0A Rhe generic function has a *register* method used to decorate the%0A function for some specific keys.%0A *register* accepts one or more keys and returns the decorated%0A function.%0A %22%22%22%0A calls = %7B%7D%0A%0A def _dispatk(main):%0A def register(*keys):%0A def _register(spec):%0A for key in keys:%0A if key in calls:%0A raise ValueError(%0A %22function already registered for %25r%22%0A %25 (main.__name__, key))%0A%0A calls%5Bkey%5D = spec%0A%0A return spec%0A%0A return _register%0A%0A @wraps(main)%0A def run(*args, **kwargs):%0A return calls.get(keyer(*args, **kwargs), main)(*args, **kwargs)%0A%0A run.register = register%0A%0A return run%0A%0A return _dispatk%0A
|
|
e823c55f62c8aa1d72ec3bf2b58288b3dd413561 | Create radix_sort.py | sorts/radix_sort.py | sorts/radix_sort.py | Python | 0.000003 | @@ -0,0 +1,580 @@
+def radixsort(lst):%0A RADIX = 10%0A maxLength = False%0A tmp , placement = -1, 1%0A %0A while not maxLength:%0A maxLength = True%0A # declare and initialize buckets%0A buckets = %5Blist() for _ in range( RADIX )%5D%0A %0A # split lst between lists%0A for i in lst:%0A tmp = i / placement%0A buckets%5Btmp %25 RADIX%5D.append( i )%0A if maxLength and tmp %3E 0:%0A maxLength = False%0A %0A # empty lists into lst array%0A a = 0%0A for b in range( RADIX ):%0A buck = buckets%5Bb%5D%0A for i in buck:%0A lst%5Ba%5D = i%0A a += 1%0A %0A # move to next%0A placement *= RADIX%0A
|
|
2b810eb1900ca96c7fb2d8b63b70b7b0df8b9ed5 | Create find_digits.py | algorithms/implementation/python3/find_digits.py | algorithms/implementation/python3/find_digits.py | Python | 0.998631 | @@ -0,0 +1,271 @@
+#!/bin/python3%0A%0Aimport sys%0A%0A%0At = int(input().strip())%0Afor a0 in range(t):%0A n = int(input().strip())%0A%0A count = 0%0A digits = str(n)%0A for digit in digits:%0A if int(digit) != 0:%0A if n %25 int(digit) == 0:%0A count += 1%0A print(count)%0A
|
|
c723865ae8013020f6f0a28cd41592c3dc900968 | add a second test for process_dc_env. | tests/process_dc_env_test_2.py | tests/process_dc_env_test_2.py | Python | 0 | @@ -0,0 +1,3216 @@
+#!/usr/bin/env python%0Aimport sys%0Aimport os%0Aimport argparse%0A# There is a PEP8 warning about this next line not being at the top of the file.%0A# The better answer is to append the $dcUTILS/scripts directory to the sys.path%0A# but I wanted to illustrate it here...so your mileage may vary how you want%0Afrom process_dc_env import pythonGetEnv%0A# ==============================================================================%0A%22%22%22%0AThis script provides an example of how to use the process_dc_env.py in a python%0Ascript. In a python script, the pythonGetEnv is imported from the%0Aprocess_dc_env script and then called directly in the script. That function will%0Ado the necessary handling of some of the arguments on behalf of the python%0Ascript. Any other arguments passed in are ignored by the process_dc_env script%0Aand it is expected that the python script would handle the rest of them. The%0ApythonGetEnv will return a environment list presented in a dictionary with the%0Aenvironment variable set as the key and the value, is, well, the value.%0A%0ANote that the argparse statement for processing arguments needs to be a bit%0Adifferent than what you probably normally use. We need to ignore some of the%0Acommands that are processed in the proces_dc_env.py (ie appName, env and%0AworkspaceName if used). to do this use parse_known_args instead of parse_args%0A%22%22%22%0A__version__ = %220.1%22%0A%0A__copyright__ = %22Copyright 2016, devops.center%22%0A__credits__ = %5B%22Bob Lozano%22, %22Gregg Jensen%22%5D%0A__license__ = %22GPL%22%0A__status__ = %22Development%22%0A# ==============================================================================%0A%0A%0Adef checkArgs():%0A parser = argparse.ArgumentParser(%0A description='Script that provides a facility to watch for file ' +%0A 'changes and then perform actions based upon the files' +%0A ' that change.')%0A parser.add_argument('-f', '--foo', help='foo option',%0A required=False)%0A parser.add_argument('-w', '--workspaceName', help='The alternate ' +%0A 'directory name to find the application env files ' +%0A 'in. This will not change the .dcConfig/' +%0A 'baseDiretory file but will read it for the ' +%0A 'alternate path and use it directly',%0A required=False)%0A%0A # old way%0A # args = parser.parse_args()%0A%0A # new way and the extra options are in the unknown part%0A args, unknown = parser.parse_known_args()%0A%0A # if we get here then the%0A return (args.foo, args.workspaceName)%0A%0A%0Adef main(argv):%0A # for manageApp.py only ... or designed to only be used by manageApp.py%0A # retVals = pythonGetEnv(initialCreate=True)%0A%0A # normal call for all other python scripts%0A try:%0A (foo, workspaceName) = checkArgs()%0A except SystemExit:%0A pythonGetEnv()%0A sys.exit(1)%0A%0A retVals = pythonGetEnv()%0A%0A print %22=%3E%7B%7D%3C=%22.format(retVals)%0A print %22foo=%7B%7D%22.format(foo)%0A print %22workspaceName=%7B%7D%22.format(workspaceName)%0A%0A print %22CUSTOMER_APP_NAME=%22 + retVals%5B%22CUSTOMER_APP_NAME%22%5D%0A print %22ENV=%22 + retVals%5B%22ENV%22%5D%0A%0Aif __name__ == %22__main__%22:%0A main(sys.argv%5B1:%5D)%0A%0A# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4%0A
|
|
51aefefc3cdcd131678e921a29b5acd5b9601b81 | add a unit-tests that essentially import the the python python file in src/dynamic_graph/ | tests/python/python_imports.py | tests/python/python_imports.py | Python | 0 | @@ -0,0 +1,2778 @@
+#!/usr/bin/env python%0A%0Aimport unittest%0A%0Aclass PythonImportTest(unittest.TestCase):%0A def test_math_small_entities(self):%0A try:%0A import dynamic_graph.sot.core.math_small_entities%0A except ImportError as ie:%0A self.fail(str(ie))%0A %0A def test_feature_position_relative(self):%0A try:%0A import dynamic_graph.sot.core.feature_position_relative%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_feature_position(self):%0A try:%0A import dynamic_graph.sot.core.feature_position%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_matrix_util(self):%0A try:%0A import dynamic_graph.sot.core.matrix_util%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_meta_task_6d(self):%0A try:%0A import dynamic_graph.sot.core.meta_task_6d%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_meta_task_posture(self):%0A try:%0A import dynamic_graph.sot.core.meta_task_posture%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_meta_task_visual_point(self):%0A try:%0A import dynamic_graph.sot.core.meta_task_visual_point%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_meta_tasks_kine_relative(self):%0A try:%0A import dynamic_graph.sot.core.meta_tasks_kine_relative%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_meta_tasks_kine(self):%0A try:%0A import dynamic_graph.sot.core.meta_tasks_kine%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_meta_tasks(self):%0A try:%0A import dynamic_graph.sot.core.meta_tasks%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_attime(self):%0A try:%0A import dynamic_graph.sot.core.utils.attime%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_history(self):%0A try:%0A import dynamic_graph.sot.core.utils.history%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A def test_thread_interruptible_loop(self):%0A try:%0A import dynamic_graph.sot.core.utils.thread_interruptible_loop%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A%0A def test_viewer_helper(self):%0A try:%0A import dynamic_graph.sot.core.utils.viewer_helper%0A except ImportError as ie:%0A self.fail(str(ie))%0A%0A%0A def test_viewer_loger(self):%0A try:%0A import dynamic_graph.sot.core.utils.viewer_loger%0A except ImportError as ie:%0A self.fail(str(ie))%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
f8067853546a9c25716aef6bc9f255591cb65626 | Add migration to change the project results report URL | akvo/rsr/migrations/0125_auto_20180315_0829.py | akvo/rsr/migrations/0125_auto_20180315_0829.py | Python | 0 | @@ -0,0 +1,983 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0AORIGINAL_URL = '/en/reports/project_results/%7Bproject%7D?format=%7Bformat%7D&download=true'%0ANEW_URL = ORIGINAL_URL + '&p_StartDate=%7Bstart_date%7D&p_EndDate=%7Bend_date%7D'%0AREPORT_ID = 1%0A%0A%0Adef add_start_end_dates_report_url(apps, schema):%0A Report = apps.get_model('rsr', 'Report')%0A project_results_report = Report.objects.get(id=REPORT_ID)%0A project_results_report.url = NEW_URL%0A project_results_report.save()%0A%0A%0Adef remove_start_end_dates_report_url(apps, schema):%0A Report = apps.get_model('rsr', 'Report')%0A project_results_report = Report.objects.get(id=REPORT_ID)%0A project_results_report.url = ORIGINAL_URL%0A project_results_report.save()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('rsr', '0124_auto_20180309_0923'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(add_start_end_dates_report_url, remove_start_end_dates_report_url)%0A %5D%0A
|
|
2aa0990746b71086b4c31ee81ac8874436c63e32 | Add a few tests (close #4) | tests/test_crosslinking_bot.py | tests/test_crosslinking_bot.py | Python | 0.000001 | @@ -0,0 +1,1474 @@
+from datetime import datetime%0Afrom datetime import date, timedelta%0A%0Aimport pytest%0A%0Afrom crosslinking_bot import crosslinking_bot as cb%0A%0A%0Aclass TestParseDate:%0A%0A def test_return_today(self):%0A today = datetime.today().date()%0A assert 'today' == cb.parse_date(today)%0A%0A def test_return_1_day_ago(self):%0A yesterday = date.today() - timedelta(1)%0A assert '1 day ago' == cb.parse_date(yesterday)%0A%0A def test_return_2_days_ago(self):%0A two_days_ago = date.today() - timedelta(2)%0A assert '2 days ago' == cb.parse_date(two_days_ago)%0A%0Aclass TestPrepareComment:%0A @pytest.fixture%0A def hn_hits(self):%0A return %5B%7B%0A 'objectID': 12135399,%0A 'created_at_i': 1469823139,%0A %7D,%0A %7B%0A 'objectID': 12135398,%0A 'created_at_i': 1469821139,%0A %7D,%0A %5D%0A%0A def test_one_hit_contains_right_url(hn_hits):%0A hn_hits = %5Bhn_hits.hn_hits()%5B0%5D%5D%0A hn_url = cb.HN_STORY.format(hn_hits%5B0%5D%5B'objectID'%5D)%0A assert hn_url in cb.prepare_comment(hn_hits)%0A%0A def test_two_hits_contain_second_url(hn_hits):%0A hn_hits = hn_hits.hn_hits()%0A hn_url = cb.HN_STORY.format(hn_hits%5B1%5D%5B'objectID'%5D)%0A assert hn_url in cb.prepare_comment(hn_hits)%0A%0A def test_two_hits_contain_plural_form(hn_hits):%0A hn_hits = hn_hits.hn_hits()%0A hn_url = cb.HN_STORY.format(hn_hits%5B1%5D%5B'objectID'%5D)%0A assert 'discussions' in cb.prepare_comment(hn_hits)%0A
|
|
2d1f8a160b01ff9f57167f248c560675c4dc77a9 | Use the `@wraps` decorator with `@odin.local`. | distarray/odin.py | distarray/odin.py | """
ODIN: ODin Isn't Numpy
"""
from itertools import chain
from IPython.parallel import Client
from distarray.client import Context, DistArray
# Set up a global Context on import
_global_client = Client()
_global_view = _global_client[:]
_global_context = Context(_global_view)
context = _global_context
def flatten(lst):
"""Given a list of lists, return a flattened list.
Only flattens one level. For example,
>>> flatten(zip(['a', 'b', 'c'], [1, 2, 3]))
['a', 1, 'b', 2, 'c', 3]
>>> flatten([[1, 2], [3, 4, 5], [[5], [6], [7]]])
[1, 2, 3, 4, 5, [5], [6], [7]]
"""
return list(chain.from_iterable(lst))
def all_equal(lst):
"""Return True if all elements in `lst` are equal.
Also returns True if list is empty.
"""
if len(lst) == 0 or len(lst) == 1:
return True # vacuously True
else:
return all(element == lst[0] for element in lst[1:])
def key_and_push_args(subcontext, arglist):
"""For each arg in arglist, get or generate a key (UUID).
For DistArray objects, just get the existing key. For
everything else, generate a key and push the value to the engines
Parameters
----------
subcontext : Context
arglist : List of objects to key and/or push
Returns
-------
arg_keys : list of keys
"""
arg_keys = []
for arg in arglist:
if isinstance(arg, DistArray):
# if a DistArray, use its existing key
arg_keys.append(arg.key)
is_same_context = (subcontext == arg.context)
err_msg_fmt = "DistArray context mismatch: {} {}"
assert is_same_context, err_msg_fmt.format(subcontext, arg.context)
else:
# if not a DistArray, key it and push it to engines
arg_keys.extend(subcontext._key_and_push(arg))
return arg_keys
def determine_context(definition_context, args):
"""Determine the Context for a function.
Parameters
----------
definition_context: Context object
The Context in which the function was defined.
args : iterable
List of objects to inspect for context. Objects that aren't of
type DistArray are skipped.
Returns
-------
Context
If all provided DistArray objects have the same context.
Raises
------
ValueError
Raised if all DistArray objects don't have the same context.
"""
contexts = [definition_context] + [arg.context for arg in args if isinstance(arg, DistArray)]
if not all_equal(contexts):
errmsg = "All DistArray objects must be defined with the same context used for the function: {}"
raise ValueError(errmsg.format(contexts))
else:
return contexts[0]
def process_return_value(subcontext, result_key):
"""Figure out what to return on the Client.
Parameters
----------
key : string
Key corresponding to wrapped function's return value.
Returns
-------
A DistArray (if locally it's a DistArray), a None (if locally
it's a None).
Raises
------
TypeError for any other type besides those handled above
"""
type_key = subcontext._generate_key()
type_statement = "{} = str(type({}))".format(type_key, result_key)
subcontext._execute0(type_statement)
result_type_str = subcontext._pull0(type_key)
if (result_type_str == "<type 'NoneType'>" or # Python 2
result_type_str == "<class 'NoneType'>"): # Python 3
result = None
elif result_type_str == "<class 'distarray.core.denselocalarray.DenseLocalArray'>":
result = DistArray(result_key, subcontext)
else:
msg = ("Type is {}. @local is not yet implemented for return types"
"other than DistArray and NoneType").format(result_type_str)
raise TypeError(msg)
return result
def local(fn):
"""Decorator indicating a function is run locally on engines.
Parameters
----------
fn : function to wrap to run locally on engines
Returns
-------
fn : function wrapped to run locally on engines
"""
# we want @local functions to be able to call each other, so push
# their `__name__` as their key
func_key = fn.__name__
_global_context._push({func_key: fn})
result_key = _global_context._generate_key()
def inner(*args, **kwargs):
subcontext = determine_context(_global_context, flatten((args, kwargs.values())))
# generate keys for each parameter
# push to engines if not a DistArray
arg_keys = key_and_push_args(subcontext, args)
kwarg_names = kwargs.keys()
kwarg_keys = key_and_push_args(subcontext, kwargs.values())
# build up a python statement as a string
args_fmt = ','.join(['{}'] * len(arg_keys))
kwargs_fmt = ','.join(['{}={}'] * len(kwarg_keys))
# remove empty strings before joining
fmts = (fmt for fmt in (args_fmt, kwargs_fmt) if fmt)
fnargs_fmt = ','.join(fmts)
statement_fmt = ''.join(['{} = {}(', fnargs_fmt, ')'])
replacement_values = ([result_key, func_key] + arg_keys +
flatten(zip(kwarg_names, kwarg_keys)))
statement = statement_fmt.format(*replacement_values)
# execute it locally
subcontext._execute(statement)
return process_return_value(subcontext, result_key)
return inner
| Python | 0 | @@ -52,16 +52,44 @@
rt chain
+%0Afrom functools import wraps
%0A%0Afrom I
@@ -4355,16 +4355,31 @@
_key()%0A%0A
+ @wraps(fn)%0A
def
|
3a178c100cbf64b8ab60954a9b9ea5a01640f842 | Integrate LLVM at llvm/llvm-project@852d84e36ed7 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "0128f8016770655fe7a40d3657f00853e6badb93"
LLVM_SHA256 = "f90705c878399b7dccca9cf9b28d695a4c6f8a0e12f2701f7762265470fa6c22"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| Python | 0.000001 | @@ -160,133 +160,133 @@
= %22
-0128f8016770655fe7a40d3657f00853e6badb93%22%0A LLVM_SHA256 = %22f90705c878399b7dccca9cf9b28d695a4c6f8a0e12f2701f7762265470fa6c22
+852d84e36ed7a3db0ff4719f44a12b6bc09d35f3%22%0A LLVM_SHA256 = %223def20f54714c474910e5297b62639121116254e9e484ccee04eee6815b5d58c
%22%0A%0A
|
52f49543dd7bf01a2a24db435d8461b7c8921789 | Integrate LLVM at llvm/llvm-project@9a764ffeb6f0 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "72136d8ba266eea6ce30fbc0e521c7b01a13b378"
LLVM_SHA256 = "54d179116e7a79eb1fdf7819aad62b4d76bc0e15e8567871cae9b675f7dec5c1"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| Python | 0.000001 | @@ -160,133 +160,133 @@
= %22
-72136d8ba266eea6ce30fbc0e521c7b01a13b378%22%0A LLVM_SHA256 = %2254d179116e7a79eb1fdf7819aad62b4d76bc0e15e8567871cae9b675f7dec5c1
+9a764ffeb6f06a87c7ad482ae39f8a38b3160c5e%22%0A LLVM_SHA256 = %228f000d6541d64876de8ded39bc140176c90b74c3961b9ca755b1fed44423c56b
%22%0A%0A
|
9365d95e8f739c8c13bf0520ac20ad07a3387a42 | Avoid building blink_heap_unittests to unblock the Blink roll | public/all.gyp | public/all.gyp | #
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'includes': [
'../Source/build/features.gypi',
],
'targets': [
{
'target_name': 'all_blink',
'type': 'none',
'dependencies': [
'../Source/testing/testing.gyp:TestRunner_resources',
'../Source/heap/blink_heap_tests.gyp:blink_heap_unittests',
'../Source/platform/blink_platform_tests.gyp:blink_platform_unittests',
'../Source/web/web_tests.gyp:webkit_unit_tests',
'../Source/wtf/wtf_tests.gyp:wtf_unittests',
],
'conditions': [
# Special target to wrap a gtest_target_type==shared_library
# webkit_unit_tests into an android apk for execution. See
# base.gyp for TODO(jrg)s about this strategy.
['OS=="android" and android_webview_build==0 and gtest_target_type == "shared_library"', {
'dependencies': [
'../Source/web/web_tests.gyp:webkit_unit_tests_apk',
],
}],
],
},
],
}
| Python | 0.998657 | @@ -1809,32 +1809,176 @@
+# FIXME: This test doesn't link properly. Commenting it out to%0A # unblock the Blink roll. See crbug.com/332220.%0A #
'../Source/heap/
|
45254d35def51a5e8936fe649f8c3fc089cd4a6d | add `schemas.py` | todo/schemas.py | todo/schemas.py | Python | 0.000001 | @@ -0,0 +1,521 @@
+%22%22%22Request/Response Schemas are defined here%22%22%22%0A# pylint: disable=invalid-name%0A%0Afrom marshmallow import Schema, fields%0Afrom marshmallow_enum import EnumField%0A%0Afrom todo.enums import Status%0A%0A%0Aclass TaskSchema(Schema):%0A %22%22%22Schema for api.portal.models.Panel%22%22%22%0A id = fields.Int(required=True)%0A title = fields.Str(required=True)%0A description = fields.Str(required=True)%0A status = EnumField(Status, required=True)%0A created_at = fields.DateTime(required=True)%0A updated_at = fields.DateTime(required=True)%0A
|
|
4f9660704445e6da62fc4e893d93fc84288303d4 | Integrate LLVM at llvm/llvm-project@aec908f9b248 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5dcd6afa20881490b38f3d88c4e59b0b4ff33551"
LLVM_SHA256 = "86f64f78ba3b6c7e8400fe7f5559b3dd110b9a4fd9bfe9e5ea8a4d27301580e0"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| Python | 0.000001 | @@ -160,133 +160,133 @@
= %22
-5dcd6afa20881490b38f3d88c4e59b0b4ff33551%22%0A LLVM_SHA256 = %2286f64f78ba3b6c7e8400fe7f5559b3dd110b9a4fd9bfe9e5ea8a4d27301580e0
+aec908f9b248b27cb44217081c54e2c00604dff7%22%0A LLVM_SHA256 = %22c88b75b4d60b960c7da65b7bacfdf8c5cf4c7846ab85a334f1ff18a8b50f2d98
%22%0A%0A
|
735dee2da41bf8df8519d516bd9b231ff440f5f9 | Create globals.system module for Python & system related settings | source/globals/system.py | source/globals/system.py | Python | 0 | @@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-%0A%0A## %5Cpackage globals.system%0A%0A# MIT licensing%0A# See: LICENSE.txt%0A%0A%0Aimport sys%0A%0A%0APY_VER_MAJ = sys.version_info%5B0%5D%0APY_VER_MIN = sys.version_info%5B1%5D%0APY_VER_REL = sys.version_info%5B2%5D%0APY_VER_STRING = u'%7B%7D.%7B%7D.%7B%7D'.format(PY_VER_MAJ, PY_VER_MIN, PY_VER_REL)%0A
|
|
b83b09f937f91a870165d88730a36faaee8a5261 | add a parser of metadata | retsmeta.py | retsmeta.py | Python | 0.000118 | @@ -0,0 +1,1826 @@
+# -*- coding: utf-8 -*-%0A%0Afrom xml.etree import ElementTree%0A%0Aclass MetaParser(object):%0A def GetResources(self):%0A pass%0A %0A def GetRetsClass(self, resource):%0A pass%0A %0A def GetTables(self, resource, rets_class):%0A pass%0A %0A def GetLookUp(self, resource, rets_class):%0A pass%0A %0A%0Aclass StandardXmlMetaParser(MetaParser):%0A def __init__(self, filepath):%0A with open(filepath,'r') as f:%0A xml_str = f.read()%0A %0A self.meta_xml = ElementTree.fromstring(xml_str)%0A %0A def GetResources(self):%0A resource_list = %5B%5D%0A resource_xml_list = self.meta_xml.find('METADATA').find('METADATA-SYSTEM').find('SYSTEM').find('METADATA-RESOURCE').findall('Resource')%0A for resource_xml in resource_xml_list:%0A resource = RetsResource()%0A resource.resource_id = resource_xml.find('ResourceID').text%0A resource_list.append(resource)%0A return resource_list%0A %0A def GetRetsClass(self, resource):%0A class_list = %5B%5D%0A resource_xml_list = self.meta_xml.find('METADATA').find('METADATA-SYSTEM').find('SYSTEM').find('METADATA-RESOURCE').findall('Resource')%0A for resource_xml in resource_xml_list:%0A if resource_xml.find('ResourceID')==resource:%0A class_xml_list = resource_xml.findall('Class')%0A for class_xml in class_xml_list:%0A %0A %0A def GetTables(self, resource, rets_class):%0A pass%0A %0A def GetLookUp(self, resource, rets_class):%0A pass%0A%0Aclass RetsResource(object):%0A def __init__(self):%0A self.resource_id = None%0A %0Aclass RetsClass(object):%0A def __init__(self):%0A self.rets_classname = None%0A %0Aclass RetsTable(object):%0A def __init__(self):%0A self.system_name = None%0A %0A %0A
|
|
f2e5c56297a00ebf4b5029b702f8441adca83a8e | Update 'systemd' module from oslo-incubator | cinder/openstack/common/systemd.py | cinder/openstack/common/systemd.py | # Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
notification is sent only once.
"""
_sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
"""Wait for systemd style notification on the socket.
:param notify_socket: local socket address
:type notify_socket: string
:param timeout: socket timeout
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(_abstractify(notify_socket))
try:
msg = sock.recv(512)
except socket.timeout:
return 2
finally:
sock.close()
if 'READY=1' in msg:
return 0
else:
return 1
if __name__ == '__main__':
# simple CLI for testing
if len(sys.argv) == 1:
notify()
elif len(sys.argv) >= 2:
timeout = float(sys.argv[1])
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
retval = onready(notify_socket, timeout)
sys.exit(retval)
| Python | 0 | @@ -682,18 +682,23 @@
ort
-os
+logging
%0Aimport
sock
@@ -693,22 +693,18 @@
%0Aimport
+o
s
-ocket
%0Aimport
@@ -708,62 +708,24 @@
rt s
-ys%0A%0Afrom cinder.openstack.common import log as logging
+ocket%0Aimport sys
%0A%0A%0AL
|
bbb445b691f7370059c7bf9c94e2e9c6f4155273 | update to latest | tasks/base.py | tasks/base.py | import os
from invoke import run
class BaseTest(object):
def download_mspec(self):
if not os.path.isdir("../mspec"):
run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/mspec")
run("cd ../mspec && git checkout v1.6.0")
def download_rubyspec(self):
if not os.path.isdir("../rubyspec"):
run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/spec")
run("mv spec rubyspec")
| Python | 0 | @@ -216,62 +216,8 @@
ec%22)
-%0A run(%22cd ../mspec && git checkout v1.6.0%22)
%0A%0A
|
50843d6a2c93be4e05a0a2da338e4b0e0d99d294 | Add tls proxy helper | jujuxaas/tls_proxy.py | jujuxaas/tls_proxy.py | Python | 0 | @@ -0,0 +1,2716 @@
+import copy%0Aimport select%0Aimport socket%0Aimport ssl%0Aimport sys%0Aimport threading%0A%0Aimport logging%0Alogger = logging.getLogger(__name__)%0A%0Aclass TlsProxyConnection(object):%0A def __init__(self, server, inbound_socket, inbound_address, outbound_address):%0A self.server = server%0A self.inbound_socket = inbound_socket%0A self.inbound_address = inbound_address%0A self.outbound_socket = None%0A self.outbound_address = outbound_address%0A self.thread = None%0A%0A def start(self):%0A self.thread = threading.Thread(target=self._proxy)%0A self.thread.daemon = True%0A self.thread.start()%0A%0A def _proxy(self):%0A try:%0A self.outbound_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A self.outbound_socket = self.server._wrap_ssl(self.outbound_socket)%0A self.outbound_socket.connect(self.outbound_address)%0A logger.debug(%22Proxy for %25s: connected to remote%22, self.inbound_address)%0A %0A pairs = %7B%7D%0A pairs%5Bself.inbound_socket%5D = self.outbound_socket%0A pairs%5Bself.outbound_socket%5D = self.inbound_socket%0A %0A selectors = %5Bself.inbound_socket, self.outbound_socket%5D%0A while True:%0A ready, _, _ = select.select(selectors, %5B%5D, %5B%5D)%0A for s in ready:%0A data = s.recv(8192)%0A if len(data) == 0:%0A # Close%0A break%0A else:%0A other = pairs%5Bs%5D%0A other.send(data)%0A except:%0A logger.warn(%22Proxy for %25s: error: %25s%22, self.inbound_address, sys.exc_info())%0A finally:%0A logger.debug(%22Proxy for %25s: closing%22, self.inbound_address)%0A self.inbound_socket.close()%0A if self.outbound_socket:%0A self.outbound_socket.close() %0A%0Aclass TlsProxy(object):%0A def __init__(self, ssl_context, listen_address, forward_address):%0A self.listen_address = listen_address%0A self.forward_address = forward_address%0A self.ssl_context = ssl_context%0A self._ready = threading.Event()%0A %0A def _serve(self):%0A server = None%0A try:%0A server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)%0A server.bind(self.listen_address)%0A server.listen(50)%0A %0A self._ready.set()%0A %0A while True:%0A client, client_address = server.accept()%0A proxy = TlsProxyConnection(self, client, client_address, self.forward_address)%0A proxy.start()%0A finally:%0A if server:%0A server.close()%0A %0A def start(self):%0A self.thread = threading.Thread(target=self._serve)%0A self.thread.daemon = True%0A self.thread.start()%0A self._ready.wait()%0A %0A def _wrap_ssl(self, socket):%0A options = copy.copy(self.ssl_context)%0A options%5B'sock'%5D = socket%0A return ssl.wrap_socket(**options)%0A
|
|
7d7fd5b167528654b9fed5b0c971c2b8110d93ea | Create wrapper_exploit.py | wrapper_exploit.py | wrapper_exploit.py | Python | 0 | @@ -0,0 +1,520 @@
+# Author: Chris Duffy%0A# Date: May 2015%0A# Purpose: An sample exploit for testing UDP services%0Aimport sys, socket, strut, subprocess%0Aprogram_name = 'C:%5Cexploit_writing%5Cvulnerable.exe'%0Afill =%22A%22*####%0Aeip = struct.pack('%3CI',0x########)%0Aoffset = %22%5Cx90%22*##%0Aavailable_shellcode_space = ###%0Ashell =() #Code to insert%0A# NOPs to fill the remaining space%0Aexploit = fill + eip + offset + shell%0Aclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)%0Aclient.sendto(exploit, (rhost, rport))%0Asubprocess.call(%5Bprogram_name, exploit%5D)%0A
|
|
ef24797a12e8a8919ddb11c7b6763154c5c3aad1 | transform DR script to observe exceptions | transform_DR.py | transform_DR.py | Python | 0 | @@ -0,0 +1,1158 @@
+__author__ = 'kuhn'%0A__author__ = 'kuhn'%0A%0Afrom batchxslt import processor%0Afrom batchxslt import cmdiresource%0Aimport codecs%0Aimport os%0A%0Adgd_corpus = %22/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/corpora/extern%22%0Adgd_events = %22/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/events/extern%22%0Adgd_speakers = %22/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/speakers/extern%22%0A%0Acorpus_xsl = %22/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdCorpus2cmdi.xsl%22%0Aevent_xsl = %22/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdEvent2cmdi.xsl%22%0Aspeaker_xsl = %22/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdSpeaker2cmdi.xsl%22%0A%0Asaxon_jar = %22/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/dgd2cmdi/saxon/saxon9he.jar%22%0A%0Apf_corpus = os.path.join(dgd_corpus, 'DR--_extern.xml')%0Apf_events = os.path.join(dgd_events, 'DR')%0Apf_speakers = os.path.join(dgd_speakers, 'DR')%0A%0Axsl_processor = processor.XSLBatchProcessor(saxon_jar)%0A%0Axsl_processor.transform(corpus_xsl, pf_corpus, %22cmdi_%22, '/tmp/cmdi/corpus/')%0A%0Axsl_processor.transform(event_xsl, pf_events, %22cmdi_%22, '/tmp/cmdi/events/DR/')%0A%0Axsl_processor.transform(speaker_xsl, pf_speakers, %22cmdi_%22, '/tmp/cmdi/speakers/DR/')%0A
|
|
60b5228818c92f4d13b0a054956a5f834c7f7549 | Implement remove.py | programs/genesis_util/remove.py | programs/genesis_util/remove.py | Python | 0.0001 | @@ -0,0 +1,2546 @@
+#!/usr/bin/env python3%0A%0Aimport argparse%0Aimport json%0Aimport sys%0A%0Adef dump_json(obj, out, pretty):%0A if pretty:%0A json.dump(obj, out, indent=2, sort_keys=True)%0A else:%0A json.dump(obj, out, separators=(%22,%22, %22:%22), sort_keys=True)%0A return%0A%0Adef main():%0A parser = argparse.ArgumentParser(description=%22Remove entities from snapshot%22)%0A parser.add_argument(%22-o%22, %22--output%22, metavar=%22OUT%22, default=%22-%22, help=%22output filename (default: stdout)%22)%0A parser.add_argument(%22-i%22, %22--input%22, metavar=%22IN%22, default=%22-%22, help=%22input filename (default: stdin)%22)%0A parser.add_argument(%22-a%22, %22--asset%22, metavar=%22ASSETS%22, nargs=%22+%22, help=%22list of asset(s) to delete%22) %0A parser.add_argument(%22-p%22, %22--pretty%22, action=%22store_true%22, default=False, help=%22pretty print output%22)%0A opts = parser.parse_args()%0A%0A if opts.input == %22-%22:%0A genesis = json.load(sys.stdin) %0A else:%0A with open(opts.input, %22r%22) as f:%0A genesis = json.load(f)%0A%0A if opts.asset is None: %0A opts.asset = %5B%5D%0A rm_asset_set = set(opts.asset)%0A%0A removed_asset_entries = %7Baname : 0 for aname in opts.asset%7D%0A new_initial_assets = %5B%5D%0A for asset in genesis%5B%22initial_assets%22%5D:%0A symbol = asset%5B%22symbol%22%5D%0A if symbol not in rm_asset_set:%0A new_initial_assets.append(asset)%0A else:%0A removed_asset_entries%5Bsymbol%5D += 1%0A genesis%5B%22initial_assets%22%5D = new_initial_assets%0A%0A removed_balance_entries = %7Baname : %5B%5D for aname in opts.asset%7D%0A new_initial_balances = %5B%5D%0A for balance in genesis%5B%22initial_balances%22%5D:%0A symbol = balance%5B%22asset_symbol%22%5D%0A if symbol not in rm_asset_set:%0A new_initial_balances.append(balance)%0A else:%0A removed_balance_entries%5Bsymbol%5D.append(balance)%0A genesis%5B%22initial_balances%22%5D = new_initial_balances%0A # TODO: Remove from initial_vesting_balances%0A%0A for aname in opts.asset:%0A sys.stderr.write(%0A %22Asset %7Bsym%7D removed %7Bacount%7D initial_assets, %7Bbcount%7D initial_balances totaling %7Bbtotal%7D%5Cn%22.format(%0A sym=aname,%0A acount=removed_asset_entries%5Baname%5D,%0A bcount=len(removed_balance_entries%5Baname%5D),%0A btotal=sum(int(e%5B%22amount%22%5D) for e in removed_balance_entries%5Baname%5D),%0A ))%0A%0A if opts.output == %22-%22:%0A dump_json( genesis, sys.stdout, opts.pretty )%0A sys.stdout.flush()%0A else:%0A with open(opts.output, %22w%22) as f:%0A dump_json( genesis, f, opts.pretty )%0A return%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|