commit (string, len 40) | subject (string, len 1-3.25k) | old_file (string, len 4-311) | new_file (string, len 4-311) | old_contents (string, len 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, len 0-7.82k)
---|---|---|---|---|---|---|---|
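Each cell in the `diff` column stores a unified-diff payload with special characters percent-encoded (`%0A` is a newline, `%22` a double quote, `%25` a literal percent sign, and so on). A minimal sketch of decoding such a cell with the Python standard library; the `sample` string below is a made-up illustration shaped like the rows that follow, not an actual row from the table:

```python
from urllib.parse import unquote

def decode_diff(cell: str) -> str:
    """Percent-decode one `diff` cell back into a readable unified diff."""
    return unquote(cell)

# Hypothetical cell contents, for illustration only.
sample = "@@ -0,0 +1,2 @@%0A+import sys%0A+print(%22hello%22)%0A"
print(decode_diff(sample))
```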
a58a31a6037babdc607593196da2841f13791bfa | Revert "Remove the camelcase/underscore conversion, just use a third-party one" | railguns/utils/text.py | railguns/utils/text.py | Python | 0 | @@ -0,0 +1,942 @@
+%22%22%22%0Ahttps://github.com/tomchristie/django-rest-framework/issues/944%0A%22%22%22%0Aimport re%0A%0A%0Afirst_cap_re = re.compile('(.)(%5BA-Z%5D%5Ba-z%5D+)')%0Aall_cap_re = re.compile('(%5Ba-z0-9%5D)(%5BA-Z%5D)')%0A%0A%0Adef camelcase_to_underscore(name):%0A s1 = first_cap_re.sub(r'%5C1_%5C2', name)%0A return all_cap_re.sub(r'%5C1_%5C2', s1).lower()%0A%0A%0Adef underscore_to_camelcase(name, lower_first=True):%0A result = name.title().replace('_', '')%0A if lower_first:%0A return result%5B0%5D.lower() + result%5B1:%5D%0A else:%0A return result%0A%0A%0Adef recursive_key_map(function, data):%0A if isinstance(data, dict):%0A new_dict = %7B%7D%0A for key, value in data.items():%0A if isinstance(key, str):%0A new_key = function(key)%0A new_dict%5Bnew_key%5D = recursive_key_map(function, value)%0A return new_dict%0A elif isinstance(data, (list, tuple)):%0A return %5Brecursive_key_map(function, value) for value in data%5D%0A else:%0A return data%0A
|
|
cd727a5e17cabcc4ee03f2973775f30b7c8b5a26 | add terrible copypasta'd watchdog-using piece of shit for test running | tasks.py | tasks.py | import sys
import time
from invocations.docs import docs, www
from invocations.testing import test, coverage
from invocations.packaging import vendorize, release
from invoke import ctask as task, Collection, Context
@task(help=test.help)
def integration(c, module=None, runner=None, opts=None):
"""
Run the integration test suite. May be slow!
"""
opts = opts or ""
opts += " --tests=integration/"
test(c, module, runner, opts)
@task
def sites(c):
"""
Build both doc sites w/ maxed nitpicking.
"""
# Turn warnings into errors, emit warnings about missing references.
# This gives us a maximally noisy docs build.
# Also enable tracebacks for easier debuggage.
opts = "-W -n -T"
# This is super lolzy but we haven't actually tackled nontrivial in-Python
# task calling yet, so...
docs_c = Context(config=c.config.clone())
www_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
www_c.update(**www.configuration())
docs['build'](docs_c, opts=opts)
www['build'](www_c, opts=opts)
@task
def watch(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
"""
try:
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
except ImportError:
sys.exit("If you want to use this, 'pip install watchdog' first.")
class APIBuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
my_c = Context(config=c.config.clone())
my_c.update(**docs.configuration())
docs['build'](my_c)
class WWWBuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
my_c = Context(config=c.config.clone())
my_c.update(**www.configuration())
www['build'](my_c)
# Readme & WWW triggers WWW
www_handler = WWWBuildHandler(
regexes=['\./README.rst', '\./sites/www'],
ignore_regexes=['.*/\..*\.swp', '\./sites/www/_build'],
)
# Code and docs trigger API
api_handler = APIBuildHandler(
regexes=['\./invoke/', '\./sites/docs'],
ignore_regexes=['.*/\..*\.swp', '\./sites/docs/_build'],
)
# Run observer loop
observer = Observer()
# TODO: Find parent directory of tasks.py and use that.
for x in (www_handler, api_handler):
observer.schedule(x, '.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
ns = Collection(
test, coverage, integration, vendorize, release, www, docs, sites, watch
)
ns.configure({'coverage': {'package': 'invoke'}})
| Python | 0 | @@ -1100,16 +1100,21 @@
ef watch
+_docs
(c):%0A
@@ -2712,16 +2712,962 @@
join()%0A%0A
+ %0A@task%0Adef watch_tests(c, module=None):%0A %22%22%22%0A Watch source tree and test tree for changes, rerunning tests as necessary.%0A %22%22%22%0A try:%0A from watchdog.observers import Observer%0A from watchdog.events import RegexMatchingEventHandler%0A except ImportError:%0A sys.exit(%22If you want to use this, 'pip install watchdog' first.%22)%0A%0A class BuildHandler(RegexMatchingEventHandler):%0A def on_any_event(self, event):%0A test(c, module=module)%0A%0A # Code and docs trigger API%0A handler = BuildHandler(%0A regexes=%5B'%5C./invoke/', '%5C./tests'%5D,%0A ignore_regexes=%5B'.*/%5C..*%5C.swp'%5D,%0A )%0A%0A # Run observer loop%0A observer = Observer()%0A # TODO: Find parent directory of tasks.py and use that.%0A observer.schedule(handler, '.', recursive=True)%0A observer.start()%0A try:%0A while True:%0A time.sleep(1)%0A except KeyboardInterrupt:%0A observer.stop()%0A observer.join()%0A%0A
%0Ans = Co
@@ -3746,22 +3746,44 @@
, sites,
+%0A
watch
+_docs, watch_tests
%0A)%0Ans.co
|
a723c70a0ae9da0f2207dd9278c619be323bda4a | move test parts to avnav_test | avnav_test/avn_debug.py | avnav_test/avn_debug.py | Python | 0 | @@ -0,0 +1,187 @@
+import sys%0Asys.path.append(r'/home/pi/avnav/pydev')%0Aimport pydevd%0Afrom avnav_server import *%0Apydevd.settrace(host='10.222.10.45',stdoutToServer=True, stderrToServer=True)%0A%0Amain(sys.argv)%0A
|
|
aa1b39b455f7145848c287ee9ee85507f5b66de0 | Add Meduza | collector/rss/meduza.py | collector/rss/meduza.py | Python | 0 | @@ -0,0 +1,828 @@
+# coding=utf-8%0Aimport feedparser%0Aimport logging%0A%0Afrom util import date, tags%0A%0ASOURCE_NAME = 'Meduza'%0AFEED_URL = 'https://meduza.io/rss/all'%0A%0Alog = logging.getLogger('app')%0A%0A%0Adef parse():%0A feed = feedparser.parse(FEED_URL)%0A data = %5B%5D%0A%0A for entry in feed%5B'entries'%5D:%0A data.append(%7B%0A 'title': entry%5B'title'%5D,%0A 'description': entry%5B'description'%5D,%0A 'link': entry%5B'link'%5D,%0A 'published': date.utc_format(entry%5B'published'%5D),%0A%0A 'source_name': SOURCE_NAME,%0A 'source_title': feed%5B'feed'%5D%5B'title'%5D,%0A 'source_link': feed%5B'feed'%5D%5B'link'%5D,%0A%0A 'tags': tags.string_format('world', 'no_tech', 'meduza'),%0A %7D)%0A%0A log.info('%25s: got %25d documents', SOURCE_NAME, len(data))%0A%0A return data%0A%0A%0Aif __name__ == '__main__':%0A print parse()%0A
|
|
d50814603217ca9ea47324a0ad516ce7418bc9bf | Add script to generate a standalone timeline view. | build/generate_standalone_timeline_view.py | build/generate_standalone_timeline_view.py | Python | 0.999893 | @@ -0,0 +1,2521 @@
+#!/usr/bin/env python%0A# Copyright (c) 2012 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0Aimport optparse%0Aimport parse_deps%0Aimport sys%0Aimport os%0A%0Asrcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), %22../src%22))%0A%0Ajs_warning_message = %22%22%22/**%0A// Copyright (c) 2012 The Chromium Authors. All rights reserved.%0A// Use of this source code is governed by a BSD-style license that can be%0A// found in the LICENSE file.%0A%0A* WARNING: This file is generated by generate_standalone_timeline_view.py%0A*%0A* Do not edit directly.%0A*/%0A%22%22%22%0A%0Acss_warning_message = %22%22%22/**%0A/* Copyright (c) 2012 The Chromium Authors. All rights reserved.%0A * Use of this source code is governed by a BSD-style license that can be%0A * found in the LICENSE file. */%0A%0A* WARNING: This file is generated by generate_standalone_timeline_view.py%0A*%0A* Do not edit directly.%0A*/%0A%22%22%22%0A%0Adef generate_css(filenames):%0A load_sequence = parse_deps.calc_load_sequence(filenames)%0A%0A style_sheet_chunks = %5Bcss_warning_message, '%5Cn'%5D%0A for module in load_sequence:%0A for style_sheet in module.style_sheets:%0A style_sheet_chunks.append(%22%22%22%25s%5Cn%22%22%22 %25 style_sheet.timeline_view)%0A%0A return ''.join(style_sheet_chunks)%0A%0Adef generate_js(filenames):%0A load_sequence = parse_deps.calc_load_sequence(filenames)%0A%0A js_chunks = %5Bjs_warning_message, '%5Cn'%5D%0A js_chunks.append(%22window.FLATTENED = %7B%7D;%5Cn%22)%0A%0A for module in load_sequence:%0A js_chunks.append( %22window.FLATTENED%5B'%25s'%5D = true;%5Cn%22 %25 module.name)%0A%0A for module in load_sequence:%0A js_chunks.append(module.timeline_view)%0A js_chunks.append(%22%5Cn%22)%0A%0A return ''.join(js_chunks)%0A%0Adef main(args):%0A parser = optparse.OptionParser()%0A parser.add_option(%22--js%22, dest=%22js_file%22,%0A help=%22Where to place generated javascript file%22)%0A parser.add_option(%22--css%22, dest=%22css_file%22,%0A help=%22Where to place generated css file%22)%0A options, args = parser.parse_args(args)%0A%0A if not options.js_file and not options.css_file:%0A print %22Must specify one, or both of --js and --css%22%0A return 1%0A%0A input_filenames = %5Bos.path.join(srcdir, f)%0A for f in %5B'base.js', 'timeline_view.js'%5D%5D%0A if options.js_file:%0A with open(options.js_file, 'w') as f:%0A f.write(generate_js(input_filenames))%0A%0A if options.css_file:%0A with open(options.css_file, 'w') as f:%0A f.write(generate_css(input_filenames))%0A%0A return 0%0A%0A%0Aif __name__ == %22__main__%22:%0A sys.exit(main(sys.argv))%0A
|
|
f6ef8e0c31163f95fa0c62873a7195ab51f65cf1 | Add cw_are_they_the_same.py | cw_are_they_the_same.py | cw_are_they_the_same.py | Python | 0.01265 | @@ -0,0 +1,1754 @@
+%22%22%22Codewars: Are they the %22same%22?%0A6 kyu%0A%0AURL: https://www.codewars.com/kata/550498447451fbbd7600041c%0A%0AGiven two arrays a and b write a function comp(a, b) (compSame(a, b) in Clojure)%0Athat checks whether the two arrays have the %22same%22 elements, with the same%0Amultiplicities. %22Same%22 means, here, that the elements in b are the elements i%0Aa squared, regardless of the order.%0A%0AExamples%0AValid arrays%0Aa = %5B121, 144, 19, 161, 19, 144, 19, 11%5D %0Ab = %5B121, 14641, 20736, 361, 25921, 361, 20736, 361%5D%0Acomp(a, b) returns true because in b 121 is the square of 11, 14641 is the%0Asquare of 121, 20736 the square of 144, 361 the square of 19, 25921 the%0Asquare of 161, and so on. It gets obvious if we write b's elements in terms of%0Asquares:%0Aa = %5B121, 144, 19, 161, 19, 144, 19, 11%5D %0Ab = %5B11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19%5D%0AInvalid arrays%0AIf we change the first number to something else, comp may not return true%0Aanymore:%0Aa = %5B121, 144, 19, 161, 19, 144, 19, 11%5D %0Ab = %5B132, 14641, 20736, 361, 25921, 361, 20736, 361%5D%0Acomp(a,b) returns false because in b 132 is not the square of any number of a.%0A%0Aa = %5B121, 144, 19, 161, 19, 144, 19, 11%5D %0Ab = %5B121, 14641, 20736, 36100, 25921, 361, 20736, 361%5D%0Acomp(a,b) returns false because in b 36100 is not the square of any number of a.%0A%0ARemarks%0A- a or b might be %5B%5D (all languages except R, Shell). a or b might be nil or null%0Aor None or nothing (except in Haskell, Elixir, C++, Rust, R, Shell, PureScript).%0A- If a or b are nil (or null or None), the problem doesn't make sense so return false.%0A- If a or b are empty then the result is self-evident.%0A- a or b are empty or not empty lists.%0A%22%22%22%0A%0A%0Adef comp(array1, array2):%0A # your code%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
64ea416a335d9c1a8946411c2b3b1a67cd450131 | Add first pass at reconstructed targets module. | vizard/targets.py | vizard/targets.py | Python | 0 | @@ -0,0 +1,1621 @@
+import viz%0Aimport vizact%0Aimport vizshape%0A%0Aimport vrlab%0A%0A%0Aclass Target:%0A '''A target is a single cube in the motion-capture space.%0A%0A Subjects are tasked with touching the cubes during the experiment.%0A '''%0A%0A def __init__(self, index, x, y, z):%0A self.center = x, y, z%0A self.sphere = vizshape.addSphere(%0A 0.7, center=self.center, color=viz.WHITE)%0A self.sound = viz.addAudio('%7B:02d%7D.wav'.format(index))%0A self.signal = vizact.Signal()%0A self.sensor = vizproximity.addBoundingSphereSensor(self.sphere, scale=1)%0A%0A def activate(self, prox):%0A prox.clearSensors()%0A%0A prox.addSensor(self.sensor)%0A%0A prox.onEnter(self.sensor, lambda e: vrlab.sounds.drip.play())%0A prox.onEnter(self.sensor, lambda e: self.sphere.color(viz.BLUE))%0A prox.onEnter(self.sensor, self.signal.send)%0A%0A prox.onExit(self.sensor, lambda e: self.sphere.color(viz.WHITE))%0A%0A%0ANUMBERED = (%0A Target( 0, -1.98, 0.05, -1.86),%0A Target( 1, -1.72, 1.83, 2.26),%0A Target( 2, 0.00, 0.05, 1.86),%0A Target( 3, 1.73, 0.05, -1.79),%0A Target( 4, 1.89, 0.99, 2.26),%0A Target( 5, -2.14, 0.93, 0.10),%0A Target( 6, -0.24, 0.90, -1.76),%0A Target( 7, 1.51, 1.81, -1.76),%0A Target( 9, 1.79, 0.05, 0.00),%0A Target(10, 0.10, 1.89, 0.10),%0A Target(11, -0.24, 1.86, 2.26),%0A)%0A%0A%0ACIRCUITS = (%0A (10, 0, 1, 3, 8, 4, 11, 7, 9, 6, 5, 2),%0A (7, 1, 0, 11, 9, 2, 8, 3, 6, 4, 10, 5),%0A (3, 0, 8, 11, 5, 10, 6, 1, 4, 2, 9, 7),%0A (11, 8, 7, 3, 4, 6, 9, 5, 0, 2, 1, 10),%0A (4, 7, 8, 5, 6, 0, 3, 1, 9, 10, 2, 11),%0A (10, 3, 9, 1, 2, 4, 5, 7, 11, 0, 6, 8),%0A)%0A
|
|
f1f57561c4ebb5a374b168cd5e6274cbb854611d | change except lines | wakatime/queue.py | wakatime/queue.py | # -*- coding: utf-8 -*-
"""
wakatime.queue
~~~~~~~~~~~~~~
Queue for offline time logging.
http://wakatime.com
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import sqlite3
import traceback
from time import sleep
log = logging.getLogger(__name__)
class Queue(object):
DB_FILE = os.path.join(os.path.expanduser('~'), '.wakatime.db')
def connect(self):
conn = sqlite3.connect(self.DB_FILE)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS action (
file text,
time real,
project text,
language text,
lines integer,
branch text,
is_write integer,
plugin text)
''')
return (conn, c)
def push(self, data, plugin):
try:
conn, c = self.connect()
action = {
'file': data.get('file'),
'time': data.get('time'),
'project': data.get('project'),
'language': data.get('language'),
'lines': data.get('lines'),
'branch': data.get('branch'),
'is_write': 1 if data.get('is_write') else 0,
'plugin': plugin,
}
c.execute('INSERT INTO action VALUES (:file,:time,:project,:language,:lines,:branch,:is_write,:plugin)', action)
conn.commit()
conn.close()
except sqlite3.Error, e:
log.error(str(e))
def pop(self):
tries = 3
wait = 0.1
action = None
try:
conn, c = self.connect()
except sqlite3.Error, e:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM action LIMIT 1')
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['file', 'time', 'project', 'language', 'lines', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else:
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM action WHERE {0}'.format(u' AND '.join(clauses)), values)
else:
c.execute('DELETE FROM action WHERE {0}'.format(u' AND '.join(clauses)))
conn.commit()
if row is not None:
action = {
'file': row[0],
'time': row[1],
'project': row[2],
'language': row[3],
'lines': row[4],
'branch': row[5],
'is_write': True if row[6] is 1 else False,
'plugin': row[7],
}
loop = False
except sqlite3.Error, e:
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error, e:
log.debug(traceback.format_exc())
return action
| Python | 0.000137 | @@ -1505,35 +1505,32 @@
pt sqlite3.Error
-, e
:%0A lo
@@ -1541,13 +1541,29 @@
ror(
-s
tr
+aceback.format_exc
(
-e
))%0A%0A
@@ -1711,35 +1711,32 @@
pt sqlite3.Error
-, e
:%0A lo
@@ -3354,35 +3354,32 @@
pt sqlite3.Error
-, e
:%0A
@@ -3539,11 +3539,8 @@
rror
-, e
:%0A
|
58626e757b463f2aec6751e04fbaf0e83cf0adf9 | Create Bigram.py | src/3-trained-classifier/Bigram.py | src/3-trained-classifier/Bigram.py | Python | 0.000001 | @@ -0,0 +1,2090 @@
+__author__ = 'Atef Bellaaj'%0A__author__ = 'Bellaaj'%0Aimport collections%0Aimport nltk.metrics%0Aimport nltk.classify.util%0Afrom nltk.classify import NaiveBayesClassifier%0Afrom nltk.corpus import movie_reviews%0Aneg_ids = movie_reviews.fileids('neg')%0Apos_ids = movie_reviews.fileids('pos')%0A%0A%0A%0A%0Aimport itertools%0Afrom nltk.collocations import BigramCollocationFinder%0Afrom nltk.metrics import BigramAssocMeasures%0A%0Adef bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=200):%0A bigram_finder = BigramCollocationFinder.from_words(words)%0A bigrams = bigram_finder.nbest(score_fn, n)%0A return dict(%5B(ngram, True) for ngram in itertools.chain(words, bigrams)%5D)%0Aneg_feats = %5B(bigram_word_feats(movie_reviews.words(fileids=%5Bf%5D)), 'neg') for f in neg_ids%5D%0Apos_feats = %5B(bigram_word_feats(movie_reviews.words(fileids=%5Bf%5D)), 'pos') for f in pos_ids%5D%0A%0Aneg_limit = len(neg_feats)*3/4%0Apos_limit = len(pos_feats)*3/4%0A%0A%0Atrainfeats = neg_feats%5B:neg_limit%5D + pos_feats%5B:pos_limit%5D%0Atestfeats = neg_feats%5Bneg_limit:%5D + pos_feats%5Bpos_limit:%5D%0Aprint 'train on %25d instances, test on %25d instances' %25 (len(trainfeats), len(testfeats))%0Aprint neg_feats%5B1%5D%0A%0Aclassifier = NaiveBayesClassifier.train(trainfeats)%0Aimport pickle%0Af = open('bigram_classifier.pickle', 'wb')%0Apickle.dump(classifier, f)%0Af.close()%0Aprint 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)%0Aclassifier.show_most_informative_features()%0A%0Arefsets = collections.defaultdict(set)%0Atestsets = collections.defaultdict(set)%0A%0Afor i, (feats, label) in enumerate(testfeats):%0A refsets%5Blabel%5D.add(i)%0A observed = classifier.classify(feats)%0A testsets%5Bobserved%5D.add(i)%0A%0Aprint 'pos precision:', nltk.metrics.precision(refsets%5B'pos'%5D, testsets%5B'pos'%5D)%0Aprint 'pos recall:', nltk.metrics.recall(refsets%5B'pos'%5D, testsets%5B'pos'%5D)%0Aprint 'pos F-measure:', nltk.metrics.f_measure(refsets%5B'pos'%5D, testsets%5B'pos'%5D)%0Aprint 'neg precision:', nltk.metrics.precision(refsets%5B'neg'%5D, testsets%5B'neg'%5D)%0Aprint 'neg recall:', nltk.metrics.recall(refsets%5B'neg'%5D, testsets%5B'neg'%5D)%0Aprint 'neg F-measure:', nltk.metrics.f_measure(refsets%5B'neg'%5D, testsets%5B'neg'%5D)%0A
|
|
ad0a1c1404c53f1565ef728a747d5d5f319f1992 | Add tests for Enterprise | auth0/v2/test/authentication/test_enterprise.py | auth0/v2/test/authentication/test_enterprise.py | Python | 0 | @@ -0,0 +1,766 @@
+import unittest%0Aimport mock%0Afrom ...authentication.enterprise import Enterprise%0A%0A%0Aclass TestEnterprise(unittest.TestCase):%0A%0A @mock.patch('auth0.v2.authentication.enterprise.Enterprise.get')%0A def test_saml_metadata(self, mock_get):%0A%0A e = Enterprise('my.domain.com')%0A%0A e.saml_metadata('cid')%0A%0A mock_get.assert_called_with(%0A url='https://my.domain.com/samlp/metadata/cid'%0A )%0A%0A @mock.patch('auth0.v2.authentication.enterprise.Enterprise.get')%0A def test_wsfed_metadata(self, mock_get):%0A%0A e = Enterprise('my.domain.com')%0A%0A e.wsfed_metadata()%0A%0A mock_get.assert_called_with(%0A url='https://my.domain.com/wsfed/FederationMetadata' %5C%0A '/2007-06/FederationMetadata.xml'%0A )%0A
|
|
8780243a88f505c06962247fdcc6e4bc4abb2912 | add prototype at python | prototype.py | prototype.py | Python | 0 | @@ -0,0 +1,832 @@
+#!/usr/bin/env python%0Aimport copy%0A%0A%0Aclass Manager:%0A def __init__(self):%0A self.showcase = %7B%7D%0A%0A def register(self, name, obj):%0A self.showcase%5Bname%5D = obj%0A%0A def clone(self, name):%0A return copy.deepcopy(self.showcase%5Bname%5D)%0A%0A%0Aclass MessageBox:%0A def __init__(self, deco_char):%0A self.deco_char = deco_char%0A%0A def display(self, message):%0A print(self.deco_char * (len(message) + len(self.deco_char) * 2 + 2))%0A print('%7B0%7D %7B1%7D %7B0%7D'.format(self.deco_char, message))%0A print(self.deco_char * (len(message) + len(self.deco_char) * 2 + 2))%0A%0A%0Aif __name__ == '__main__':%0A manager = Manager()%0A box1 = MessageBox('*')%0A manager.register('ast', box1)%0A box2 = manager.clone('ast')%0A print(id(box1))%0A print(id(box2))%0A box1.display('hogehoge')%0A box2.display('hogehoge')%0A
|
|
f675668813df6d4da48dc2b4df4f9be91e808bae | Add ZEROFILL flag to get_ip_subnets | pytos/common/functions/network.py | pytos/common/functions/network.py |
import logging
import netifaces
import platform
import re
import socket
import struct
from functools import lru_cache
import dns
from dns import reversename, resolver, name
import netaddr
from pytos.common.logging.definitions import COMMON_LOGGER_NAME
logger = logging.getLogger(COMMON_LOGGER_NAME)
IPV4_ADDRESS_REGEX_STR = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(?:/\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|/\d{1,2})?"
IPV4_ADDRESS_REGEX = re.compile(IPV4_ADDRESS_REGEX_STR)
IPV6_ADDRESS_REGEX = re.compile(
r"(?:::|(?:(?:[a-fA-F0-9]{1,4}):){7}(?:(?:[a-fA-F0-9]{1,4}))|(?::(?::(?:[a-fA-F0-9]{1,4})){1,6})|(?:(?:(?:["
r"a-fA-F0-9]{1,4}):){1,6}:)|(?:(?:(?:[a-fA-F0-9]{1,4}):)(?::(?:[a-fA-F0-9]{1,4})){1,6})|(?:(?:(?:[a-fA-F0-9]{"
r"1,4}):){2}(?::(?:[a-fA-F0-9]{1,4})){1,5})|(?:(?:(?:[a-fA-F0-9]{1,4}):){3}(?::(?:[a-fA-F0-9]{1,4})){1,"
r"4})|(?:(?:(?:[a-fA-F0-9]{1,4}):){4}(?::(?:[a-fA-F0-9]{1,4})){1,3})|(?:(?:(?:[a-fA-F0-9]{1,4}):){5}(?::(?:["
r"a-fA-F0-9]{1,4})){1,2}))(?:/[0-9]+)?")
def is_ipv4_string(ip):
"""Check if the specified string is a valid IPv4 address.
:type ip: str
:param ip: The IP address to check.
:rtype: bool
"""
if re.match(IPV4_ADDRESS_REGEX, ip):
return True
else:
return False
def is_ipv6_string(ip):
"""Check if the specified string is a valid IPv6 address.
:type ip: str
:param ip: The IP address to check.
:rtype: bool
"""
if re.match(IPV6_ADDRESS_REGEX, ip):
return True
else:
return False
def dns_lookup(target, query_type="A", rdclass=1, tcp=False):
if is_ipv4_string(target) or is_ipv6_string(target):
if query_type == "PTR":
try:
target = dns.reversename.from_address(target)
except (dns.resolver.NXDOMAIN, dns.name.LabelTooLong, dns.exception.Timeout, dns.resolver.NoNameservers):
return []
else:
raise ValueError("Only PTR is supported for IP addresses.")
try:
answers = dns.resolver.query(target, query_type, rdclass, tcp)
except (dns.resolver.NXDOMAIN, dns.name.LabelTooLong, dns.exception.Timeout, dns.resolver.NoNameservers):
return []
answers_list = [str(answer).rstrip(".") for answer in answers]
return answers_list
@lru_cache()
def get_iana_services():
"""Parse the local file of IANA services and return a dictionary of service name to service protocol and port.
:rtype:dict[str,(str,str)]
"""
os_dist = platform.system()
if os_dist == "Linux":
services_file_path = "/etc/services"
elif os_dist == "Windows":
services_file_path = "C:\\windows\\system32\\etc\\services"
else:
raise TypeError("Unsupported OS '{}'".format(os_dist))
services_dict = {}
with open(services_file_path) as services_file:
for line in services_file.readlines():
if not line.startswith("#") and not line.isspace():
split_line = line.split()
service_name = split_line[0]
service_port, service_protocol = split_line[1].split("/")
try:
services_dict[service_name].append((service_protocol, service_port))
except KeyError:
services_dict[service_name] = [(service_protocol, service_port)]
for alias_name in split_line[2:]:
if alias_name.startswith("#"):
break
try:
services_dict[alias_name].append((service_protocol, service_port))
except KeyError:
services_dict[alias_name] = [(service_protocol, service_port)]
return services_dict
@lru_cache()
def get_iana_protocols():
"""Parse the local file of IANA IP protocols and return a dictionary of protocol number to name.
:rtype:dict[int,str]
"""
os_dist = platform.system()
if os_dist == "Linux":
protocols_file_path = "/etc/protocols"
elif os_dist == "Windows":
protocols_file_path = "C:\\windows\\system32\\etc\\protocols"
else:
raise TypeError("Unsupported OS '{}'".format(os_dist))
protocols = {}
with open(protocols_file_path) as services_file:
for line in services_file.readlines():
if not line.startswith("#") and not line.isspace():
_, protocol_number, protocol_name, *_ = line.split()
protocols[int(protocol_number)] = protocol_name
return protocols
def get_ip_subnets(ip):
"""Get a list of subnets contained in the specified subnet.
:type ip: str
:param ip: The IP that subnets will be returned for.
:list[netaddr.IPNetwork]
"""
ip = ip.strip().replace(" ", "")
if "/" in ip:
return [netaddr.IPNetwork(ip)]
elif "-" in ip:
start_ip, end_ip = ip.split("-")
ip_set_object = netaddr.IPSet(netaddr.IPRange(start_ip, end_ip))
return [address for address in ip_set_object.iter_cidrs()]
else:
if is_ipv4_string(ip):
return [netaddr.IPNetwork(ip)]
else:
raise ValueError("Invalid IP string '{}'.".format(ip))
def calculate_quad_dotted_netmask(mask):
"""
This function converts a CIDR notation network mask to a Quad Dotted network mask.
:param mask: A IPv4 network mask in CIDR notation.
:type mask: int
:return: The specified mask in quad dotted notation.
:rtype: str
"""
try:
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
except (struct.error, ValueError):
logger.error("Could not calculate quad dotted netmask notation for mask %s", mask)
def get_local_ip_addresses():
"""Get a list of non loopback IP addresses configured on the local host.
:rtype: list[str]
"""
addresses = []
for interface in netifaces.interfaces():
for address in netifaces.ifaddresses(interface).get(2, []):
if address["addr"] != "127.0.0.1":
addresses.append(address["addr"])
return addresses
def netmask_to_cidr(mask):
"""Convert a network mask from quad dotted notation to CIDR notation.
:type mask: str
:param mask: The network mask to convert.
:rtype: int
"""
return sum((bin(int(x)).count('1') for x in mask.split('.')))
| Python | 0 | @@ -4969,16 +4969,40 @@
, end_ip
+, flags=netaddr.ZEROFILL
))%0A
|
c61452cb7358c3000992e593349158a0e24a5f51 | Add migration | allseasons/convert/migrations/0004_message.py | allseasons/convert/migrations/0004_message.py | Python | 0.000002 | @@ -0,0 +1,1030 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-07-28 14:05%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('convert', '0003_auto_20170714_1421'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='Message',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('sender', models.CharField(max_length=100)),%0A ('receiver', models.CharField(max_length=100)),%0A ('date', models.DateTimeField(auto_now=True)),%0A ('mtype', models.CharField(choices=%5B('email', 'email')%5D, max_length=100)),%0A ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='convert.EventOfInterest')),%0A %5D,%0A options=%7B%0A 'ordering': ('date',),%0A %7D,%0A ),%0A %5D%0A
|
|
c68d2492b8dcc6fbd7fc91e784994ef9cf43db0f | Create LORA_Repeater_logger.py | LORA_Repeater/LORA_Repeater_logger.py | LORA_Repeater/LORA_Repeater_logger.py | Python | 0 | @@ -0,0 +1,580 @@
+from datetime import datetime%0A%0ANOME_FILE = %22LORA_LOG.txt%22%0A%0Aimport serial%0Aser = serial.Serial('/dev/ttyACM0', 9600)%0A%0Awhile ser.inWaiting()!=0:%0A trash = ser.readline()%0A%0Awhile(True):%0A%0A while ser.inWaiting()!=0:%0A %0A incoming = ser.readline().decode(%22utf-8%22)%0A #print(incoming)%0A%0A parsed = str(incoming).split(%22,%22)%0A%0A time = datetime.now().strftime(%22%25H:%25M:%25S%22)%0A data = parsed%5B1%5D +%22,%22 + parsed%5B2%5D +%22,%22 + parsed%5B3%5D + %22,%22 + time + %22%5Cn%22%0A print(data)%0A%0A with open(NOME_FILE, %22a+%22) as f:%0A %0A f.write(data)%0A %0A %0A
|
|
399af52c20a5c490471f8e98c4c72aa6e99466df | fix a import typo | src/diamond/handler/mysql.py | src/diamond/handler/mysql.py | # coding=utf-8
"""
Insert the collected values into a mysql table
"""
from handler import Handler
import MySQLdb
class MySQLHandler(Handler):
"""
Implements the abstract Handler class, sending data to a mysql table
"""
conn = None
def __init__(self, config=None):
"""
Create a new instance of the MySQLHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Options
self.hostname = self.config['hostname']
self.port = int(self.config['port'])
self.username = self.config['username']
self.password = self.config['password']
self.database = self.config['database']
self.table = self.config['table']
self.col_time = self.config['col_time']
self.col_metric = self.config['col_metric']
self.col_value = self.config['col_value']
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MySQLHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MySQLHandler, self).get_default_config()
config.update({
})
return config
def __del__(self):
"""
Destroy instance of the MySQLHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric
"""
# Just send the data
self._send(str(metric))
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
except BaseException, e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
# Attempt to restablish connection
self._connect()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
db=self.database)
def _close(self):
"""
Close the connection
"""
if self.conn:
self.conn.commit()
self.conn.close()
| Python | 0.999997 | @@ -70,17 +70,17 @@
%22%0A%0Afrom
-h
+H
andler i
|
0fedbb8def5914b36ca09a59e6718d2d6f04a36a | Revert "Update dict, don't iterate it" | src/diamond/utils/classes.py | src/diamond/utils/classes.py | # coding=utf-8
import configobj
import os
import sys
import logging
import inspect
import traceback
from diamond.util import load_class_from_name
from diamond.collector import Collector
from diamond.handler.Handler import Handler
logger = logging.getLogger('diamond')
def load_include_path(paths):
"""
Scan for and add paths to the include path
"""
for path in paths:
# Verify the path is valid
if not os.path.isdir(path):
continue
# Add path to the system path, to avoid name clashes
# with mysql-connector for example ...
if path not in sys.path:
sys.path.insert(1, path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
load_include_path([fpath])
def load_dynamic_class(fqn, subclass):
"""
Dynamically load fqn class and verify it's a subclass of subclass
"""
if not isinstance(fqn, basestring):
return fqn
cls = load_class_from_name(fqn)
if cls == subclass or not issubclass(cls, subclass):
raise TypeError("%s is not a valid %s" % (fqn, subclass.__name__))
return cls
def load_handlers(config, handler_names):
"""
Load handlers
"""
handlers = []
if isinstance(handler_names, basestring):
handler_names = [handler_names]
for handler in handler_names:
logger.debug('Loading Handler %s', handler)
try:
# Load Handler Class
cls = load_dynamic_class(handler, Handler)
cls_name = cls.__name__
# Initialize Handler config
handler_config = configobj.ConfigObj()
# Merge default Handler default config
handler_config.merge(config['handlers']['default'])
# Check if Handler config exists
if cls_name in config['handlers']:
# Merge Handler config section
handler_config.merge(config['handlers'][cls_name])
# Check for config file in config directory
if 'handlers_config_path' in config['server']:
configfile = os.path.join(
config['server']['handlers_config_path'],
cls_name) + '.conf'
if os.path.exists(configfile):
# Merge Collector config file
handler_config.merge(configobj.ConfigObj(configfile))
# Initialize Handler class
h = cls(handler_config)
handlers.append(h)
except (ImportError, SyntaxError):
# Log Error
logger.warning("Failed to load handler %s. %s",
handler,
traceback.format_exc())
continue
return handlers
def load_collectors(paths=None, filter=None):
"""
Scan for collectors to load from path
"""
# Initialize return value
collectors = {}
if paths is None:
return
if isinstance(paths, basestring):
paths = paths.split(',')
paths = map(str.strip, paths)
load_include_path(paths)
for path in paths:
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return collectors
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
subcollectors = load_collectors([fpath])
for key in subcollectors:
collectors[key] = subcollectors[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath) and
len(f) > 3 and
f[-3:] == '.py' and
f[0:4] != 'test' and
f[0] != '.'):
# Check filter
if filter and os.path.join(path, f) != filter:
continue
modname = f[:-3]
try:
# Import the module
mod = __import__(modname, globals(), locals(), ['*'])
except (KeyboardInterrupt, SystemExit), err:
logger.error(
"System or keyboard interrupt "
"while loading module %s"
% modname)
if isinstance(err, SystemExit):
sys.exit(err.code)
raise KeyboardInterrupt
except Exception:
# Log error
logger.error("Failed to import module: %s. %s",
modname,
traceback.format_exc())
continue
collectors.update(get_collectors_from_module(mod))
# Return Collector classes
return collectors
def get_collectors_from_module(mod):
# Find all classes defined in the module
for attrname in dir(mod):
attr = getattr(mod, attrname)
# Only attempt to load classes that are infact classes
# are Collectors but are not the base Collector class
if ((inspect.isclass(attr) and
issubclass(attr, Collector) and
attr != Collector)):
if attrname.startswith('parent_'):
continue
# Get class name
fqcn = '.'.join([mod.__name__, attrname])
try:
# Load Collector class
cls = load_dynamic_class(fqcn, Collector)
# Add Collector class
yield cls.__name__, cls
except Exception:
# Log error
logger.error(
"Failed to load Collector: %s. %s",
fqcn, traceback.format_exc())
continue
def initialize_collector(cls, name=None, configfile=None, handlers=[]):
"""
Initialize collector
"""
collector = None
try:
# Initialize Collector
collector = cls(name=name, configfile=configfile, handlers=handlers)
except Exception:
# Log error
logger.error("Failed to initialize Collector: %s. %s",
cls.__name__, traceback.format_exc())
# Return collector
return collector
| Python | 0 | @@ -5086,26 +5086,25 @@
-collectors.update(
+for name, cls in
get_
@@ -5130,17 +5130,60 @@
ule(mod)
-)
+:%0A collectors%5Bname%5D = cls
%0A%0A #
|
60c10a781501b0a467b55a599d835bdc760c8891 | Add test_utils | tests/test_utils.py | tests/test_utils.py | Python | 0.000006 | @@ -0,0 +1,1614 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22%0Atest_django-watchman%0A------------%0A%0ATests for %60django-watchman%60 decorators module.%0A%22%22%22%0A%0Afrom __future__ import unicode_literals%0A%0Aimport unittest%0A%0Afrom watchman.utils import get_checks%0A%0A%0Aclass TestWatchman(unittest.TestCase):%0A%0A def setUp(self):%0A pass%0A%0A def test_get_checks_returns_all_available_checks_by_default(self):%0A self.assertEqual(%5Bcheck.__name__ for check in get_checks()%5D, %5B'caches_status', 'email_status', 'databases_status'%5D)%0A%0A def test_get_checks_with_check_list_returns_union(self):%0A check_list = %5B'watchman.checks.caches_status'%5D%0A self.assertEqual(%5Bcheck.__name__ for check in get_checks(check_list=check_list)%5D, %5B'caches_status'%5D)%0A%0A def test_get_checks_with_skip_list_returns_difference(self):%0A skip_list = %5B'watchman.checks.caches_status'%5D%0A self.assertEqual(%5Bcheck.__name__ for check in get_checks(skip_list=skip_list)%5D, %5B'databases_status', 'email_status'%5D)%0A%0A def test_get_checks_with_matching_check_and_skip_list_returns_empty_list(self):%0A check_list, skip_list = %5B'watchman.checks.caches_status'%5D, %5B'watchman.checks.caches_status'%5D%0A self.assertEqual(%5Bcheck.__name__ for check in get_checks(check_list=check_list, skip_list=skip_list)%5D, %5B%5D)%0A%0A def test_get_checks_with_check_and_skip_list(self):%0A check_list = %5B'watchman.checks.caches_status', 'watchman.checks.databases_status'%5D%0A skip_list = %5B'watchman.checks.caches_status'%5D%0A self.assertEqual(%5Bcheck.__name__ for check in get_checks(check_list=check_list, skip_list=skip_list)%5D, %5B'databases_status'%5D)%0A
|
|
52219c4d55c7b80b4a2185887675615c4d427298 | Add is_sequence util function | lib/ansible/module_utils/common/collections.py | lib/ansible/module_utils/common/collections.py | Python | 0.999999 | @@ -0,0 +1,902 @@
+# Copyright (c), Sviatoslav Sydorenko %[email protected]%3E 2018%0A# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)%0A%22%22%22Collection of low-level utility functions.%22%22%22%0A%0Afrom __future__ import absolute_import, division, print_function%0A__metaclass__ = type%0A%0A%0Afrom ..six import binary_type, text_type%0Afrom ._collections_compat import Sequence%0A%0A%0Adef is_string(seq):%0A %22%22%22Identify whether the input has a string-like type (inclding bytes).%22%22%22%0A return isinstance(seq, (text_type, binary_type))%0A%0A%0Adef is_sequence(seq, include_strings=False):%0A %22%22%22Identify whether the input is a sequence.%0A%0A Strings and bytes are not sequences here,%0A unless %60%60include_string%60%60 is %60%60True%60%60.%0A%0A Non-indexable things are never of a sequence type.%0A %22%22%22%0A if not include_strings and is_string(seq):%0A return False%0A%0A return isinstance(seq, Sequence)%0A
|
|
973c2098eec88c9656fe858d4815bd7925d532f6 | add Memento pattern | memento/Memento.py | memento/Memento.py | Python | 0.000001 | @@ -0,0 +1,1665 @@
+#%0A# Python Design Patterns: Memento%0A# Author: Jakub Vojvoda %5Bgithub.com/JakubVojvoda%5D%0A# 2016%0A#%0A# Source code is licensed under MIT License%0A# (for more details see LICENSE)%0A# %0A%0Aimport sys%0A%0A#%0A# Memento%0A# stores internal state of the Originator object and protects%0A# against access by objects other than the originator%0A#%0Aclass Memento:%0A def __init__(self, state):%0A self._state = state %0A%0A def setState(self, state):%0A self._state = state;%0A%0A def getState(self):%0A return self._state%0A %0A#%0A# Originator%0A# creates a memento containing a snapshot of its current internal%0A# state and uses the memento to restore its internal state%0A#%0Aclass Originator:%0A def __init__(self):%0A self._state = 0%0A %0A def setState(self, state):%0A print(%22Set state to %22 + str(state) + %22.%22)%0A self._state = state%0A%0A def getState(self):%0A return self._state%0A%0A def setMemento(self, memento):%0A self._state = memento.getState()%0A%0A def createMemento(self):%0A return Memento(self._state)%0A%0A#%0A# CareTaker%0A# is responsible for the memento's safe keeping%0A#%0Aclass CareTaker:%0A def __init__(self, originator):%0A self._originator = originator%0A self._history = %5B%5D%0A%0A def save(self):%0A print(%22Save state.%22)%0A self._history.append(self._originator.createMemento())%0A%0A def undo(self):%0A print(%22Undo state.%22)%0A self._originator.setMemento(self._history%5B-1%5D)%0A self._history.pop()%0A%0A%0Aif __name__ == %22__main__%22:%0A originator = Originator()%0A caretaker = CareTaker(originator)%0A%0A originator.setState(1)%0A caretaker.save()%0A%0A originator.setState(2)%0A caretaker.save()%0A%0A originator.setState(3)%0A caretaker.undo()%0A%0A print(%22Actual state is %22 + str(originator.getState()) + %22.%22)%0A
|
|
cbbf4ec62bc8b8ed2c375e9e60939f932d2034e8 | Create jogovelha.py | src/jogovelha.py | src/jogovelha.py | Python | 0.000002 | @@ -0,0 +1 @@
+%0A
|
|
0e12011edc31f964db8ce419d2f64b6d525be641 | Create delete_occurrences_of_an_element_if_it_occurs_more_than_n_times.py | delete_occurrences_of_an_element_if_it_occurs_more_than_n_times.py | delete_occurrences_of_an_element_if_it_occurs_more_than_n_times.py | Python | 0.00002 | @@ -0,0 +1,313 @@
+#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Delete occurrences of an element if it occurs more than n times%0A#Problem level: 6 kyu%0A%0Adef delete_nth(order,max_e):%0A i=0%0A while(i%3Clen(order)):%0A if order%5B:i%5D.count(order%5Bi%5D)%3E=max_e:%0A order.pop(i)%0A else: i+=1 %0A return order%0A
|
|
06451bdb55faaa7fd22f7bac403d00dda0018c5d | Create setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,1193 @@
+from distutils.core import setup%0Afrom setuptools import find_packages%0A%0Asetup(%0A name=%22nhlscrapi%22,%0A %0A version=nhlscrapi.__version__,%0A %0A description='NHL Scrapr API for Python',%0A %0A author='Rob Howley',%0A author_email='[email protected]',%0A url='https://github.com/robhowley/nhlscrapi',%0A %0A packages=find_packages(),%0A %0A include_package_data=True,%0A %0A license=%22Apache Software License version 2.0%22,%0A %0A platforms='any',%0A %0A zip_safe=False,%0A %0A keywords='nhlscrapi',%0A %0A classifiers=%5B%0A 'Development Status :: 2 - Pre-Alpha',%0A 'Intended Audience :: Developers',%0A 'License :: OSI Approved :: BSD License',%0A 'Natural Language :: English',%0A %22Programming Language :: Python :: 2%22,%0A 'Programming Language :: Python :: 2.6',%0A 'Programming Language :: Python :: 2.7',%0A 'Programming Language :: Python :: 3',%0A 'Programming Language :: Python :: 3.3',%0A 'Operating System :: OS Independent',%0A 'Topic :: Software Development :: Libraries :: Python Modules',%0A %5D,%0A %0A test_suite='tests',%0A %0A # Dependent packages (distributions)%0A install_requires=%5B%5D,%0A)%0A
|
|
f1d277c58f80a352b3715c145ce55a4030a4ab6a | add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,315 @@
+#!/usr/bin/env python%0Afrom distutils.core import setup%0A%0Afrom setuptools import find_packages%0A%0Asetup(%0A name='Fake Zato',%0A version='0.1.0',%0A description='Fake Zato',%0A author='Zetaops',%0A author_email='[email protected]',%0A url='https://github.com/zetaops/fake_zato',%0A packages=find_packages(),%0A)%0A%0A
|
|
a262aeda8b706848b33d30353a9f269daf3acb0d | Bump version | setup.py | setup.py | # Copyright (C) 2011-2012 Yaco Sistemas <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(
name='djangosaml2',
version='0.13.0',
description='pysaml2 integration in Django',
long_description='\n\n'.join([read('README'), read('CHANGES')]),
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
keywords="django,pysaml2,saml2,federated authentication,authentication",
author="Yaco Sistemas",
author_email="[email protected]",
url="https://bitbucket.org/lgs/djangosaml2",
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'pysaml2==2.2.0',
'python-memcached==1.48',
],
)
| Python | 0 | @@ -801,17 +801,17 @@
n='0.13.
-0
+1
',%0A d
|
9eacc3c3b81002c721cb24a1641583bf49bc3a53 | bump version number | setup.py | setup.py | # setup.py inspired by the PyPA sample project:
# https://github.com/pypa/sampleproject/blob/master/setup.py
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
def get_long_description():
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'pymtl',
version = '1.3alpha3', # https://www.python.org/dev/peps/pep-0440/
description = 'Python-based hardware modeling framework',
long_description = get_long_description(),
url = 'https://github.com/cornell-brg/pymtl',
author = 'Derek Lockhart',
author_email = '[email protected]',
# BSD 3-Clause License:
# - http://choosealicense.com/licenses/bsd-3-clause
# - http://opensource.org/licenses/BSD-3-Clause
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages = find_packages(
exclude=['scripts', 'tests', 'ubmark', 'perf_tests']
),
package_data={
'pymtl': [
'tools/translation/verilator_wrapper.templ.c',
'tools/translation/verilator_wrapper.templ.py',
'tools/translation/cpp_wrapper.templ.py',
],
},
install_requires = [
'cffi',
'greenlet',
'pytest',
'pytest-xdist',
# Note: leaving out numpy due to pypy incompatibility
#'numpy==1.9.0',
],
)
| Python | 0.000001 | @@ -467,15 +467,15 @@
'1.
-3
+4
alpha
-3
+0
', #
|
c99b5e564252aff55f14dd63c9cdef1728026561 | Add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,370 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport twid%0A%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name = %22twid%22,%0A version = twid.__version__,%0A description = %22The relevant functions about Taiwan Identification Card system.%22,%0A author = %22Plenty Su%22,%0A author_email = %[email protected]%22,%0A license = %22MIT%22,%0A packages = find_packages()%0A)%0A
|
|
f16a21776eafc7fc373b9c43d5db74cea213c897 | Create SoftwareCategory.py | SoftwareCategory.py | SoftwareCategory.py | Python | 0 | @@ -0,0 +1,1248 @@
+from lxml import etree%0A%0A%0Aclass SoftwareCategory:%0A def __init__(self, parent, category, unlock, scan=False):%0A self.software = category%0A self.feature = unlock%0A if not scan:%0A self.create_software_category(parent, category, unlock)%0A%0A @classmethod%0A def delete_category(cls, feature, software_category):%0A %22%22%22%0A * Parameter: feature (etree element -Tag- 'Feature')%0A * Parameter: software_category (SoftwareCategory Object)%0A * Remove the dependency from feature (etree element)%0A %22%22%22%0A%0A for child in feature:%0A if child.tag == 'SoftwareCategory' and child.text == software_category:%0A feature.remove(child)%0A break%0A%0A def create_software_category(self, parent, category, unlock):%0A %22%22%22%0A * Parameter: parent (etree element -Tag- 'Feature')%0A * Parameter: category (str)%0A * Parameter: unlock (str)%0A * Create an etree subElement with a Tag %22SoftwareCategory%22,%0A * an attribute of Software equal to the parameter category.%0A * Set text to the unlock parameter value%0A * Return etree element%0A %22%22%22%0A etree.SubElement(parent, %22SoftwareCategory%22, Category=category).text = unlock%0A
|
|
c54bd0cf16891bbc8b82dd2cb2af1455795325a2 | add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,834 @@
+import os%0Aimport sys%0Afrom setuptools import setup%0A%0Aexec(open('dsplice/version.py').read())%0A%0Asetup(name='dsplice',%0A version=version,%0A packages=%5B'dsplice'%5D,%0A description='Docker image merge tool',%0A author='Bradley Cicenas',%0A author_email='[email protected]',%0A url='https://github.com/bcicen/dsplice',%0A install_requires=%5B'docker-py%3E=1.7.2'%5D,%0A license='http://opensource.org/licenses/MIT',%0A classifiers=(%0A 'License :: OSI Approved :: MIT License ',%0A 'Natural Language :: English',%0A 'Programming Language :: Python',%0A 'Programming Language :: Python :: 2',%0A 'Programming Language :: Python :: 3',%0A ),%0A keywords='docker image merge devops',%0A entry_points = %7B%0A 'console_scripts' : %5B'dsplice = dsplice.cli:main'%5D%0A %7D%0A )%0A
|
|
5d9ac40273f9dae541ffa20b8767ae289b743b95 | Add loader calls in main | nose2/main.py | nose2/main.py | import os
from nose2.compat import unittest
from nose2 import loader, session
class PluggableTestProgram(unittest.TestProgram):
sessionClass = session.Session
loaderClass = loader.PluggableTestLoader
# XXX override __init__ to warn that testLoader and testRunner are ignored?
def parseArgs(self, argv):
self.session = self.sessionClass()
self.argparse = self.session.argparse # for convenience
# XXX force these? or can it be avoided?
self.testLoader = self.loaderClass(self.session)
# Parse initial arguments like config file paths, verbosity
self.setInitialArguments()
cfg_args, argv = self.argparse.parse_args(argv)
self.handleCfgArgs(cfg_args)
# Parse arguments for plugins (if any) and test names
self.argparse.add_argument('testNames', nargs='*')
args, argv = self.argparse.parse_args(argv)
if argv:
self.argparse.error("Unrecognized arguments: %s" % ' '.join(argv))
self.handleArgs(args)
self.createTests()
def setInitialArguments(self):
self.argparse.add_argument('--config', '-c', nargs='?', action='append',
default=['unittest.cfg', 'nose2.cfg'])
self.argparse.add_argument('--no-user-config', action='store_const',
dest='user_config', const=False, default=True)
self.argparse.add_argument('--no-plugins', action='store_const',
dest='load_plugins', const=False, default=True)
self.argparse.add_argument('--verbose', '-v', action='count')
self.argparse.add_argument('--quiet', action='store_const',
dest='verbose', const=0)
def handleCfgArgs(self, cfg_args):
self.session.loadConfigFiles(*self.findConfigFiles(cfg_args))
if cfg_args.load_plugins:
self.loadPlugins()
# FIXME set verbosity
def findConfigFiles(self, cfg_args):
filenames = cfg_args.config[:]
if cfg_args.user_config:
opts = ('unittest.cfg', 'nose2.cfg', '.unittest.cfg', '.nose2.cfg')
for fn in opts:
filenames.append(os.path.expanduser(fn))
return filenames
def handleArgs(self, args):
# FIXME activate or deactivate plugins,
# pass arguments to plugins that want them
pass
def loadPlugins(self):
# FIXME pass in plugins set via __init__ args
self.session.loadPlugins()
def createTests(self):
# fire plugin hook
pass
def runTests(self):
# fire plugin hook
pass
main_ = PluggableTestProgram
| Python | 0.000001 | @@ -2392,35 +2392,62 @@
nt them%0A
-pas
+self.testNames = args.testName
s%0A%0A def loadP
@@ -2612,20 +2612,204 @@
-pass
+if self.testNames is None:%0A self.test = self.testLoader.loadTestsFromModule(self.module)%0A else:%0A self.test = self.testLoader.loadTestsFromNames(self.testNames)
%0A%0A de
|
3f66dbc15cb0564b22d304e09ed3c0b673d59476 | Add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,133 @@
+from distutils.core import setup%0A%0Asetup(name='fbmq',%0A version='1.0.1',%0A install_requires=%5B'json', 'requests%3E=2.0'%5D%0A )%0A
|
|
a1f17cf4b56edf861c9b650ccd18049ecf168e03 | Add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,1491 @@
+import os%0Aimport re%0A%0Atry:%0A from setuptools import setup%0Aexcept ImportError:%0A from distutils.core import setup%0A%0APACKAGE_NAME = %22humanizepy%22%0A%0AHERE = os.path.abspath(os.path.dirname(__file__))%0Awith open(os.path.join(HERE, %22README.md%22)) as fp:%0A README = fp.read()%0Awith open(os.path.join(HERE, PACKAGE_NAME, %22__init__.py%22)) as fp:%0A VERSION = re.search(%22__version__ = %5C%22(%5B%5E%5C%22%5D+)%5C%22%22, fp.read()).group(1)%0A%0Asetup(%0A name=PACKAGE_NAME,%0A version=VERSION,%0A author=%22James %5C%22clug%5C%22%22,%0A author_email=%[email protected]%22,%0A maintainer=%22James %5C%22clug%5C%22%22,%0A maintainer_email=%[email protected]%22,%0A url=%22https://github.com/clugg/humanizepy%22,%0A description=(%22Humanize values that are readable only for developers.%22),%0A long_description=README,%0A classifiers=%5B%22Development Status :: 5 - Production/Stable%22,%0A %22Environment :: Console%22,%0A %22Intended Audience :: Developers%22,%0A %22License :: OSI Approved :: MIT License%22,%0A %22Natural Language :: English%22,%0A %22Operating System :: OS Independent%22,%0A %22Programming Language :: Python%22,%0A %22Programming Language :: Python :: 2.7%22,%0A %22Programming Language :: Python :: 3.3%22,%0A %22Programming Language :: Python :: 3.4%22,%0A %22Programming Language :: Python :: 3.5%22,%0A %22Topic :: Utilities%22%5D,%0A license=%22MIT%22,%0A keywords=%22humanize values roman numeral binary%22,%0A packages=%5BPACKAGE_NAME%5D%0A)%0A
|
|
92138e7ab37e6a69eb3808f9888b52b9e38deaa0 | remove duplicate classifier | setup.py | setup.py | from distutils.core import setup
from require import __version__
version_str = ".".join(str(n) for n in __version__)
setup(
name = "django-require",
version = version_str,
license = "BSD",
description = "A Django staticfiles post-processor for optimizing with RequireJS.",
author = "Dave Hall",
author_email = "[email protected]",
url = "https://github.com/etianen/django-require",
packages = [
"require",
"require.management",
"require.management.commands",
"require.templatetags",
],
package_data = {
"require": [
"resources/*.jar",
"resources/*.js",
"resources/tests/*.js",
],
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
],
)
| Python | 0.999991 | @@ -1041,50 +1041,8 @@
n%22,%0A
- %22Programming Language :: Python%22,%0A
|
a745bfac07e5efb539c33dcdad2652cc240aec3b | Disable deprecation warnings for SDCH | sdch/sdch.gyp | sdch/sdch.gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'sdch',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../third_party/zlib/zlib.gyp:zlib',
],
'sources': [
'logging_forward.h',
'open-vcdiff/src/addrcache.cc',
'open-vcdiff/src/blockhash.cc',
'open-vcdiff/src/blockhash.h',
'open-vcdiff/src/checksum.h',
'open-vcdiff/src/codetable.cc',
'open-vcdiff/src/codetable.h',
'open-vcdiff/src/compile_assert.h',
'open-vcdiff/src/decodetable.cc',
'open-vcdiff/src/decodetable.h',
'open-vcdiff/src/encodetable.cc',
'open-vcdiff/src/encodetable.h',
'open-vcdiff/src/google/output_string.h',
'open-vcdiff/src/google/vcdecoder.h',
'open-vcdiff/src/headerparser.cc',
'open-vcdiff/src/headerparser.h',
'open-vcdiff/src/instruction_map.cc',
'open-vcdiff/src/instruction_map.h',
'open-vcdiff/src/rolling_hash.h',
'open-vcdiff/src/testing.h',
'open-vcdiff/src/varint_bigendian.cc',
'open-vcdiff/src/varint_bigendian.h',
'open-vcdiff/src/vcdecoder.cc',
'open-vcdiff/src/vcdiff_defs.h',
'open-vcdiff/src/vcdiffengine.cc',
'open-vcdiff/src/vcdiffengine.h',
'open-vcdiff/vsprojects/config.h',
'open-vcdiff/vsprojects/stdint.h',
],
'include_dirs': [
'open-vcdiff/src',
],
'direct_dependent_settings': {
'include_dirs': [
'open-vcdiff/src',
],
},
'conditions': [
[ 'OS == "linux" or OS == "android"', { 'include_dirs': [ 'linux' ] } ],
[ 'os_bsd==1 or OS=="solaris"', { 'include_dirs': [ 'bsd' ] } ],
[ 'OS == "ios"', { 'include_dirs': [ 'ios' ] } ],
[ 'OS == "mac"', { 'include_dirs': [ 'mac' ] } ],
[ 'OS == "win"', { 'include_dirs': [ 'open-vcdiff/vsprojects' ] } ],
],
# open-vcdiff's logging.h introduces static initializers. This was
# reported upstream years ago (
# https://code.google.com/p/open-vcdiff/issues/detail?id=33 ). Since
# upstream won't fix this, work around it on the chromium side:
# Inject a header that forwards to base/logging.h instead (which doesn't
# introduce static initializers, and which prevents open-vcdiff's
# logging.h from being used).
'variables': {
'logging_path': 'logging_forward.h',
'conditions': [
# gyp leaves unspecified what the cwd is when running the compiler,
# and gyp/linux doesn't have a built-in way for forcing an include.
# So hardcode the base directory. If this spreads, provide native
# support in gyp, like we have for gyp/mac and gyp/windows.
# path.
['"<(GENERATOR)"=="ninja"', { 'logging_dir': '../..' },
{ 'logging_dir': '.' }
],
],
},
# GCC_PREFIX_HEADER is relative to the current directory,
# ForcedIncludeFiles is relative to include_dirs, cflags relative to the
# build directory.
'xcode_settings': { 'GCC_PREFIX_HEADER': '<(logging_path)' },
'msvs_settings': {
'VCCLCompilerTool': { 'ForcedIncludeFiles': [ 'sdch/<(logging_path)' ] }
},
'cflags': [ '-include', '<(logging_dir)/sdch/<(logging_path)' ],
},
],
}
| Python | 0.000005 | @@ -2137,24 +2137,163 @@
cts' %5D %7D %5D,%0A
+ # TODO(mark): Remove usage of the deprecated auto_ptr.%0A %5B 'clang == 1', %7B 'cflags': %5B '-Wno-deprecated-declarations' %5D %7D %5D,%0A
%5D,%0A
|
ac94d2cf9b4ab775fb7a125a83abc4fa59d56136 | Add setuptools build | setup.py | setup.py | Python | 0 | @@ -0,0 +1,546 @@
+from setuptools import setup, find_packages%0Aimport os%0A%0Ahere = os.path.abspath(os.path.dirname(__file__))%0A%0Awith open(os.path.join(here, 'README.md')) as f:%0A long_description = f.read()%0A%0Asetup(%0A name='pyshadowcopy',%0A version='0.0.1',%0A description='Python class to work with Shadow Copy on Windows',%0A long_description=long_description,%0A url='https://github.com/sblosser/pyshadowcopy',%0A author='sblosser',%0A license='MIT',%0A keywords=%5B'Windows', 'VSS', 'win32'%5D,%0A py_modules=%5B'vss'%5D,%0A install_requires=%5B'pypiwin32'%5D,%0A)%0A
|
|
edcf0e371ea3430c7d0c515dbf59e39e3522c076 | Add license information to setup.py | setup.py | setup.py | from distutils.core import setup
import loginurl
setup(name='django-loginurl',
version=loginurl.__version__,
description='Allowing an anonymous user to log in by only visiting a URL',
author='Fajran Iman Rusadi',
author_email='[email protected]',
url='http://github.com/fajran/django-loginurl/',
download_url='http://github.com/fajran/django-loginurl/tarball/v0.1.2',
packages=['loginurl', 'loginurl.management', 'loginurl.management.commands'],
package_dir={'loginurl': 'loginurl'},
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'],
)
| Python | 0 | @@ -320,16 +320,37 @@
nurl/',%0A
+ license='BSD',%0A
do
|
ef53ea9d1754ce5056b7b872ad0b7cd99e4af2bc | Add setup file | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,723 @@
+# -*- coding: utf-8 -*-%0A%0A%0Aimport re%0Afrom setuptools import setup%0A%0A%0Aversion = re.search(%0A '%5E__version__%5Cs*=%5Cs*%22(.*)%22',%0A open('bundigo/bundigo.py').read(),%0A re.M%0A ).group(1)%0A%0A%0Awith open('README.md', 'rb') as f:%0A long_descr = f.read().decode('utf-8')%0A%0A%0Asetup(%0A name = 'bundigo',%0A packages = %5B'bundigo'%5D,%0A entry_points = %7B%0A 'console_scripts': %5B'bundigo = bundigo.bundigo:main'%5D%0A %7D,%0A version = version,%0A description = %22Your one-stop shop for starting a software project%22,%0A long_description = long_descr,%0A license = 'MIT',%0A author = 'Jared Smith',%0A author_email = '[email protected]',%0A url = 'https://jaredmichaelsmith.com/bundigo',%0A install_requires=%5B%0A %5D,%0A)
|
|
d9b844db2dc0453c073050c6ce7db18c3d48b57c | add setup.py file | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,914 @@
+import setuptools%0A%0Asetuptools.setup(%0A install_requires=%5B'pyyaml'%5D,%0A author = 'Caleb Boylan',%0A name = 'apt-package-mirror',%0A description = 'Python script for running an apt package mirror',%0A author_email = '[email protected]',%0A url = 'https://github.com/squidboylan/apt-package-mirror',%0A version = '0.1.1',%0A classifiers = %5B%0A 'Development Status :: 3 - Alpha',%0A 'Intended Audience :: System Administrators',%0A 'License :: OSI Approved :: Apache Software License',%0A 'Operating System :: POSIX :: Linux',%0A 'Programming Language :: Python',%0A 'Programming Language :: Python :: 2',%0A 'Programming Language :: Python :: 2.7',%0A %5D,%0A packages=setuptools.find_packages(),%0A entry_points = %7B%0A 'console_scripts': %5B'apt-package-mirror=mirror:main'%5D,%0A %7D%0A)%0A
|
|
ac2f2b72c1f653f15058b300c82060c90adf146b | Update for 1.3.0 release | setup.py | setup.py | # Importing these adds a 'bdist_mpkg' option that allows building binary
# packages on OS X.
try:
import setuptools
import bdist_mpkg
except ImportError:
pass
import os
import numpy.distutils.core as core
# Configure our C modules that are built with f2py.
tridiag = core.Extension(name = 'dadi.tridiag',
sources = ['dadi/tridiag.pyf', 'dadi/tridiag.c'])
int_c = core.Extension(name = 'dadi.integration_c',
sources = ['dadi/integration_c.pyf',
'dadi/integration1D.c',
'dadi/integration2D.c',
'dadi/integration3D.c',
'dadi/integration_shared.c',
'dadi/tridiag.c'])
# If we're building a distribution, try to update svnversion. Note that this
# fails silently.
for arg in os.sys.argv:
if arg.count('sdist') or arg.count('bdist'):
os.system("svn up")
os.system("svn info > dadi/svnversion")
core.setup(name='dadi',
version='1.2.3',
author='Ryan Gutenkunst',
author_email='[email protected]',
url='http://dadi.googlecode.com',
ext_modules = [tridiag, int_c],
scripts=['scripts/ms_jsfs.py'],
packages=['dadi'],
package_data = {'dadi':['svnversion'],
'tests':['IM.fs']},
license='BSD'
)
| Python | 0 | @@ -1092,11 +1092,11 @@
='1.
-2.3
+3.0
',%0A
|
916cdddfa1e861b8402bdda935c2a9c46a5b6566 | Bump version to 1.2. | setup.py | setup.py | import glob
import os
import platform
import subprocess
import sys
from setuptools import setup, Command, Extension
from setuptools.command.test import test as TestCommand
def define_extensions(file_ext):
return [Extension("lightfm.lightfm_fast",
['lightfm/lightfm_fast%s' % file_ext],
extra_link_args=["-fopenmp"],
extra_compile_args=['-fopenmp',
'-march=native',
'-ffast-math'])]
def set_gcc():
"""
Try to find and use GCC on OSX for OpenMP support.
"""
if 'darwin' in platform.platform().lower():
gcc_binaries = sorted(glob.glob('/usr/local/bin/gcc-*'))
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
os.environ["CC"] = gcc
else:
raise Exception('No GCC available. Install gcc from Homebrew '
'using brew install gcc.')
class Cythonize(Command):
"""
Compile the extension .pyx files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import Cython
from Cython.Build import cythonize
cythonize(define_extensions('.pyx'))
class Clean(Command):
"""
Clean build files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pth = os.path.dirname(os.path.abspath(__file__))
subprocess.call(['rm', '-rf', os.path.join(pth, 'build')])
subprocess.call(['rm', '-rf', os.path.join(pth, 'lightfm.egg-info')])
subprocess.call(['find', pth, '-name', 'lightfm*.pyc', '-type', 'f', '-delete'])
subprocess.call(['rm', os.path.join(pth, 'lightfm', 'lightfm_fast.so')])
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
set_gcc()
setup(
name='lightfm',
version='1.1',
description='LightFM recommendation model',
url='https://github.com/lyst/lightfm',
download_url='https://github.com/lyst/lightfm/tarball/1.1',
packages=['lightfm'],
install_requires=['numpy'],
tests_require=['pytest', 'requests', 'scikit-learn', 'scipy'],
cmdclass={'test': PyTest, 'cythonize': Cythonize, 'clean': Clean},
author='Lyst Ltd (Maciej Kula)',
author_email='[email protected]',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Artificial Intelligence'],
ext_modules=define_extensions('.c')
)
| Python | 0 | @@ -2483,17 +2483,17 @@
sion='1.
-1
+2
',%0A d
@@ -2638,17 +2638,17 @@
rball/1.
-1
+2
',%0A p
|
379488ee2980e1b33753d098d88fb1139a69deeb | add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,438 @@
+from setuptools import setup, find_packages%0Asetup(%0A name=%22hs-onliner%22,%0A version=%220.0.1%22,%0A author=%22Firemark & Kytes%22,%0A author_email=%[email protected]%22,%0A description=%22Site to view who will be in hackerspace every week.%22,%0A license=%22MIT%22,%0A keywords=%22example documentation tutorial%22,%0A url=%22https://github.com/firemark/hs-onliner%22,%0A packages=find_packages(),%0A install_requires=(%0A 'Flask==0.10.1'%0A )%0A)
|
|
b63a6ababb1a66ed3766399328c5b9c4ac0a7ce3 | Bump version | setup.py | setup.py | from setuptools import setup
setup(
name="funsize",
version="0.28",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.16",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
| Python | 0 | @@ -70,9 +70,9 @@
%220.2
-8
+9
%22,%0A
|
a281bad5905da4710314d657943cc145b7d748d4 | add minimal setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,257 @@
+import setuptools%0A%0Asetuptools.setup(%0A name='tvb-hpc',%0A version='0.0',%0A description='HPC code generation for TVB',%0A author='TVB-HPC Contributors',%0A url='https://github.com/the-virtual-brain/tvb-hpc',%0A packages=setuptools.find_packages(),%0A)%0A
|
|
b383fadf43d3fb31d1501c780d4436717cc43776 | add setup.py | setup.py | setup.py | Python | 0.000001 | @@ -0,0 +1,1470 @@
+import os%0Afrom setuptools import setup, find_packages%0A%0Aos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))%0A%0Asetup(%0A name='django-payline-dotir',%0A version='0.1',%0A author='Mahdi Bornazadeh',%0A author_email='[email protected]',%0A description='Persian payline.ir payment gateway in django.',%0A long_description=open(%22README.md%22, 'rb').read().decode('utf-8'),%0A license='BSD License',%0A url='http://www.bornazadeh.ir/payline',%0A zip_safe=False,%0A include_package_data=True,%0A packages=find_packages(),%0A%0A install_requires=%5B%0A %22requests%22,%0A %5D,%0A%0A classifiers=%5B%0A %22Development Status :: 4 - Beta%22,%0A %22Environment :: Web Environment%22,%0A %22Framework :: Django%22,%0A %22Intended Audience :: Developers%22,%0A %22License :: OSI Approved :: BSD License%22,%0A %22Operating System :: OS Independent%22,%0A %22Programming Language :: Python%22,%0A %22Programming Language :: Python :: 2.6%22,%0A %22Programming Language :: Python :: 2.7%22,%0A %22Programming Language :: Python :: 3%22,%0A %22Programming Language :: Python :: 3.3%22,%0A %22Topic :: Internet :: WWW/HTTP%22,%0A %22Topic :: Internet :: WWW/HTTP :: Dynamic Content%22,%0A %22Topic :: Internet :: WWW/HTTP :: WSGI%22,%0A %22Topic :: Software Development :: Libraries :: %22%0A %22Application Frameworks%22,%0A %22Topic :: Software Development :: Libraries :: Python Modules%22,%0A %5D,%0A)%0A
|
|
6bc555b93e09ab18a5778487cf3eb47329e83098 | Set version to our own. | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name="python-instagram",
version="0.8.0",
description="Instagram API client",
license="MIT",
install_requires=["simplejson","httplib2"],
author="Instagram, Inc",
author_email="[email protected]",
url="http://github.com/Instagram/python-instagram",
packages = find_packages(),
keywords= "instagram",
zip_safe = True)
| Python | 0 | @@ -111,16 +111,22 @@
n=%220.8.0
+powll1
%22,%0A
|
68fac699c5506f80ab727a4c569d8797294584bd | Bump the version number. | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
setup(
name='hapipy',
version='2.9.0',
description="A python wrapper around HubSpot's APIs",
long_description=open('README.md').read(),
author='HubSpot Dev Team',
author_email='[email protected]',
url='https://github.com/HubSpot/hapipy',
download_url='https://github.com/HubSpot/hapipy/tarball/v2.9.0',
license='LICENSE.txt',
packages=['hapi', 'hapi.mixins'],
install_requires=[
'nose==1.1.2',
'unittest2==0.5.1',
'simplejson>=2.1.2'
],
)
| Python | 0.000002 | @@ -84,25 +84,25 @@
ersion='2.9.
-0
+1
',%0A descr
|
c1c49b0e1718331663ee109f3417aff97fd23b70 | Add minimal setup.py for RTD | setup.py | setup.py | Python | 0 | @@ -0,0 +1,184 @@
+# Minimal setup.py to get readthedocs working, not recommended for real use%0A%0Afrom distutils.core import setup%0A%0Asetup(name=%22h11%22,%0A version=%220.0.0%22,%0A packages=%5B%22h11%22%5D,%0A )%0A
|
|
f34dd8ab047275b8d29366599621443a8bc468c9 | Add launcher script for nbconvert | databaker/databaker_nbconvert.py | databaker/databaker_nbconvert.py | Python | 0 | @@ -0,0 +1,992 @@
+#!/usr/bin/env python%0Aimport os%0Aimport subprocess%0Aimport sys%0A%0A%0Adef main(argv):%0A if len(argv) == 0 or len(argv) %3E 2:%0A print(%22Usage: databaker_nbconvert.py %3Cnotebook_file%3E %3Cinput_file%3E%22)%0A print()%0A print(%22%3Cinput_file%3E is optional; it replaces DATABAKER_INPUT_FILE%22)%0A print(%22in the notebook.%22)%0A print(%22The input file should also be in the same directory as the%22)%0A print(%22notebook.%22)%0A sys.exit(1)%0A%0A process_env = os.environ.copy()%0A%0A if len(argv) == 2:%0A process_env%5B'DATABAKER_INPUT_FILE'%5D = argv%5B1%5D%0A%0A # TODO get custom templates working; according to this:%0A # https://github.com/jupyter/nbconvert/issues/391%0A # they should work, but I get TemplateNotFound when using absolute path%0A # for template.%0A cmd_line = %5B'jupyter', 'nbconvert', '--to', 'html', '--execute', argv%5B0%5D%5D%0A print(%22Running:%22, ' '.join(cmd_line))%0A subprocess.call(args=cmd_line, env=process_env)%0A%0A%0Aif __name__ == '__main__':%0A main(sys.argv%5B1:%5D)%0A
|
|
afe216da917c171ff857de122be64a9b2a7d3e9c | migrate doaj client test from harvester | doajtest/unit/test_api_client.py | doajtest/unit/test_api_client.py | Python | 0 | @@ -0,0 +1,1072 @@
+%22%22%22%0AUnit tests for the DOAJ client%0A%22%22%22%0A%0Afrom unittest import TestCase%0A%0Afrom doajtest.fixtures.journals import JournalFixtureFactory%0Afrom portality.api.v1.client import client as doajclient, models%0Afrom portality.lib import dataobj%0A%0Aclass TestDOAJ(TestCase):%0A def setUp(self):%0A pass%0A%0A def tearDown(self):%0A pass%0A%0A def test_01_journal_issns(self):%0A source = JournalFixtureFactory.make_journal_source()%0A j = models.Journal(source)%0A issns = j.all_issns()%0A assert %221234-5678%22 in issns%0A assert %229876-5432%22 in issns%0A assert %224444-4444%22 in issns%0A assert %225555-5555%22 in issns%0A assert %220101-0101%22 in issns%0A assert len(issns) == 5%0A%0A def test_02_validate_article(self):%0A invalid = %7B%22bibjson%22 : %7B%7D%7D%0A%0A # first check the article validator works%0A with self.assertRaises(dataobj.DataStructureException):%0A models.ArticleValidator(invalid)%0A%0A # then check that the api validation method works%0A a = models.Article(invalid)%0A assert not a.is_api_valid()%0A
|
|
fc9dd735c96ae21b4a64286e4c9ebcedc0e1fbca | Add script to subset kerning plist. | subsetKerning.py | subsetKerning.py | Python | 0 | @@ -0,0 +1,2052 @@
+import sys%0Afrom plistlib import writePlist%0Afrom defcon import Font%0A%0A%0A__doc__ = '''%0ASubset kerning in UFO given a list of glyphs provided.%0AWill export new plist files that can be swapped into the UFO.%0A%0AUsage:%0Apython subsetKerning.py subsetList font.ufo%0A'''%0A%0A%0Aclass SubsetKerning(object):%0A %22%22%22docstring for SubsetKerning%22%22%22%0A def __init__(self, font, subsetFile):%0A self.font = Font(font)%0A self.subsetFile = subsetFile%0A%0A with open(self.subsetFile, 'r') as ssfile:%0A rawData = ssfile.read()%0A self.subsetGlyphList = %5Bline.split()%5B0%5D for line in rawData.splitlines()%5D%0A %0A%0A def subsetGroups(self):%0A%0A newGroups = %7B%7D%0A for groupName, glyphList in self.font.groups.items():%0A combinedGlyphs = set(self.subsetGlyphList) & set(glyphList)%0A newGlyphList = sorted(list(combinedGlyphs))%0A%0A if len(newGlyphList):%0A newGroups%5BgroupName%5D = newGlyphList%0A return newGroups%0A%0A%0A%0A def subsetKerning(self):%0A newGroups = self.subsetGroups()%0A newKerning = %7B%7D%0A plistStyleKerning = %7B%7D%0A%0A # All allowed items for kerning, which are our subset glyphs, %0A # plus the groups filtered earlier:%0A allowedItems = set(newGroups) %7C set(self.subsetGlyphList)%0A%0A for %5Bleft, right%5D, value in self.font.kerning.items():%0A if set(%5Bleft, right%5D) %3C= allowedItems:%0A newKerning%5Bleft, right%5D = value%0A%0A # Since the kerning paradigm stored in the plist differs from the %0A # in the kerning object, the data structure needs some modification:%0A%0A for %5Bleft, right%5D, value in newKerning.items():%0A partnerDict = plistStyleKerning.setdefault(left, %7B%7D)%0A partnerDict%5Bright%5D = value%0A%0A return plistStyleKerning%0A%0A%0Adef run():%0A sk = SubsetKerning(sys.argv%5B-1%5D, sys.argv%5B-2%5D)%0A%0A writePlist(sk.subsetGroups(), 'subset_groups.plist')%0A writePlist(sk.subsetKerning(), 'subset_kerning.plist')%0A print 'done'%0A%0A%0Aif len(sys.argv) == 3:%0A run()%0Aelse:%0A print __doc__%0A%0A
|
|
35258c9b37997801af05875f04f450050a3e5273 | Create tarea5.py | tareas/tarea5.py | tareas/tarea5.py | Python | 0.000001 | @@ -0,0 +1,1238 @@
+#josue de leon %0A#lista de supermercado%0A%0A#una tupla para las opciones, y una lista para la lista%0Aimport os%0Alista = %5B%5D%0Aopciones = (%221. A%C3%B1adir producto a la lista.%22,%222. Borrar el ultimo producto de la lista.%22,%223. Mostrar toda la lista.%22)%0Acontrol = 1%0Aprint(%22%5Cn%5Cn%5CtBienvenido a su lista de compras.%5Cn%5CnRecuerde que su lista esta en blanco, asi que lo primero que debe hacer es%5Cnagregar nuevos elementos.%22)%0Awhile control == 1:%0A%09print(%22%5CnSeleccione que desea hacer:%5Cn%5Cn%5Ct%22+str(opciones%5B0%5D)+%22%5Cn%5Ct%22+str(opciones%5B1%5D)+%22%5Cn%5Ct%22+str(opciones%5B2%5D)+%22%5Cn%22)%0A%09seleccion = int(input(%22Escoja una opcion: %22))%0A%09if seleccion == 1:%0A%09%09print('*Ingrese un producto para a%C3%B1adir a su lista.%5Cn*Ingrese %22fin%22 para finalizar su lista.')%0A%09%09producto_lista = ''%0A%09%09while producto_lista.lower() != %22fin%22: %0A%09%09%09producto_lista = input()%0A%09%09%09if producto_lista.lower() != %22fin%22:%0A%09%09%09%09lista.append(producto_lista)%09%0A%09elif seleccion == 2:%0A%09%09del lista%5B(len(lista)-1)%5D%0A%09%09print(%22El ultimo elemento ha sido borrado!%22)%0A%09elif seleccion == 3:%0A%09%09centinela = 1%0A%09%09while centinela %3C= len(lista):%0A%09%09%09print(%22- %22+lista%5Bcentinela-1%5D)%0A%09%09%09centinela = centinela + 1%0A%09control = int(input('%5Cn%C2%BFDesea continuar con su lista?%5Cn- Presione %221%22 para CONTINUAR.%5Cn- Si desea SALIR presione %220%22: '))%0A%09os.system(%22cls%22)%0A
|
|
0c4d6491fe89e339e9d9505e6e46e8317e78034a | Add telnet testing script | telnet/telnet.py | telnet/telnet.py | Python | 0.000001 | @@ -0,0 +1,581 @@
+#!/usr/bin/env python3%0Aimport pexpect%0Aimport os, sys, time%0A%0Aip = %22127.0.0.1%22%0Aport = %2210000%22%0Ausername = %[email protected]%22%0Apassword = %2212345%22%0A%0Aos.remove('../maildir/.lock')%0A%0Achild = pexpect.spawn('telnet '+ ip + ' ' + port)%0A%0Achild.expect('.%5Cn')%0Achild.logfile = sys.stdout.buffer%0Atime.sleep(1)%0Achild.sendline('1 login ' + username + ' ' + password)%0Achild.expect('1 OK logged in successfully as [email protected]')%0Achild.sendline('2 select INBOX')%0Achild.expect('successful')%0Achild.sendline('3 fetch 1:2 (FLAGS BODY%5BHEADER.FIELDS (DATE FROM)%5D)')%0Achild.expect('unimplemented')%0A
|
|
945fe81c4a0f970e57ff7c5a13d8c3aa03df5fc6 | Add function to save/restore environment between configuration checks. | numscons/checkers/new/common.py | numscons/checkers/new/common.py | Python | 0 | @@ -0,0 +1,709 @@
+from copy import deepcopy%0A%0Adef save_and_set(env, opts, keys=None):%0A %22%22%22Put information from option configuration into a scons environment, and%0A returns the saved keys given as config opts args.%22%22%22%0A saved_keys = %7B%7D%0A if keys is None:%0A keys = opts.keys()%0A for k in keys:%0A saved_keys%5Bk%5D = (env.has_key(k) and deepcopy(env%5Bk%5D)) or %5B%5D%0A%0A kw = dict(zip(keys, %5Bopts%5Bk%5D for k in keys%5D))%0A if kw.has_key('LINKFLAGSEND'):%0A env.AppendUnique(**%7B'LINKFLAGSEND' : kw%5B'LINKFLAGSEND'%5D%7D)%0A del kw%5B'LINKFLAGSEND'%5D%0A%0A env.Prepend(**kw)%0A return saved_keys%0A%0Adef restore(env, saved):%0A keys = saved.keys()%0A kw = dict(zip(keys, %5Bsaved%5Bk%5D for k in keys%5D))%0A env.Replace(**kw)%0A
|
|
207f9f1ed34066c0ed00842cd6287eb6907078f8 | fix NameError in stub functions returning 'a'; programmatically call all stub functions using inspect | 0mq/stub_server.py | 0mq/stub_server.py | import argparse
import inspect
import re
import operator
import time
import sys
#
import jsonrpc2_zeromq
import jsonrpc2_zeromq.common
class RPCTestServer(jsonrpc2_zeromq.RPCServer):
def handle_initialize_method(self, M_c, M_r, T, i):
X_L = {}
X_D = [[]]
return M_c, M_r, X_L, X_D
def handle_analyze_method(self, S, T, X_L, X_D, M_C, M_R, kernel_list,
n_steps, c, r, max_iterations, max_time):
X_L_prime = {}
X_D_prime = [[]]
return X_L_prime, X_D_prime
def handle_simple_predictive_sample_method(self, M_c, X_L, X_D, Y, q):
x = []
return x
def handle_simple_predictive_probability_method(self, M_c, X_L, X_D, Y, Q,
n):
p = None
return p
def handle_impute_method(self, M_c, X_L, X_D, Y, q, n):
e = []
return e
def handle_conditional_entropy_method(M_c, X_L, X_D, d_given, d_target,
n=None, max_time=None):
e = None
return e
def handle_predictively_related_method(self, M_c, X_L, X_D, d,
n=None, max_time=None):
m = []
return m
def handle_contextual_structural_similarity_method(self, X_D, r, d):
s = []
return s
def handle_structural_similarity_method(self, X_D, r):
s = []
return s
def handle_structural_anomalousness_columns_method(self, X_D):
a = []
return s
def handle_structural_anomalousness_rows_method(self, X_D):
a = []
return s
def handle_predictive_anomalousness_method(self, M_c, X_L, X_D, T, q, n):
a = []
return s
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--is_client', action='store_true')
parser.add_argument('--port', type=int, default=5557)
parser.add_argument('--lifetime', type=int, default=-1)
args = parser.parse_args()
is_client = args.is_client
port = args.port
lifetime = args.lifetime
endpoint = "tcp://127.0.0.1:%s" % port
if is_client:
client = jsonrpc2_zeromq.RPCClient(endpoint=endpoint)
args = ("M_c", "X_L", "X_D", "Y", "q")
args_joined = ", ".join(args)
msg = client.simple_predictive_sample(*args)
print msg, " = client.simple_predictive_sample(" + args_joined + ")"
#
# method_re = re.compile('handle_(.*)_method')
# server_method_names = filter(method_re.match, dir(RPCTestServer))
# for server_method_name in server_method_names:
# print "server_method_name: ", server_method_name
# method_name = method_re.match(server_method_name).groups()[0]
# method = RPCTestServer.__dict__[method_name]
# arg_str_list = inspect.getargspec(method).args[1:]
# arg_str_list_joined = ", ".join(arg_str_list)
# print arg_str_list
# msg = client.__getattr__(method_name)(*args)
# print msg, " = client." + method_name + "(" + arg_str_list_joined + ")"
else:
print "starting server"
server = RPCTestServer(endpoint)
server.start()
if lifetime != -1:
print "killing server in ", lifetime, " seconds"
time.sleep(lifetime)
print "killing server"
server.stop()
server.join()
server.close()
time.sleep(0.1)
print "server killed"
| Python | 0 | @@ -1532,33 +1532,33 @@
%0A return
-s
+a
%0A%0A def handle
@@ -1629,33 +1629,33 @@
%0A return
-s
+a
%0A%0A def handle
@@ -1748,17 +1748,17 @@
return
-s
+a
%0A%0Aif __n
@@ -2103,25 +2103,30 @@
gs.lifetime%0A
+ #
%0A
-
endpoint
@@ -2152,24 +2152,30 @@
:%25s%22 %25 port%0A
+ #%0A
if is_cl
@@ -2253,235 +2253,8 @@
- args = (%22M_c%22, %22X_L%22, %22X_D%22, %22Y%22, %22q%22)%0A args_joined = %22, %22.join(args)%0A msg = client.simple_predictive_sample(*args)%0A print msg, %22 = client.simple_predictive_sample(%22 + args_joined + %22)%22%0A #%0A #
met
@@ -2302,18 +2302,16 @@
%0A
- #
server_
@@ -2376,18 +2376,16 @@
%0A
- #
for ser
@@ -2436,21 +2436,12 @@
-#
- print %22
serv
@@ -2453,133 +2453,8 @@
thod
-_name: %22, server_method_name%0A # method_name = method_re.match(server_method_name).groups()%5B0%5D%0A # method
= R
@@ -2475,16 +2475,23 @@
_dict__%5B
+server_
method_n
@@ -2494,34 +2494,32 @@
od_name%5D%0A
- #
arg_str_lis
@@ -2541,16 +2541,23 @@
argspec(
+server_
method).
@@ -2568,26 +2568,24 @@
%5B1:%5D%0A
- #
arg_str
@@ -2635,40 +2635,95 @@
-# print arg_str_list
+ #%0A method_name = method_re.match(server_method_name).groups()%5B0%5D
%0A
#
@@ -2714,26 +2714,24 @@
)%5B0%5D%0A
- #
msg = c
@@ -2765,17 +2765,25 @@
me)(*arg
-s
+_str_list
)%0A
@@ -2783,18 +2783,16 @@
%0A
- #
pri
|
bef94fea3318c835c1474ebdfe74f89d8251baf9 | add test_cover.py | pylayers/gis/test/test_cover.py | pylayers/gis/test/test_cover.py | Python | 0.000003 | @@ -0,0 +1,1536 @@
+import pylayers.gis.ezone as ez%0Afrom pylayers.gis.gisutil import ent,ext2qt%0Aimport matplotlib.pyplot as plt%0Aimport numpy as np%0Aimport seaborn as sns%0Aimport os%0Aimport smopy%0Afrom cartopy import config%0Aimport cartopy.crs as ccrs%0Afig = plt.figure(figsize=(12,12))%0Awhite = np.zeros((10,10))%0Aax = fig.add_subplot(111)%0Az = ez.Ezone('N48W002')%0Az.loadh5()%0Az.rebase()%0Azoom=11%0Ap = (48.721095,-1.830548)%0Aprint %22p : %22,p%0Axtile,ytile=smopy.deg2num(p%5B0%5D,p%5B1%5D,zoom,do_round=True)%0Aprint %22xtile,ytile : %22,xtile,ytile%0A(lat0,lon0)=smopy.num2deg(xtile,ytile,zoom,do_round=True)%0A(lat1,lon1)=smopy.num2deg(xtile+1,ytile+1,zoom,do_round=True)%0Aprint %22lat,lon WN%22,lat0,lon0%0Aprint %22lat,lon ES%22,lat1,lon1%0A%0A#mp = smopy.Map((lat1,lon0,lat0,lon1),z=zoom)%0Amp = smopy.Map((48,-2,49,-1),z=zoom)%0A##f,a = z.show(alpha=0.3)%0Abox_tile = mp.box_tile%0Aprint box_tile%0AL_ll,l_ll=smopy.num2deg(box_tile%5B0%5D,box_tile%5B1%5D+1,zoom)%0AL_ur,l_ur=smopy.num2deg(box_tile%5B2%5D+1,box_tile%5B3%5D,zoom)%0Aextent_true = np.array((l_ll,l_ur,L_ll,L_ur))%0Aprint extent_true%0A#print extent_true%0A##print z.extent%0Af,a = z.show(fig=fig,ax=ax,alpha=0.4)%0A#f,a=plt.subplots(1,1)%0Aim1 = a.imshow(mp.img,extent=extent_true,alpha=0.6)%0Aim2 = a.imshow(white,extent=(-2.2,-0.9,47.9,49.1),alpha=0)%0Aa.plot(p%5B1%5D,p%5B0%5D,'ob')%0A###mp.box_tile=(0,0,73000,111000)%0A###mp.h=73000%0A###mp.w=111000%0A###mp.box_tile=(0,111000,73000,0)%0A###mp.xmin = 0%0A###mp.ymin=0%0A###ax = mp.show_mpl(figsize=(20,10),alpha=1)%0A##fig=plt.gcf()%0A###z.extent_c=(0,1024,0,1280)%0A###z.extent_c=(506,509,351,355)%0A###print z.extent_c%0Aa = z.cover(Ht=2,Hr=2,Rmax=10000)%0A##%0A
|
|
1a29e182a196e3fc4fbe00c0db6e22c2619473f3 | Add iOSExtractor test | strings2pot/extractors/ios_test.py | strings2pot/extractors/ios_test.py | Python | 0 | @@ -0,0 +1,2440 @@
+# -*- coding: utf-8 -*-%0A%0Aimport os%0Aimport unittest%0Aimport ios%0A%0Aclass iOSExtractorTest(unittest.TestCase):%0A def setUp(self):%0A self.mock_source_file = 'mock_source_ios.strings'%0A self.mock_destination_file = 'mock_destination_ios.pot'%0A def mock_context_id_generator(s): return 'MOCK_CONTEXT_ID'%0A self.mock_context_id_generator = mock_context_id_generator%0A%0A with open(self.mock_source_file, 'a') as source_file:%0A source_file.write(%22%22%22%0A/* Test string with a placeholder */%0A%0A%22Test string with a %5C%22%25@%5C%22 here%22 = %22Test string with a %5C%22%25@%5C%22 here%22;%0A %22%22%22)%0A %0A def tearDown(self):%0A try:%0A os.unlink(self.mock_source_file)%0A os.unlink(self.mock_destination_file)%0A except Exception, e:%0A pass%0A%0A # test that the iOSExtractor class constructor sets source_file and destination_file attributes%0A def test_ctor(self):%0A sut = ios.iOSExtractor(%0A self.mock_source_file,%0A self.mock_destination_file,%0A self.mock_context_id_generator%0A )%0A%0A self.assertEqual(sut.source_file, self.mock_source_file)%0A self.assertEqual(sut.destination_file, self.mock_destination_file)%0A %0A # test that iOSExtractor parse_string method converts string in POT format %0A def test_parse_string(self):%0A sut = ios.iOSExtractor('', '', self.mock_context_id_generator)%0A%0A single_line_string = %22%5C' %5C%22 %25@%22%0A self.assertEqual(%0A sut.parse_string(single_line_string),%0A '%22%5C' %5C%22 %25s%22'%0A )%0A%0A multi_line_string = %22%5C' %5C%22 %5C%5Cn %25@%22%0A self.assertEqual(%0A sut.parse_string(multi_line_string),%0A '''%22%22%0A%22%5C' %5C%22 %5C%5Cn%22%0A%22 %25s%22'''%0A )%0A %0A # test that iOSExtractor run method converts an input file in POT format%0A def test_run(self):%0A sut = ios.iOSExtractor(%0A self.mock_source_file,%0A self.mock_destination_file,%0A self.mock_context_id_generator%0A )%0A%0A sut.run()%0A%0A with open(self.mock_destination_file, 'r') as destination_file:%0A lines = destination_file.readlines()%0A pot_content_as_string = %22%22.join(lines)%0A%0A self.assertEqual(%0A pot_content_as_string,%0A '''%0A#: mock_source_ios.strings:4%0Amsgctxt %22MOCK_CONTEXT_ID%22%0Amsgid %22Test string with a %5C%22%25s%5C%22 here%22%0Amsgstr %22%22%0A'''%0A )%0A%0Aif __name__ == '__main__':%0A unittest.main()
|
|
90642d734fbdcc3a97693106259c35c25f19d38e | Add problem 1 | problem_1.py | problem_1.py | Python | 0.000022 | @@ -0,0 +1,85 @@
+import sys%0A%0Ahex_string = sys.argv%5B1%5D%0Aprint hex_string.decode('hex').encode('base64')%0A
|
|
e19e45f7c6ff68599503c3ee0d6712974a8b4e66 | Document current pycurl exception behavior | tests/error_test.py | tests/error_test.py | Python | 0 | @@ -0,0 +1,1901 @@
+#! /usr/bin/env python%0A# -*- coding: iso-8859-1 -*-%0A# vi:ts=4:et%0A%0Aimport pycurl%0Aimport sys%0Aimport unittest%0A%0Aclass ErrorTest(unittest.TestCase):%0A def setUp(self):%0A self.curl = pycurl.Curl()%0A%0A def tearDown(self):%0A self.curl.close()%0A%0A # error originating in libcurl%0A def test_pycurl_error_libcurl(self):%0A try:%0A # perform without a url%0A self.curl.perform()%0A except pycurl.error:%0A exc_type, exc = sys.exc_info()%5B:2%5D%0A assert exc_type == pycurl.error%0A # pycurl.error's arguments are libcurl errno and message%0A self.assertEqual(2, len(exc.args))%0A self.assertEqual(int, type(exc.args%5B0%5D))%0A self.assertEqual(str, type(exc.args%5B1%5D))%0A # unpack%0A err, msg = exc%0A self.assertEqual(pycurl.E_URL_MALFORMAT, err)%0A # possibly fragile%0A self.assertEqual('No URL set!', msg)%0A%0A # pycurl raises standard library exceptions in some cases%0A def test_pycurl_error_stdlib(self):%0A try:%0A # set an option of the wrong type%0A self.curl.setopt(pycurl.WRITEFUNCTION, True)%0A except TypeError:%0A exc_type, exc = sys.exc_info()%5B:2%5D%0A%0A # error originating in pycurl%0A def test_pycurl_error_pycurl(self):%0A try:%0A # invalid option combination%0A self.curl.setopt(pycurl.WRITEFUNCTION, lambda x: x)%0A with open(__file__) as f:%0A self.curl.setopt(pycurl.WRITEHEADER, f)%0A except pycurl.error:%0A exc_type, exc = sys.exc_info()%5B:2%5D%0A assert exc_type == pycurl.error%0A # for non-libcurl errors, arguments are just the error string%0A self.assertEqual(1, len(exc.args))%0A self.assertEqual(str, type(exc.args%5B0%5D))%0A self.assertEqual('cannot combine WRITEHEADER with WRITEFUNCTION.', exc.args%5B0%5D)%0A
|
|
b6500cc5ae48212b7cabefc313b417a42273274b | Add test for parsing the man page | tests/test_parse.py | tests/test_parse.py | Python | 0.000001 | @@ -0,0 +1,1347 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Afrom __future__ import absolute_import%0A%0Aimport unittest%0A%0Aimport mock%0A%0Afrom tldr.parser import parse_page%0A%0A%0Aclass TestParse(unittest.TestCase):%0A def test_parse_page(self):%0A mock_config = %7B%0A 'colors': %7B%0A 'command': 'cyan',%0A 'description': 'blue',%0A 'usage': 'green'%0A %7D,%0A 'platform': 'linux',%0A 'repo_directory': '/tmp/tldr'%0A %7D%0A with mock.patch('tldr.parser.get_config', return_value=mock_config):%0A result = parse_page('/tmp/tldr/pages/sunos/prctl.md')%0A assert ''.join(result) == (%0A '%5Cn%5Cx1b%5B0m%5Cx1b%5B34m Get or set the resource controls of '%0A 'running processes,%5Cn%5Cx1b%5B0m%5Cx1b%5B34m tasks, and projects%5Cn'%0A '%5Cx1b%5B0m%5Cn%5Cx1b%5B0m%5Cx1b%5B32m- examine process limits and '%0A 'permissions%5Cn%5Cx1b%5B0m%5Cn%5Cx1b%5B0m%5Cx1b%5B36m prctl %7B%7BPID%7D%7D%5Cn%5Cx1b'%0A '%5B0m%5Cn%5Cx1b%5B0m%5Cx1b%5B32m- examine process limits and permissions '%0A 'in machine parseable format%5Cn%5Cx1b%5B0m%5Cn%5Cx1b%5B0m%5Cx1b%5B36m prctl '%0A '-P %7B%7BPID%7D%7D%5Cn%5Cx1b%5B0m%5Cn%5Cx1b%5B0m%5Cx1b%5B32m- Get specific limit for '%0A 'a running process%5Cn%5Cx1b%5B0m%5Cn%5Cx1b%5B0m%5Cx1b%5B36m prctl -n '%0A 'process.max-file-descriptor %7B%7BPID%7D%7D%5Cx1b%5B0m'%0A )%0A
|
|
38dee68b15e2daf3c9d6ece845dc561500545258 | Create test_plots.py | tests/test_plots.py | tests/test_plots.py | Python | 0.000104 | @@ -0,0 +1,941 @@
+from test_model import test_add_stressmodel%0Afrom pastas.plots import TrackSolve%0A%0Adef test_plot():%0A ml = test_add_stressmodel()%0A ml.plot()%0A %0Adef test_decomposition():%0A ml = test_add_stressmodel()%0A ml.plots.decomposition(min_ylim_diff=0.1)%0A %0Adef test_results():%0A ml = test_add_stressmodel()%0A ml.plots.results()%0A %0Adef test_block_response():%0A ml = test_add_stressmodel()%0A ml.plots.block_response()%0A %0Adef test_step_response():%0A ml = test_add_stressmodel()%0A ml.plots.step_response()%0A%0Adef test_diagnostics():%0A ml = test_add_stressmodel()%0A ml.plots.diagnostics()%0A %0Adef test_stresses():%0A ml = test_add_stressmodel()%0A ml.plots.stresses()%0A%0Adef test_contributions_pie():%0A ml = test_add_stressmodel()%0A ml.plots.contributions_pie()%0A %0Adef test_tracksolve():%0A ml = test_add_stressmodel()%0A track = TrackSolve(ml)%0A track.initialize_figure()%0A ml.solve(callback=track.update_figure)%0A
|
|
8a573baabee65bfbd348901e0d1c7828cdadd337 | Add tests for stats.normalize | tests/test_stats.py | tests/test_stats.py | Python | 0.000005 | @@ -0,0 +1,1559 @@
+import numpy as np%0Anp.seterr(all='raise')%0A%0Afrom stats import normalize%0A%0A%0Adef check_normalization_constants(arr, axis):%0A sum = np.log(np.sum(arr, axis=axis))%0A z = normalize(np.log(arr), axis=axis)%5B0%5D%0A%0A zdiff = np.abs(sum - z)%0A if not (zdiff %3C 1e-8).all():%0A print sum%0A print z%0A raise AssertionError(%22wrong normalization constant%22)%0A%0A%0Adef check_normalization(arr, axis):%0A sum = np.sum(arr, axis=axis)%0A norm = np.log(arr / np.expand_dims(sum, axis=axis))%0A n = normalize(np.log(arr), axis=axis)%5B1%5D%0A%0A ndiff = np.abs(norm - n)%0A if not(ndiff %3C 1e-8).all():%0A print norm%0A print n%0A raise AssertionError(%22wrong normalized values%22)%0A%0A%0Adef test_normalize_10():%0A %22%22%22Test stats.normalize for a vector%22%22%22%0A for i in xrange(5):%0A arr = np.random.gamma(2, scale=2, size=10)%0A yield (check_normalization_constants, arr, 0)%0A yield (check_normalization, arr, 0)%0A%0A%0Adef test_normalize_5x10x15():%0A %22%22%22Test stats.normalize for a multidimensional array%22%22%22%0A for i in xrange(5):%0A arr = np.random.gamma(2, scale=2, size=(5, 15, 20))%0A for axis in xrange(3):%0A yield (check_normalization_constants, arr, axis)%0A yield (check_normalization, arr, axis)%0A%0A%0Adef test_normalize_2x100000():%0A %22%22%22Test stats.normalize for a large array%22%22%22%0A for i in xrange(1):%0A arr = np.random.gamma(2, scale=2, size=(2, 100000))%0A for axis in xrange(2):%0A yield (check_normalization_constants, arr, axis)%0A yield (check_normalization, arr, axis)%0A
|
|
88f6c8c3657cba81c65da34a7161c860c8a23c5f | add RPC test for InvalidateBlock | qa/rpc-tests/invalidateblock.py | qa/rpc-tests/invalidateblock.py | Python | 0 | @@ -0,0 +1,1872 @@
+#!/usr/bin/env python2%0A# Copyright (c) 2014 The Bitcoin Core developers%0A# Distributed under the MIT software license, see the accompanying%0A# file COPYING or http://www.opensource.org/licenses/mit-license.php.%0A%0A#%0A# Test InvalidateBlock code%0A#%0A%0Afrom test_framework import BitcoinTestFramework%0Afrom bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException%0Afrom util import *%0A%0Aclass InvalidateTest(BitcoinTestFramework):%0A %0A %0A def setup_chain(self):%0A print(%22Initializing test directory %22+self.options.tmpdir)%0A initialize_chain_clean(self.options.tmpdir, 2)%0A %0A def setup_network(self):%0A self.nodes = %5B%5D%0A self.is_network_split = False %0A self.nodes.append(start_node(0, self.options.tmpdir, %5B%22-debug%22%5D))%0A self.nodes.append(start_node(1, self.options.tmpdir, %5B%22-debug%22%5D))%0A %0A def run_test(self):%0A print %22Mine 4 blocks on Node 0%22%0A self.nodes%5B0%5D.setgenerate(True, 4)%0A assert(self.nodes%5B0%5D.getblockcount() == 4)%0A besthash = self.nodes%5B0%5D.getbestblockhash()%0A%0A print %22Mine competing 6 blocks on Node 1%22%0A self.nodes%5B1%5D.setgenerate(True, 6)%0A assert(self.nodes%5B1%5D.getblockcount() == 6)%0A%0A print %22Connect nodes to force a reorg%22%0A connect_nodes_bi(self.nodes,0,1)%0A sync_blocks(self.nodes)%0A assert(self.nodes%5B0%5D.getblockcount() == 6)%0A badhash = self.nodes%5B1%5D.getblockhash(2)%0A%0A print %22Invalidate block 2 on node 0 and verify we reorg to node 0's original chain%22%0A self.nodes%5B0%5D.invalidateblock(badhash)%0A newheight = self.nodes%5B0%5D.getblockcount()%0A newhash = self.nodes%5B0%5D.getbestblockhash()%0A if (newheight != 4 or newhash != besthash):%0A raise AssertionError(%22Wrong tip for node0, hash %25s, height %25d%22%25(newhash,newheight))%0A%0Aif __name__ == '__main__':%0A InvalidateTest().main()%0A
|
|
d8b89170d98d200d3538a6435159f561aa888015 | Update forward compatibility horizon to 2018-12-03 | tensorflow/python/compat/compat.py | tensorflow/python/compat/compat.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 12, 2)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
"""Enables TensorFlow 2.x behaviors.
This function can be called at the beginning of the program (before `Tensors`,
`Graphs` or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
TensorFlow 1.x and 2.x to behave as intended for 2.x.
This function is called in the main TensorFlow `__init__.py` file, user should
not need to call it, except during complex migrations.
"""
tf2.enable() # Switches TensorArrayV2 and control flow V2
ops.enable_eager_execution()
tensor_shape.enable_v2_tensorshape() # Also switched by tf2
variable_scope.enable_resource_variables()
@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
"""Enables TensorFlow 2.x behaviors.
This function can be called at the beginning of the program (before `Tensors`,
`Graphs` or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
TensorFlow 1.x and 2.x to behave as intended for 1.x.
User can call this function to disable 2.x behavior during complex migrations.
"""
tf2.disable() # Switches TensorArrayV2 and control flow V2
ops.disable_eager_execution()
tensor_shape.disable_v2_tensorshape() # Also switched by tf2
variable_scope.disable_resource_variables()
| Python | 0 | @@ -1320,17 +1320,17 @@
18, 12,
-2
+3
)%0A%0A%0A@tf_
|
fadac460052cb1a778bf8398879e1cb616c26228 | Add new migration for Django 1.8 | propaganda/migrations/0002_auto_20150802_1841.py | propaganda/migrations/0002_auto_20150802_1841.py | Python | 0 | @@ -0,0 +1,434 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('propaganda', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='subscriber',%0A name='email',%0A field=models.EmailField(unique=True, max_length=254, verbose_name='email'),%0A ),%0A %5D%0A
|
|
9a608da83605d162be891e69db903f581ca9566b | Update forward compatibility horizon to 2018-11-15 | tensorflow/python/compat/compat.py | tensorflow/python/compat/compat.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 11, 14)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
| Python | 0 | @@ -1143,9 +1143,9 @@
1, 1
-4
+5
)%0A%0A%0A
|
2a0e004358f13d6ebe936ceab1b5e7d147606583 | Update forward compatibility horizon to 2018-11-16 | tensorflow/python/compat/compat.py | tensorflow/python/compat/compat.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 11, 15)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
year: A year (e.g. 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
| Python | 0 | @@ -1143,9 +1143,9 @@
1, 1
-5
+6
)%0A%0A%0A
|
e853fd96f14b9331a25171fc435eea3ec829e9ef | Update forward compatibility horizon to 2020-04-06 | tensorflow/python/compat/compat.py | tensorflow/python/compat/compat.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 4, 5)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
  if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
| Python | 0.000001 | @@ -1382,17 +1382,17 @@
020, 4,
-5
+6
)%0A_FORWA
|
9747ab1249a159857c5fb7d59fdb6a6225121964 | Remove the '",' that shows up in the install instructions. | tensorflow/tools/docs/generate2.py | tensorflow/tools/docs/generate2.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tool to generate api_docs for TensorFlow2.
```
python generate2.py --output_dir=/tmp/out
```
Requires a local installation of:
https://github.com/tensorflow/docs/tree/master/tools
tf-nightly-2.0-preview
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import parser
import tensorboard
import tensorflow_estimator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.
parser.tf_inspect = tf_inspect
# `tf` has an `__all__` that doesn't list important things like `keras`.
# The doc generator recognizes `__all__` as the list of public symbols.
# So patch `tf.__all__` to list everything.
tf.__all__ = [item_name for item_name, value in tf_inspect.getmembers(tf)]
tf.__doc__ = """
## Tensorflow 2.0 Preview
Caution: This is a developer preview. You will likely find some bugs,
performance issues, and more, and we encourage you to tell us about them.
We value your feedback!
These docs were generated from a nightly build of tensorflow 2.0.
You can install the exact version that was used to generate these docs
with:
```
pip install tf-nightly-2.0-preview=={version}",
```
""".format(version=tf.__version__)
FLAGS = flags.FLAGS
flags.DEFINE_string(
"code_url_prefix",
"/code/stable/tensorflow",
"A url to prepend to code paths when creating links to defining code")
flags.DEFINE_string(
"output_dir", "/tmp/out",
"A directory, where the docs will be output to.")
flags.DEFINE_bool("search_hints", True,
"Include meta-data search hints at the top of each file.")
flags.DEFINE_string("site_path", "",
"The prefix ({site-path}/api_docs/python/...) used in the "
"`_toc.yaml` and `_redirects.yaml` files")
# The doc generator isn't aware of tf_export.
# So prefix the score tuples with -1 when this is the canonical name, +1
# otherwise. The generator chooses the name with the lowest score.
class TfExportAwareDocGeneratorVisitor(
doc_generator_visitor.DocGeneratorVisitor):
"""A `tf_export` aware doc_visitor."""
def _score_name(self, name):
canonical = tf_export.get_canonical_name_for_symbol(self._index[name])
canonical_score = 1
if canonical is not None and name == "tf." + canonical:
canonical_score = -1
scores = super(TfExportAwareDocGeneratorVisitor, self)._score_name(name)
return (canonical_score,) + scores
def _hide_layer_and_module_methods():
"""Hide methods and properties defined in the base classes of keras layers."""
# __dict__ only sees attributes defined in *this* class, not on parent classes
module_contents = list(tf.Module.__dict__.items())
layer_contents = list(tf.keras.layers.Layer.__dict__.items())
for name, obj in module_contents + layer_contents:
if name == "__init__":
continue
if isinstance(obj, property):
obj = obj.fget
if isinstance(obj, (staticmethod, classmethod)):
obj = obj.__func__
try:
doc_controls.do_not_doc_in_subclasses(obj)
except AttributeError:
pass
def build_docs(output_dir, code_url_prefix, search_hints=True):
"""Build api docs for tensorflow v2.
Args:
output_dir: A string path, where to put the files.
code_url_prefix: prefix for "Defined in" links.
search_hints: Bool. Include meta-data search hints at the top of each file.
"""
_hide_layer_and_module_methods()
try:
doc_controls.do_not_generate_docs(tf.tools)
except AttributeError:
pass
base_dir = path.normpath(path.join(path.dirname(tf.__file__), "../.."))
base_dirs = (
base_dir,
# External packages base directories,
path.dirname(tensorboard.__file__),
path.dirname(tensorflow_estimator.__file__),
)
code_url_prefixes = (
code_url_prefix,
# External packages source repositories,
"https://github.com/tensorflow/tensorboard/tree/master/tensorboard",
"https://github.com/tensorflow/estimator/tree/master/tensorflow_estimator",
)
doc_generator = generate_lib.DocGenerator(
root_title="TensorFlow 2.0 Preview",
py_modules=[("tf", tf)],
base_dir=base_dirs,
search_hints=search_hints,
code_url_prefix=code_url_prefixes,
site_path=FLAGS.site_path,
visitor_cls=TfExportAwareDocGeneratorVisitor)
doc_generator.build(output_dir)
def main(argv):
del argv
build_docs(output_dir=FLAGS.output_dir,
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints)
if __name__ == "__main__":
app.run(main)
| Python | 0.99994 | @@ -2237,18 +2237,16 @@
version%7D
-%22,
%0A%60%60%60%0A%22%22%22
|
b29417d3b387c8ab62c1e09589c2d93dae905993 | Add skeleton tle.api | tle/api.py | tle/api.py | Python | 0.000069 | @@ -0,0 +1,2394 @@
+import json
+import logging
+import bottle
+import functools
+
+from paste import httpserver
+from paste.translogger import TransLogger
+
+from collections import OrderedDict
+
+log = logging.getLogger(__name__)
+
+class APILogger(TransLogger):
+    def write_log(
+        self,
+        environ,
+        method,
+        req_uri,
+        start,
+        status,
+        bytes_,
+    ):
+        remote_addr = environ['REMOTE_ADDR']
+        protocol = environ['SERVER_PROTOCOL']
+        referer = environ.get('HTTP_REFERER', '-')
+        user_agent = environ.get('HTTP_USER_AGENT', '-')
+        msg = ('{remote_addr} {method} {req_uri} {protocol} {status} '
+               '{bytes_} {referer} {user_agent}'
+               ).format(
+            remote_addr=remote_addr,
+            method=method,
+            req_uri=req_uri,
+            protocol=protocol,
+            status=status,
+            bytes_=bytes_,
+            referer=referer,
+            user_agent=user_agent,
+        )
+        log.info(msg)
+
+class APIServer(bottle.ServerAdapter):
+    def run(self, handler):
+        handler = APILogger(handler)
+        httpserver.serve(
+            handler,
+            host=self.host,
+            port=str(self.port),
+            **self.options
+        )
+
+def set_content(type_, charset='charset=UTF-8'):
+    bottle.response.content_type = '{type_}; {charset}'.format(
+        type_=type_,
+        charset=charset,
+    )
+
+def json_content(fn):
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        set_content('application/json')
+        return fn(*args, **kwargs)
+    return wrapper
+
+@bottle.error(404)
+@bottle.error(403)
+@bottle.error(500)
+@json_content
+def api_error(error):
+    status = OrderedDict([
+        ('code', error.status),
+        ('message', error.body)
+    ])
+    status = OrderedDict([
+        ('status', status),
+    ])
+
+    return json.dumps(status)
+
+class EventAPI01(object):
+    def __init__(self):
+        pass
+
+    def apply(self, callback, context):
+        """
+        Similar to a bottle.JSONPlugin's apply
+        method. This one also ensures that self
+        is available to methods with bottle
+        decorators.
+        """
+        @functools.wraps(callback)
+        @json_content
+        def wrapper(*args, **kwargs):
+            kwargs['self'] = self
+            return callback(*args, **kwargs)
+        return wrapper
|
|
b042675463c34340d4d3ae5d6868b243abf9741b | Create Average_sorting.py | Average_sorting.py | Average_sorting.py | Python | 0.000001 | @@ -0,0 +1,1713 @@
+# coding: utf-8
+import rw
+
+success_list=[] #Meet the requirements of the combined group
+max_min=[] #Max ad min volue save var;[function_name : max_min_mark]
+
+def count(x,y):
+    result=x+y
+    return result
+
+def count_list(x,y):
+    total=count(len(x),len(y))
+    return total
+
+def max_min_mark(var):
+    for i in var:
+        length=len(i)
+        max_min.append(length)
+
+
+def merger_group(textdir):
+    textlines = open(textdir,'r').readlines()
+    b_split=[]
+    for i in xrange(0,len(textlines)):
+        if i%2!=0:
+            if count_list(x, textlines[i])>35:
+                b_split.append(x)
+                b_split.append(textlines[i])
+            else:
+                success_list.append(x.replace('\n','')+' '+textlines[i])
+        else:
+            x=textlines[i]
+    return b_split
+
+def best_value(b_split):
+    max_min_mark(b_split)
+    min_value_location=max_min.index(min(max_min))
+    while min_value_location:
+        max_value_location=max_min.index(max(max_min))
+        if max_min[max_value_location]+max_min[min_value_location]>35:
+            success_list.append(b_split[max_value_location])
+            success_list.append(b_split[max_value_location])
+            max_min[max_value_location]=None
+        else:
+            success_list.append(b_split[max_value_location].replace('\n','')+' '+b_split[min_value_location])
+            max_min[max_value_location]=None
+            max_min[min_value_location]=None
+        min_value_location=max_min.index(min(max_min))
+
+def main(textdir):
+    path=raw_input('save_filename:')
+    best_value(merger_group(textdir))
+    rw.handle(success_list,path)
+
+
+
+if __name__ == '__main__':
+    textdir = 'd:/name.txt'
+    main(textdir)
|
|
4cab21fb6ed217ad4a83c4d6943266845c087d88 | Make tests work with Python 2 | test_seleniumrequests.py | test_seleniumrequests.py | import json
import socket
import threading
from seleniumrequests import Firefox
from seleniumrequests.request import get_unused_port
from six.moves import BaseHTTPServer, http_cookies
import requests
import six
class EchoHeaderRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
# Python 2's HTTPMessage class contains the actual data in its
# "dict"-attribute, whereas in Python 3 HTTPMessage is itself the
# container. Treat headers as case-insensitive
data = json.dumps(dict(self.headers) if six.PY3 else self.headers.dict)
self.send_response(200)
# Send JSON data in a header instead of the body field, because some
# browsers add additional markup which is ugly to parse out
self.send_header('Echo', data)
self.end_headers()
# This is needed so the WebDriver instance allows setting of cookies
self.wfile.write(six.b('<html></html>'))
# Suppress unwanted logging to stderr
def log_message(self, format, *args):
pass
# TODO: Preferably only use websites served by the localhost
def test_window_handling():
webdriver = Firefox()
webdriver.get('https://www.google.com/')
webdriver.execute_script("window.open('https://www.facebook.com/');")
original_window_handle = webdriver.current_window_handle
original_window_handles = set(webdriver.window_handles)
webdriver.request('GET', 'https://www.youtube.com/')
# Make sure that the window handle was switched back to the original one
# after making a request that caused a new window to open
assert webdriver.current_window_handle == original_window_handle
# Make sure that all additional window handles that were opened during the
# request were closed again
assert set(webdriver.window_handles) == original_window_handles
webdriver.quit()
def test_headers():
while True:
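        # get_unused_port() can race with other processes, so keep retrying
        # until the server actually binds to a free port.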
port = get_unused_port()
try:
server = BaseHTTPServer.HTTPServer(('', port), EchoHeaderRequestHandler)
break
except socket.error:
pass
def handle_requests():
while True:
server.handle_request()
threading.Thread(target=handle_requests, daemon=True).start()
webdriver = Firefox()
server_url = 'http://127.0.0.1:%d/' % port
webdriver.get(server_url)
# TODO: Add more cookie examples with additional fields, such as
# expires, path, comment, max-age, secure, version, httponly
cookies = (
{'name': 'Hello', 'value': 'World'},
{'name': 'Another', 'value': 'Cookie'}
)
for cookie in cookies:
webdriver.add_cookie(cookie)
response = webdriver.request('GET', server_url, headers={'Extra': 'Header'}, cookies={'Extra': 'Cookie'})
sent_headers = requests.structures.CaseInsensitiveDict(json.loads(response.headers['Echo']))
# These are the default headers sent for the Mozilla Firefox browser, it's
# easier to simply check that the values are not empty instead of comparing
# them to constants, since those would change frequently with each
# iteration of the used browser. Additionally the existence of headers such
# as Accept-Language and Referer confirms that these are not simply the
# default headers sent by the requests library itself
assert 'cookie' in sent_headers and sent_headers['cookie']
assert 'accept' in sent_headers and sent_headers['accept']
assert 'host' in sent_headers and sent_headers['host']
assert 'connection' in sent_headers and sent_headers['connection']
assert 'accept-language' in sent_headers and sent_headers['accept-language']
assert 'accept-encoding' in sent_headers and sent_headers['accept-encoding']
assert 'user-agent' in sent_headers and sent_headers['user-agent']
assert 'referer' in sent_headers and sent_headers['referer']
# Check if the additional header was sent as well
assert 'extra' in sent_headers and sent_headers['extra'] == 'Header'
cookies = http_cookies.SimpleCookie()
cookies.load(sent_headers['Cookie'])
assert 'Hello' in cookies and cookies['Hello'].value == 'World'
assert 'Another' in cookies and cookies['Another'].value == 'Cookie'
# Check if the additional cookie was sent as well
assert 'Extra' in cookies and cookies['Extra'].value == 'Cookie'
webdriver.quit()
| Python | 0.000878 | @@ -207,16 +207,37 @@
t six%0A%0A%0A
+ENCODING = 'UTF-8'%0A%0A%0A
class Ec
@@ -2219,16 +2219,25 @@
t()%0A%0A
+ thread =
threadi
@@ -2272,22 +2272,146 @@
ests
-, daemon=True)
+)%0A%0A # Set daemon attribute after instantiating thread object to stay compatible%0A # with Python 2%0A thread.daemon = True%0A thread
.sta
@@ -2411,24 +2411,25 @@
ead.start()%0A
+%0A
webdrive
@@ -4215,47 +4215,172 @@
e()%0A
- cookies.load(sent_headers%5B'Cookie'%5D
+%0A # Python 2's Cookie module expects a string object, not Unicode%0A cookies.load(sent_headers%5B'Cookie'%5D if six.PY3 else sent_headers%5B'Cookie'%5D.encode(ENCODING)
)%0A%0A
|
1136824ab60dbb8774ba5cb8d011e898f9286e06 | Add a missing file | reviewboard/admin/validation.py | reviewboard/admin/validation.py | Python | 0 | @@ -0,0 +1,1001 @@
+from django import forms
+
+
+def validate_bug_tracker(input_url):
+    """
+    Validates that an issue tracker URI string contains one `%s` Python format
+    specification type (no other types are supported).
+    """
+    try:
+        # Ignore escaped `%`'s
+        test_url = input_url.replace('%%', '')
+
+        if test_url.find('%s') == -1:
+            raise TypeError
+
+        # Ensure an arbitrary value can be inserted into the URL string
+        test_url = test_url % 1
+    except (TypeError, ValueError):
+        raise forms.ValidationError(["%s has invalid format specification "
+                                     "type(s). Use only one '%%s' to mark the "
+                                     "location of the bug id. If the URI "
+                                     "contains encoded values (e.g. '%%20'), "
+                                     "prepend the encoded values with an "
+                                     "additional '%%'."
+                                     % input_url])
|
|
298d3e352193e574e0c8980e37a50d226552109e | Create conf.py | docs/conf.py | docs/conf.py | Python | 0.000001 | @@ -0,0 +1,248 @@
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.todo',
+    'sphinx.ext.coverage',
+    'sphinx.ext.viewcode',
+    'repoze.sphinx.autointerface',
+    'sphinxcontrib.programoutput',
+    'sphinxcontrib.images',
+]
|
|
acdb13c3680b7958f9a1def3e538ef9ebd166922 | add migration for org name + apptext | portal/migrations/versions/9b1bedfa916b_.py | portal/migrations/versions/9b1bedfa916b_.py | Python | 0.000001 | @@ -0,0 +1,1110 @@
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.orm import sessionmaker
+
+from portal.models.app_text import AppText
+from portal.models.organization import Organization
+
+"""empty message
+
+Revision ID: 9b1bedfa916b
+Revises: 441185240f62
+Create Date: 2017-10-26 15:24:32.623899
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '9b1bedfa916b'
+down_revision = '441185240f62'
+
+Session = sessionmaker()
+
+
+def update_org_name(old, new):
+    bind = op.get_bind()
+    session = Session(bind=bind)
+
+    session.execute("UPDATE organizations SET name='{}' "
+                    "WHERE name='{}'".format(new, old))
+    for at in session.query(AppText).filter(AppText.name.contains(old)):
+        at.name = at.name.replace(old, new)
+    session.commit()
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    update_org_name('CRV', 'TrueNTH Global Registry')
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    update_org_name('TrueNTH Global Registry', 'CRV')
+    # ### end Alembic commands ###
|
|
4ce5e57b882ae057fa21d0397925512073447b77 | Add admin interface | chunked_upload/admin.py | chunked_upload/admin.py | Python | 0.000001 | @@ -0,0 +1,302 @@
+from django.contrib import admin
+from .models import ChunkedUpload
+
+
+
+class ChunkedUploadAdmin(admin.ModelAdmin):
+    list_display = ('upload_id', 'file', 'filename', 'user', 'offset',
+                    'created_on', 'status', 'completed_on')
+
+
+admin.site.register(ChunkedUpload, ChunkedUploadAdmin)
|
|
1db74fafd5f281053dc82d2d4ff2d24447db8338 | add initial Nose tests | tests/test_connection.py | tests/test_connection.py | Python | 0.000001 | @@ -0,0 +1,1415 @@
+from nose.tools import raises
+from unittest.case import SkipTest
+from urllib2 import urlopen
+import StringIO
+
+import mock
+import datetime, md5
+
+import harvestmedia.api.exceptions
+import harvestmedia.api.config
+import harvestmedia.api.client
+
+api_key = '12345'
+webservice_url = 'https://service.harvestmedia.net/HMP-WS.svc'
+
+@mock.patch('harvestmedia.api.client.urlopen')
+@raises(harvestmedia.api.exceptions.InvalidAPIResponse)
+def test_xml_failure(urlopen_mock):
+    urlopen_mock.return_value = StringIO.StringIO('<xml><this xml is malformed</xml>')
+
+    hmconfig = harvestmedia.api.config.Config()
+    hmconfig.api_key = api_key
+    hmconfig.webservice_url = webservice_url
+    client = harvestmedia.api.client.Client()
+
+@mock.patch('harvestmedia.api.client.urlopen')
+def test_get_service_token(UrlOpenMock):
+    u = UrlOpenMock()
+    expiry = datetime.datetime.today().isoformat()
+    test_token = md5.md5(expiry).hexdigest() # generate an md5 from the date for testing
+    u.read.return_value = '<?xml version="1.0" encoding="utf-8"?><responseservicetoken><token value="%s" expiry="%s"/></responseservicetoken>' % (test_token, expiry)
+
+    hmconfig = harvestmedia.api.config.Config()
+    hmconfig.api_key = api_key
+    hmconfig.webservice_url = webservice_url
+    client = harvestmedia.api.client.Client()
+
+    assert client.service_token == test_token
+    assert client.service_token_expires == expiry
+
|
|
a3deadbc54fad13e4e40da143f25ae4b26cf690b | Add missed travis-ci manage.py. | travis-ci/manage.py | travis-ci/manage.py | Python | 0 | @@ -0,0 +1,252 @@
+#!/usr/bin/env python
+import os
+import sys
+
+if __name__ == "__main__":
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travis-ci.settings")
+
+    from django.core.management import execute_from_command_line
+
+    execute_from_command_line(sys.argv)
|
|
58a5257505a4ae9d32cf233d059b4350f9494d86 | Create timer.py | timer.py | timer.py | Python | 0.000003 | @@ -0,0 +1,1251 @@
+#
+# jasoncg
+# 2015-02-23
+#
+# timer.py
+#
+# A simple timer supporting the Python "with" statement
+#
+import time
+
+#
+# Use in a "with" statement:
+# with timer.Timer():
+# 	perform_expensive_calculation()
+#
+# May also print the current progress:
+# with timer.Timer() as t:
+# 	perform_expensive_calculation_1()
+#	t.print_progress()
+#	perform_expensive_calculation_2()
+#
+class Timer():
+	def __enter__(self):
+		self.reset()
+		return self
+
+	def __exit__(self, type, value, traceback):
+		end = time.time()
+		print("Took %s seconds\n" %(end-self.start))
+
+	def reset(self):
+		# Reset the start to now
+		self.start = time.time()
+		self.elapsed = time.time()
+
+	def get_progress(self):
+		# Get the current time elapsed since start
+		return time.time() - self.start
+
+
+	def print_progress(self, message=None):
+		if message is None:
+			message=""
+		else:
+			message=message+" "
+		print("%s%s seconds\n" %(message, self.get_progress()))
+
+	def get_elapsed(self):
+		# Get the current time elapsed since start
+		newelapsed = time.time()
+		e = newelapsed - self.elapsed
+		self.elapsed = newelapsed
+
+		return e
+	def print_elapsed(self, message=None):
+		if message is None:
+			message=""
+		else:
+			message=message+" "
+		print("%s%s seconds\n" %(message, self.get_elapsed()))
|
|
d1e568ab1e238586ed914de35ed44dc2231af3d2 | Create version.py | ngboost/version.py | ngboost/version.py | Python | 0.000001 | @@ -0,0 +1,22 @@
+__version__ = "0.2.0"
|
|
b6a6e6a9bf0254f9c79215c98b392b02db53827b | Add wireless module #305 | cme/modules/wireless.py | cme/modules/wireless.py | Python | 0 | @@ -0,0 +1,633 @@
+class CMEModule:
+
+    name = 'wireless'
+    description = "Get key of all wireless interfaces"
+    supported_protocols = ['smb']
+    opsec_safe = True
+    multiple_hosts = True
+
+    def options(self, context, module_options):
+        '''
+        '''
+
+    def on_admin_login(self, context, connection):
+
+        command = 'powershell.exe -c "(netsh wlan show profiles) | Select-String """"\:(.+)$"""" | %{$name=$_.Matches.Groups[1].Value.Trim(); $_} | %{(netsh wlan show profile name="$name" key=clear)}"'
+        context.log.info('Executing command')
+        p = connection.execute(command, True)
+        context.log.success(p)
|
|
5ad1170c2515fd799acc43e99e35299bbab9cec1 | Add tests for harmonic in 791628c4df60369583474c07d64f1439bd5c19e0 | tests/test_transforms.py | tests/test_transforms.py | Python | 0.000002 | @@ -0,0 +1,431 @@
+""" Test for `yatsm.regression.transforms`
+"""
+import numpy as np
+import patsy
+import py.test
+
+from yatsm.regression.transforms import harm
+
+
+def test_harmonic_transform():
+    x = np.arange(735688, 735688 + 100, 1)
+    design = patsy.dmatrix('0 + harm(x, 1)')
+
+    truth = np.vstack((np.cos(2 * np.pi / 365.25 * x),
+                       np.sin(2 * np.pi / 365.25 * x))).T
+
+    np.testing.assert_equal(np.asarray(design), truth)
|
|
3c37704b3b819bee5d441c75a6fd59a64279a0e8 | use unicode.strip instead of string.strip of the string module for metadata processors | pelican/readers.py | pelican/readers.py | # -*- coding: utf-8 -*-
try:
from docutils import core
# import the directives to have pygments support
from pelican import rstdirectives
except ImportError:
core = False
try:
from markdown import Markdown
except ImportError:
Markdown = False
import re
import string
from pelican.utils import get_date, open
_METADATAS_PROCESSORS = {
'tags': lambda x: map(string.strip, x.split(',')),
'date': lambda x: get_date(x),
'status': string.strip,
}
class Reader(object):
enabled = True
class RstReader(Reader):
enabled = bool(core)
extension = "rst"
def _parse_metadata(self, content):
"""Return the dict containing metadatas"""
output = {}
for m in re.compile('^:([a-z]+): (.*)\s', re.M).finditer(content):
name, value = m.group(1).lower(), m.group(2)
output[name] = _METADATAS_PROCESSORS.get(
name, lambda x:x
)(value)
return output
def read(self, filename):
"""Parse restructured text"""
text = open(filename)
metadatas = self._parse_metadata(text)
extra_params = {'input_encoding': 'unicode',
'initial_header_level': '2'}
rendered_content = core.publish_parts(text, writer_name='html',
settings_overrides=extra_params)
title = rendered_content.get('title')
content = rendered_content.get('body')
if not metadatas.has_key('title'):
metadatas['title'] = title
return content, metadatas
class MarkdownReader(Reader):
enabled = bool(Markdown)
extension = "md"
def read(self, filename):
"""Parse content and metadata of markdown files"""
text = open(filename)
md = Markdown(extensions = ['meta', 'codehilite'])
content = md.convert(text)
metadatas = {}
for name, value in md.Meta.items():
name = name.lower()
metadatas[name] = _METADATAS_PROCESSORS.get(
name, lambda x:x
)(value[0])
return content, metadatas
class HtmlReader(Reader):
extension = "html"
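    # Metadata is embedded in SSI-style comments, e.g. <!--# title : My Post -->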
_re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')
def read(self, filename):
"""Parse content and metadata of (x)HTML files"""
content = open(filename)
metadatas = {'title':'unnamed'}
for i in self._re.findall(content):
key = i.split(':')[0][5:].strip()
value = i.split(':')[-1][:-3].strip()
metadatas[key.lower()] = value
return content, metadatas
_EXTENSIONS = dict((cls.extension, cls) for cls in Reader.__subclasses__())
def read_file(filename, fmt=None):
"""Return a reader object using the given format."""
if not fmt:
fmt = filename.split('.')[-1]
if fmt not in _EXTENSIONS.keys():
raise TypeError('Pelican does not know how to parse %s' % filename)
reader = _EXTENSIONS[fmt]()
if not reader.enabled:
raise ValueError("Missing dependencies for %s" % fmt)
return reader.read(filename)
| Python | 0.000001 | @@ -274,22 +274,8 @@
t re
-%0Aimport string
%0A%0Afr
@@ -367,22 +367,23 @@
x: map(
-string
+unicode
.strip,
@@ -450,14 +450,15 @@
s':
-string
+unicode
.str
|
2b8afafeda8d576187aee35c19b292febd1cd1cd | use re groups to simplify the patterns | ua2os.py | ua2os.py | """ua to os - from a user agent return operating system, architecture, and browser"""
import sys,splunk.Intersplunk
import re
os_mapping = (
('Windows .. 5.1', 'Windows XP'),
('Windows .. 5.2', 'Windows XP'),
('Windows NT 6.0', 'Windows Vista'),
('Windows 6.0', 'Windows Server 2008'),
('Windows NT 6.1', 'Windows 7'),
('OS X 10.7', 'MAC OS X 10.7.x'),
('OS X 10.6', 'MAC OS X 10.6.x'),
('OS X 10.5', 'MAC OS X 10.5.x'),
('OS X 10.4', 'MAC OS X 10.4.x'),
('OS X 10.3', 'MAC OS X 10.3.x'),
('SunOS', 'Solaris'),
('droid', 'Android'),
('Windows', 'Windows - Other'),
('iPad', 'ipad'),
('iPod', 'ipod'),
('iPhone', 'iphone'),
('OS X', 'MAC OS X other'),
('Darwin', 'MAC OS X other'),
('Linux ', 'Linux'),
('winhttp', 'Windows - Other'),
('MSIE 4.0;', 'Windows - Other'),
('Microsoft', 'Windows - Other'),
('Win32', 'Windows - Other'),
('BlackBerry', 'BlackBerry'),
('urlgrabber/.* yum', 'Linux - redhat/fedora'),
('Skype for Macintosh', 'MAC OS X other'),
('Xbox Live Client', 'Xbox'),
)
browser_mapping = (
('MSIE 7.*Trident/4.0', 'Internet Explorer 8.0'),
('MSIE 9.0', 'Internet Explorer 9.0'),
('MSIE 8.0', 'Internet Explorer 8.0'),
('MSIE 7.0', 'Internet Explorer 7.0'),
('MSIE 6.0', 'Internet Explorer 6.0'),
('droid', 'Android'),
('iPhone', 'Safari - mobile'),
('Safari/', 'Safari'),
('iTunes', 'iTunes'),
('Firefox/6', 'Firefox 6'),
('Firefox/5', 'Firefox 5'),
('Firefox/4', 'Firefox 4'),
('Firefox/3', 'Firefox 3'),
('MSIE 5.00', 'Internet Explorer 5.0'),
('MSIE', 'Internet Explorer - Other'),
('Chrome', 'Chrome'),
('AppleWebKit', 'Safari'),
('Google Update', 'Google Update'),
('Firefox/2', 'Firefox 2'),
('Firefox/1', 'Firefox 1'),
('Opera', 'Opera'),
('urlgrabber/.* yum', 'yum'),
('BlackBerry', 'Blackberry'),
)
arch_mapping = (
('Windows .. 5.2', 'x64'),
('x64', 'x64'),
('i386', 'i386'),
('x86_64', 'x64'),
('PPC', 'PowerPC'),
('Power.{1,3}Macint', 'PowerPC'),
('droid', 'android'),
('iPad', 'ipad'),
('iPod', 'ipod'),
('iPhone', 'iphone'),
('Intel', 'Intel'),
('BlackBerry', 'BlackBerry'),
)
os_mapping = [(re.compile(a, re.IGNORECASE),b) for (a,b) in os_mapping]
browser_mapping = [(re.compile(a, re.IGNORECASE),b) for (a,b) in browser_mapping]
arch_mapping = [(re.compile(a, re.IGNORECASE),b) for (a,b) in arch_mapping]
def get_thing(line, mapping):
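    """Return the mapped name for the first pattern that matches line, or 'unknown'."""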
for r, name in mapping:
if r.search(line):
return name
return 'unknown'
def get_ua_info(line):
i = {}
i['operating_system'] = get_thing(line, os_mapping)
i['architecture'] = get_thing(line, arch_mapping)
i['browser'] = get_thing(line, browser_mapping)
return i
try:
results,dummyresults,settings = splunk.Intersplunk.getOrganizedResults()
for r in results:
if "_raw" not in r:
continue
info = get_ua_info(r['_raw'])
r.update(info)
except:
import traceback
stack = traceback.format_exc()
results = splunk.Intersplunk.generateErrorResults("Error : Traceback: " + str(stack))
splunk.Intersplunk.outputResults( results )
| Python | 0 | @@ -380,228 +380,37 @@
10.
-7', 'MAC OS X 10.7.x'),%0A ('OS X 10.6', 'MAC OS X 10.6.x'),%0A ('OS X 10.5', 'MAC OS X 10.5.x'),%0A ('OS X 10.4', 'MAC OS X 10.4.x'),%0A ('OS X 10.3', 'MAC OS X 10.3
+(%5Cd)', 'MAC OS X 10.%25s
.x')
@@ -1249,182 +1249,20 @@
SIE
-9.0', 'Internet Explorer 9.0'),%0A ('MSIE 8.0', 'Internet Explorer 8.0'),%0A ('MSIE 7.0', 'Internet Explorer 7.0'),%0A ('MSIE 6.0',
+(%5B9876%5D).0',
@@ -1281,17 +1281,18 @@
xplorer
-6
+%25s
.0'),%0A
@@ -1473,93 +1473,12 @@
fox/
-6', 'Firefox 6'),%0A ('Firefox/5', 'Firefox 5'),%0A ('Firefox/4
+(%5Cd)
',
@@ -1475,35 +1475,32 @@
x/(%5Cd)',
-
-
'Firefox 4'),%0A
@@ -1496,51 +1496,10 @@
fox
-4'),%0A ('Firefox/3', 'Firefox 3
+%25s
'),%0A
@@ -1738,92 +1738,8 @@
'),%0A
- ('Firefox/2', 'Firefox 2'),%0A ('Firefox/1', 'Firefox 1'),%0A
@@ -2638,18 +2638,23 @@
-if
+match =
r.searc
@@ -2656,24 +2656,41 @@
search(line)
+%0A if match
:%0A
@@ -2702,16 +2702,33 @@
urn name
+ %25 match.groups()
%0A ret
|
0d390edeeb8829c0b8afef090f133d0fee8bce4f | Bump PROVISION_VERSION for latest changes. | version.py | version.py | ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '9.0'
| Python | 0 | @@ -44,11 +44,11 @@
ON = '9.
-0
+1
'%0A
|
59a228312bb3091db8bfb6bf9a75ce4ae47431f4 | Add zero system test to neural net | neuralnets/net_test.py | neuralnets/net_test.py | Python | 0.004377 | @@ -0,0 +1,526 @@
+from net import NeuralNet
+import numpy as np
+
+#TODO(Wesley) More tests
+
+class TestNeuralNet(object):
+    def test_zero_system(self):
+        net = NeuralNet(3, 2, 4, 1, seed=0)
+        net.weights = [ np.zeros((3,4)),
+                        np.zeros((4,4)),
+                        np.zeros((4,4)),
+                        np.zeros((4,1)) ]
+        inpt = np.asarray([1, 1, 1])
+        print(net.forward(inpt))
+        for layer in net.forward(inpt)[1:]:
+            for neuron in layer:
+                assert neuron == 0.5
|
|
0a7fb32471fa5ae6e66348527c0ae2f299361211 | Allow to pass extra argument to the Base class initializer | opbeat/handlers/logging.py | opbeat/handlers/logging.py | """
opbeat.handlers.logging
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2012 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import logging
import sys
import traceback
from opbeat.base import Client
from opbeat.utils import six
from opbeat.utils.encoding import to_string
from opbeat.utils.stacks import iter_stack_frames
class OpbeatHandler(logging.Handler, object):
def __init__(self, *args, **kwargs):
client = kwargs.get('client_cls', Client)
if len(args) == 1:
arg = args[0]
if isinstance(arg, Client):
self.client = arg
else:
raise ValueError(
'The first argument to %s must be a Client instance, '
'got %r instead.' % (
self.__class__.__name__,
arg,
))
elif 'client' in kwargs:
self.client = kwargs['client']
else:
self.client = client(*args, **kwargs)
logging.Handler.__init__(self)
def emit(self, record):
self.format(record)
# Avoid typical config issues by overriding loggers behavior
if record.name.startswith('opbeat.errors'):
six.print_(to_string(record.message), file=sys.stderr)
return
try:
return self._emit(record)
except Exception:
six.print_(
"Top level Opbeat exception caught - "
"failed creating log record",
sys.stderr)
six.print_(to_string(record.msg), sys.stderr)
six.print_(to_string(traceback.format_exc()), sys.stderr)
try:
self.client.capture('Exception')
except Exception:
pass
def _emit(self, record, **kwargs):
data = {}
for k, v in six.iteritems(record.__dict__):
if '.' not in k and k not in ('culprit',):
continue
data[k] = v
stack = getattr(record, 'stack', None)
if stack is True:
stack = iter_stack_frames()
if stack:
frames = []
started = False
last_mod = ''
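            # Skip the leading frames that belong to the logging machinery
            # itself; start collecting at the first frame outside `logging`.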
for item in stack:
if isinstance(item, (list, tuple)):
frame, lineno = item
else:
frame, lineno = item, item.f_lineno
if not started:
f_globals = getattr(frame, 'f_globals', {})
module_name = f_globals.get('__name__', '')
if last_mod.startswith(
'logging') and not module_name.startswith(
'logging'):
started = True
else:
last_mod = module_name
continue
frames.append((frame, lineno))
stack = frames
extra = getattr(record, 'data', {})
# Add in all of the data from the record that we aren't already capturing
for k in record.__dict__.keys():
if k in (
'stack', 'name', 'args', 'msg', 'levelno', 'exc_text',
'exc_info', 'data', 'created', 'levelname', 'msecs',
'relativeCreated'):
continue
if k.startswith('_'):
continue
extra[k] = record.__dict__[k]
date = datetime.datetime.utcfromtimestamp(record.created)
# If there's no exception being processed,
# exc_info may be a 3-tuple of None
# http://docs.python.org/library/sys.html#sys.exc_info
if record.exc_info and all(record.exc_info):
handler = self.client.get_handler('opbeat.events.Exception')
data.update(handler.capture(exc_info=record.exc_info))
# data['checksum'] = handler.get_hash(data)
data['level'] = record.levelno
data['logger'] = record.name
return self.client.capture('Message',
param_message={'message': record.msg,
'params': record.args},
stack=stack, data=data, extra=extra,
date=date, **kwargs)
| Python | 0 | @@ -586,19 +586,19 @@
kwargs.
-get
+pop
('client
@@ -665,16 +665,44 @@
args%5B0%5D%0A
+ args = args%5B1:%5D%0A
@@ -1106,17 +1106,21 @@
args
-%5B
+.pop(
'client'
%5D%0A
@@ -1115,17 +1115,17 @@
'client'
-%5D
+)
%0A
@@ -1194,37 +1194,59 @@
-logging.Handler.__init__(self
+super(OpbeatHandler, self).__init__(*args, **kwargs
)%0A%0A
|
d97b9f6c508dd24da0f86bc1587ea64708c84a89 | Add parser for the advisory mail recipients. | tools/dist/security/mailinglist.py | tools/dist/security/mailinglist.py | Python | 0.000296 | @@ -0,0 +1,1794 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+#   "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+#   "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Parser for advisory e-mail distribution addresses
+"""
+
+from __future__ import absolute_import
+
+
+import os
+import re
+
+
+class MailingList(object):
+    """
+    A list of e-mail addresses for security advisory pre-notifications.
+    Parses ^/pmc/subversion/security/pre-notifications.txt
+    """
+
+    __PRE_NOTIFICATIONS = 'pre-notifications.txt'
+    __ADDRESS_LINE = re.compile(r'^\s{6}(?:[^<]+)?<[^<>]+>\s*$')
+
+    def __init__(self, rootdir):
+        self.__addresses = []
+        self.__parse_addresses(rootdir)
+
+    def __iter__(self):
+        return self.__addresses.__iter__()
+
+    def __len__(self):
+        return len(self.__addresses)
+
+    def __parse_addresses(self, rootdir):
+        pre_notifications = os.path.join(rootdir, self.__PRE_NOTIFICATIONS)
+        with open(pre_notifications, 'rt') as pn:
+            for line in pn:
+                m = self.__ADDRESS_LINE.match(line)
+                if not m:
+                    continue
+
+                self.__addresses.append(line.strip())
|
|
80a7493e56b1ba6b01bf44f6dd9140de916511a7 | add twisted interface to psycopg2 | pyiem/twistedpg.py | pyiem/twistedpg.py | Python | 0 | @@ -0,0 +1,552 @@
+"""
+module twistedpg.py
+Author: Federico Di Gregorio
+http://twistedmatrix.com/pipermail/twisted-python/2006-April/012955.html
+"""
+
+from psycopg2 import *
+from psycopg2 import connect as _2connect
+from psycopg2.extensions import connection as _2connection
+from psycopg2.extras import RealDictCursor
+
+del connect
+def connect(*args, **kwargs):
+    kwargs['connection_factory'] = connection
+    return _2connect(*args, **kwargs)
+
+class connection(_2connection):
+    def cursor(self):
+        return _2connection.cursor(self, cursor_factory=RealDictCursor)
|
|
009182d0c603f9c1f8fa650f6a9771b38a74c6cc | Add a proper validator for disable_builtins | flexget/plugins/plugin_disable_builtins.py | flexget/plugins/plugin_disable_builtins.py | import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin
log = logging.getLogger('builtins')
class PluginDisableBuiltins(object):
"""
Disables all builtin plugins from a feed.
"""
def __init__(self):
self.disabled = []
def validator(self):
from flexget import validator
# TODO: accept only list (of texts) or boolean
return validator.factory('any')
def debug(self):
for name, info in plugin.plugins.iteritems():
if not info.builtin:
continue
log.debug('Builtin plugin: %s' % name)
def on_feed_start(self, feed):
for name, info in plugin.plugins.iteritems():
if info.builtin:
if isinstance(feed.config['disable_builtins'], list):
if info.name in feed.config['disable_builtins']:
info.builtin = False
self.disabled.append(name)
else:
# disabling all builtins
info.builtin = False
self.disabled.append(name)
log.debug('Disabled builtin plugin %s' % ', '.join(self.disabled))
@priority(-255)
def on_feed_exit(self, feed):
names = []
for name in self.disabled:
names.append(name)
plugin.plugins[name].builtin = True
self.disabled = []
log.debug('Enabled builtin plugins %s' % ', '.join(names))
on_feed_abort = on_feed_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins')
| Python | 0.000064 | @@ -87,16 +87,25 @@
r_plugin
+, plugins
%0A%0Alog =
@@ -136,16 +136,185 @@
ins')%0A%0A%0A
+def all_builtins():%0A %22%22%22Helper function to return an iterator over all builtin plugins.%22%22%22%0A return (plugin for plugin in plugins.itervalues() if plugin.builtin)%0A%0A%0A
class Pl
@@ -349,25 +349,16 @@
%0A %22%22%22
-%0A
Disables
@@ -357,24 +357,38 @@
Disables all
+ (or specific)
builtin plu
@@ -408,70 +408,13 @@
eed.
-%0A
%22%22%22%0A%0A
- def __init__(self):%0A self.disabled = %5B%5D%0A%0A
@@ -484,203 +484,167 @@
-# TODO: accept only list (of texts) or boolean%0A return validator.factory('any')%0A%0A def debug(self):%0A for name, info in plugin.plugins.iteritems():%0A if not info.
+root = validator.factory()%0A root.accept('boolean')%0A root.accept('list').accept('choice').accept_choices(plugin.name for plugin in all_
builtin
-:
+s())
%0A
@@ -652,29 +652,42 @@
- continue%0A
+return root%0A%0A def debug(self):%0A
@@ -719,16 +719,17 @@
ugin
+s
: %25s' %25
name
@@ -728,259 +728,219 @@
' %25
-name)%0A%0A def on_feed_start(self, feed):%0A for name, info in plugin.plugins.iteritems():%0A if info.builtin:%0A if isinstance(feed.config%5B'disable_builtins'%5D, list):%0A if info.name in feed.config%5B'disable
+', '.join(plugin.name for plugin in all_builtins()))%0A%0A @priority(255)%0A def on_feed_start(self, feed, config):%0A self.disabled = %5B%5D%0A if not config:%0A return%0A%0A for plugin in all
_bui
@@ -948,10 +948,10 @@
tins
-'%5D
+()
:%0A
@@ -964,158 +964,51 @@
- info.builtin = False%0A self.disabled.append(name)%0A else:%0A # disabling all builtins
+if config is True or plugin.name in config:
%0A
@@ -1024,16 +1024,14 @@
- info
+plugin
.bui
@@ -1043,20 +1043,16 @@
= False%0A
-
@@ -1072,32 +1072,39 @@
disabled.append(
+plugin.
name)%0A lo
@@ -1135,16 +1135,20 @@
n plugin
+(s):
%25s' %25 '
@@ -1224,16 +1224,24 @@
lf, feed
+, config
):%0A
@@ -1247,18 +1247,49 @@
-names = %5B%5D
+if not self.disabled:%0A return%0A
%0A
@@ -1324,39 +1324,8 @@
ed:%0A
- names.append(name)%0A
@@ -1372,35 +1372,8 @@
rue%0A
- self.disabled = %5B%5D%0A
@@ -1409,17 +1409,20 @@
n plugin
-s
+(s):
%25s' %25 '
@@ -1434,15 +1434,50 @@
oin(
-names))
+self.disabled))%0A self.disabled = %5B%5D
%0A%0A
@@ -1557,18 +1557,29 @@
isable_builtins'
+, api_ver=2
)%0A
|
98524c4e7c7c4b6e8b51b7fd89501d8ac00e0d8e | generates a hash of the string input | elements/GenerateHashOfString.py | elements/GenerateHashOfString.py | Python | 0.999975 | @@ -0,0 +1,1440 @@
+# coding: utf-8
+from ElementBase import ElementBase
+from ElementParameter import ElementParameter
+from ElementValue import ElementValue
+
+import hashlib
+
+class GenerateHashOfString(ElementBase):
+	def __init__(self):
+		self.status = 'running'
+		self.output = None
+		self.params = []
+		self.type = 'Standard'
+		self.setup_params()
+
+	def can_handle_list(self):
+		return False
+
+	def setup_params(self):
+		algs = []
+		for t in hashlib.algorithms_available:
+			algs.append(t)
+		self.params.append(ElementParameter(name='algorithms',displayName='Hash Algorithm',display=True, type='list',value='md5',allowedValues=algs))
+
+	def get_status(self):
+		return self.status
+
+	def get_input_type(self):
+		return 'string'
+
+	def get_output(self):
+		return self.output
+
+	def get_output_type(self):
+		return 'string'
+
+	def get_params(self):
+		return self.params
+
+	def set_params(self, params = None):
+		self.params = params or []
+
+	def get_description(self):
+		return 'Generates a hash of the input value and returns it'
+
+	def get_title(self):
+		return 'Generate Hash of string'
+
+	def get_icon(self):
+		return 'iob:ios7_cog_outline_32'
+
+	def get_category(self):
+		return 'Utility'
+
+	def get_type(self):
+		return self.type
+
+	def run(self, input=''):
+		algo = self.get_param_by_name('algorithms')
+		self.status = 'complete'
+		return ElementValue(type=self.output, value=hashlib.new(algo.value, input.value.encode('utf-8')).hexdigest())
|
|
f47482df83a8ab643a55062b12fce11fbd703886 | add 90. The first 100 problems have been solved! Oh~~~~~~~~Yeah | vol2/90.py | vol2/90.py | Python | 0.999935 | @@ -0,0 +1,391 @@
+from itertools import combinations
+
+def valid(c1, c2):
+    return all(x in c1 and y in c2 or x in c2 and y in c1 for x, y in squares)
+
+if __name__ == "__main__":
+    squares = [(0,1), (0,4), (0,6), (1,6), (2,5), (3,6), (4,6), (8,1)]
+    cube = list(combinations([0,1,2,3,4,5,6,7,8,6], 6))
+    print sum(1 for i, c1 in enumerate(cube)
+              for c2 in cube[i+1:] if valid(c1, c2))
+
+
|
|
f4c25b13e2eb1736e47190ad1a2ec66cbe17f2bf | update total_projected_qty in bin.py | erpnext/stock/doctype/bin/bin.py | erpnext/stock/doctype/bin/bin.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, nowdate
import frappe.defaults
from frappe.model.document import Document
class Bin(Document):
def validate(self):
if self.get("__islocal") or not self.stock_uom:
self.stock_uom = frappe.db.get_value('Item', self.item_code, 'stock_uom')
self.validate_mandatory()
self.projected_qty = flt(self.actual_qty) + flt(self.ordered_qty) + \
flt(self.indented_qty) + flt(self.planned_qty) - flt(self.reserved_qty)
def validate_mandatory(self):
qf = ['actual_qty', 'reserved_qty', 'ordered_qty', 'indented_qty']
for f in qf:
if (not getattr(self, f, None)) or (not self.get(f)):
self.set(f, 0.0)
def update_stock(self, args, allow_negative_stock=False, via_landed_cost_voucher=False):
self.update_qty(args)
if args.get("actual_qty") or args.get("voucher_type") == "Stock Reconciliation":
from erpnext.stock.stock_ledger import update_entries_after
if not args.get("posting_date"):
args["posting_date"] = nowdate()
# update valuation and qty after transaction for post dated entry
if args.get("is_cancelled") == "Yes" and via_landed_cost_voucher:
return
update_entries_after({
"item_code": self.item_code,
"warehouse": self.warehouse,
"posting_date": args.get("posting_date"),
"posting_time": args.get("posting_time"),
"voucher_no": args.get("voucher_no")
}, allow_negative_stock=allow_negative_stock, via_landed_cost_voucher=via_landed_cost_voucher)
def update_qty(self, args):
# update the stock values (for current quantities)
if args.get("voucher_type")=="Stock Reconciliation":
if args.get('is_cancelled') == 'No':
self.actual_qty = args.get("qty_after_transaction")
else:
qty_after_transaction = frappe.db.get_value("""select qty_after_transaction
from `tabStock Ledger Entry`
where item_code=%s and warehouse=%s
and not (voucher_type='Stock Reconciliation' and voucher_no=%s)
order by posting_date desc limit 1""",
(self.item_code, self.warehouse, args.get('voucher_no')))
self.actual_qty = flt(qty_after_transaction[0][0]) if qty_after_transaction else 0.0
else:
self.actual_qty = flt(self.actual_qty) + flt(args.get("actual_qty"))
self.ordered_qty = flt(self.ordered_qty) + flt(args.get("ordered_qty"))
self.reserved_qty = flt(self.reserved_qty) + flt(args.get("reserved_qty"))
self.indented_qty = flt(self.indented_qty) + flt(args.get("indented_qty"))
self.planned_qty = flt(self.planned_qty) + flt(args.get("planned_qty"))
self.projected_qty = flt(self.actual_qty) + flt(self.ordered_qty) + \
flt(self.indented_qty) + flt(self.planned_qty) - flt(self.reserved_qty)
self.save()
update_item_projected_qty(self.item_code)
def get_first_sle(self):
sle = frappe.db.sql("""
select * from `tabStock Ledger Entry`
where item_code = %s
and warehouse = %s
order by timestamp(posting_date, posting_time) asc, name asc
limit 1
""", (self.item_code, self.warehouse), as_dict=1)
return sle and sle[0] or None
def update_item_projected_qty(item_code):
'''Set Item project qty'''
frappe.db.sql('''update tabItem set
total_projected_qty = (select sum(projected_qty) from tabBin where item_code=%s)
where name=%s''', (item_code, item_code))
| Python | 0 | @@ -3316,16 +3316,23 @@
d_qty =
+ifnull(
(select
@@ -3381,16 +3381,20 @@
code=%25s)
+, 0)
%0A%09%09where
|
c6cd7d2a310bc0b107e0d2a481260b2e95bac577 | add prime_factors function to utils | utils.py | utils.py | Python | 0.000008 | @@ -0,0 +1,226 @@
+"Utilities to help solving problems."
+
+def prime_factors(num):
+    i = 2
+    while i * i <= num:
+        if num % i:
+            i += 1
+        else:
+            num //= i
+            yield i
+    if num > 1:
+        yield num
|
|
6be93bfbaf254234f008e2c714b0aae10434fe68 | add orm | www/orm.py | www/orm.py | Python | 0.000047 | @@ -0,0 +1,1818 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+__author__ = 'Jiayi Li'
+
+import asyncio, aiomysql, logging
+
+def log(sql, args=()):
+    logging.info('SQL: %s' % sql)
+
+# create a connection pool, stored by global variable '__pool'
+async def create_pool(loop, **kw):
+    logging.info('create database connection pool...')
+    global __pool
+    __pool = await aiomysql.create_pool(
+        host = kw.get('host', 'localhost'),
+        port = kw.get('port', 3306),
+        user = kw.get('user'),
+        password = kw.get('password'),
+        db = kw.get('db'),
+        charset = kw.get('charset', 'utf-8'),
+        autocommit = kw.get('autocommit', True),
+        maxsize = kw.get('maxsize', 10),
+        minsize = kw.get('minsize', 1),
+        loop = loop
+    )
+
+# SELECT
+async def select(sql, args, size=None):
+    log(sql, args)
+    global __pool
+    async with __pool.get() as conn:
+        async with conn.cursor(aiomysql.DictCursor) as cur:
+            await cur.execute(sql.replace('?', '%s'), args or ())
+            if size:
+                rs = await cur.fetchmany(size)
+            else:
+                rs = yield from cur.fetchall()
+            await cur.close()
+        logging.info('rows returned: %s' % len(rs))
+        return rs
+
+# INSERT, UPDATE and DELETE
+async def execute(sql, args, autocommit=True):
+    log(sql)
+    async with __pool.get() as conn:
+        if not autocommit:
+            await conn.begin()
+        try:
+            async with conn.cursor(aiomysql.DictCursor) as cur:
+                await cur.execute(sql.replace('?', '%s'), args)
+                affected = cur.rowcount
+            if not autocommit:
+                await conn.commit()
+        except BaseException as e:
+            if not autocommit:
+                await conn.rollback()
+            raise
+        return affected
|
|
5b05640a60c66d9d12b9794f2ae55785efe1e099 | Define solidfill. | riot/tags/solidfill.py | riot/tags/solidfill.py | Python | 0.000002 | @@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+from urwid import SolidFill
+
+def parse_tag_from_node(node):
+    return SolidFill()
|
|
785f2d3a6d10d8d6ba72712eec29c5be5849f671 | Add build_raw_data.py | fluid/PaddleNLP/text_classification/async_executor/data_generator/build_raw_data.py | fluid/PaddleNLP/text_classification/async_executor/data_generator/build_raw_data.py | Python | 0.000008 | @@ -0,0 +1,1850 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Build lego raw data
+"""
+from __future__ import print_function
+import sys
+import os
+import random
+import re
+data_type = sys.argv[1]
+
+if not (data_type == "train" or data_type == "test"):
+    print("python %s [test/train]" % sys.argv[0], file=sys.stderr)
+    sys.exit(-1)
+
+pos_folder = "aclImdb/" + data_type + "/pos/"
+neg_folder = "aclImdb/" + data_type + "/neg/"
+
+pos_train_list = [(pos_folder + x, "1") for x in os.listdir(pos_folder)]
+neg_train_list = [(neg_folder + x, "0") for x in os.listdir(neg_folder)]
+
+all_train_list = pos_train_list + neg_train_list
+random.shuffle(all_train_list)
+
+
+def load_dict(dictfile):
+    """
+    Load word id dict
+    """
+    vocab = {}
+    wid = 0
+    with open(dictfile) as f:
+        for line in f:
+            vocab[line.strip()] = str(wid)
+            wid += 1
+    return vocab
+
+
+vocab = load_dict("aclImdb/imdb.vocab")
+unk_id = str(len(vocab))
+print("vocab size: ", len(vocab), file=sys.stderr)
+pattern = re.compile(r'(;|,|\.|\?|!|\s|\(|\))')
+
+for fitem in all_train_list:
+    label = str(fitem[1])
+    fname = fitem[0]
+    with open(fname) as f:
+        sent = f.readline().lower().replace("<br />", " ").strip()
+        out_s = "%s | %s" % (sent, label)
+        print(out_s, file=sys.stdout)
|
|
f99c8e6e26b85ae7805ff38e4d89978d06e93c97 | Add SQSRequest base class | sqs.py | sqs.py | Python | 0 | @@ -0,0 +1,339 @@
+from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPClient
+from tornado.httputil import url_concat
+import datetime
+import hashlib
+import hmac
+
+
+class SQSRequest(HTTPRequest):
+    """SQS AWS Adapter for Tornado HTTP request"""
+    def __init__(self, *args, **kwargs):
+        super(SQSRequest, self).__init__(*args, **kwargs)
|
|
9e2669539c5d7662bb6d6a89877b30235eef1bc2 | Write solution to DEC14 XOR question. | xor.py | xor.py | Python | 0.999905 | @@ -0,0 +1,515 @@
+# http://www.codechef.com/DEC14/problems/XORSUB
+import operator
+def f(p):
+	if p == []:
+		return 0
+	elif len(p) == 1:
+		return p[0]
+	else:
+		return reduce(operator.xor, p)
+
+def list_powerset(lst):
+    result = [[]]
+    for x in lst:
+        result.extend([subset + [x] for subset in result])
+    return result
+
+t = int(raw_input())
+while t:
+	k = int(raw_input().split()[1])
+	array = map(int, raw_input().split())
+	max = -1
+	for i in list_powerset(array):
+		if max < (k ^ f(i)):
+			max = k ^ f(i)
+
+	print max
+	t -= 1
|
|
135cdb7f16372978774acf06d4da556d0a7a7db7 | add solution template | exercises/error-handling/error_handling.py | exercises/error-handling/error_handling.py | Python | 0.000001 | @@ -0,0 +1,244 @@
+def handle_error_by_throwing_exception():
+    pass
+
+
+def handle_error_by_returning_none(input_data):
+    pass
+
+
+def handle_error_by_returning_tuple(input_data):
+    pass
+
+
+def filelike_objects_are_closed_on_exception(filelike_object):
+    pass
|
|
286e996c8dd7a299a5db148e78bbdaa0e1cb1b5c | Add sample base. | samples/sample.py | samples/sample.py | Python | 0 | @@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+"""AirWaveAPIClient sample."""
+
+
+def main():
+    """Sample main."""
+
+if __name__ == "__main__":
+
+    main()
|
|
c1fae9e5ace57320b4f4e69efc941c7fe6266381 | add stft graph writer | write_stft_graph.py | write_stft_graph.py | Python | 0 | @@ -0,0 +1,374 @@
+import pdb
+import tensorflow as tf
+from birdwatcher.generators import compose, stft, amplitude_to_db, read_audio, reshape
+
+AUDIO_SHAPE = (44100*3, 1)
+clean_samples = compose(reshape, amplitude_to_db, stft, read_audio)
+
+x = tf.placeholder(tf.float32, shape=AUDIO_SHAPE)
+out = clean_samples(x)
+
+sess = tf.Session()
+tf.train.write_graph(sess.graph_def, 'models', 'stft.pbtxt')
|