Dataset schema (column statistics as reported by the source dump):

| column | type | min | max |
|---|---|---|---|
| commit | string | 40 | 40 |
| subject | string | 1 | 3.25k |
| old_file | string | 4 | 311 |
| new_file | string | 4 | 311 |
| old_contents | string | 0 | 26.3k |
| lang | string (3 classes) | | |
| proba | float64 | 0 | 1 |
| diff | string | 0 | 7.82k |

Each record below is laid out as: commit | subject | old_file | new_file | old_contents | lang | proba | diff.
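As a quick orientation — and purely as a sketch, since this dump does not name its source dataset — records with this schema can be consumed with the Hugging Face `datasets` library; the dataset identifier below is a placeholder:

```python
# Placeholder identifier: "user/commit-dataset" is NOT the real name of this dump.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")
for record in ds.select(range(3)):
    # Each record pairs a commit subject with the pre-change file contents
    # ("old_contents") and a character-level diff of the change.
    print(record["commit"][:8], record["lang"], record["subject"])
```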
625d250c7eabcf48292590a6b0ca57f1b3cc7c49 | Add meshprocessing scratch | scratch/meshprocessing.py | scratch/meshprocessing.py | Python | 0.000001 | @@ -0,0 +1,1712 @@
import networkx as nx
from time import time
import numpy as np

def mesh2graph(faces):
    """ Converts a triangular mesh to a graph only taking
    the connectivity into account """
    g = nx.Graph()
    for i in range(len(faces)):
        g.add_edge(faces[i,0], faces[i,1])
        g.add_edge(faces[i,1], faces[i,2])
    return g

def graphlaplacian(g):
    import scipy.sparse as sp
    # scipy.sparse.linalg.eigen
    n = g.order()
    D = sp.identity(n)
    A = nx.to_scipy_sparse_matrix(g)
    di = A.sum(axis=1).T.tolist()[0]
    D.setdiag(di)
    L = D - A
    return L

def grapheigendecomposition(graphlaplacian, k = 3):
    """ k is the number of eigenvalues desired

    See http://docs.scipy.org/doc/scipy/reference/sparse.linalg.html
    """
    from scipy.sparse.linalg.eigen import lobpcg
    guess = np.random.rand(graphlaplacian.shape[0],k) * 100
    return lobpcg(graphlaplacian, guess)

if __name__ == '__main__':
    faces = np.array([ [0,1,2],
                       [1,2,3]], dtype = np.uint)

    start = time()
    import nibabel.gifti as gi
    a=gi.read('/home/stephan/Dev/PyWorkspace/connectomeviewer/cviewer/resources/atlases/template_atlas_homo_sapiens_01/Gifti/fsaverage.gii')
    faces = a.darrays[1].data[:100,:]
    print "Loading took ", time()-start

    g = mesh2graph(faces)
    print "Making graph ", time()-start

    gl = graphlaplacian(g)
    print "Getting laplacian ", time()-start

    w,v = grapheigendecomposition(gl, k = 3)
    # Ev, Evect = eig(gl)
    print w
    print "Getting eigendecomposition ", time()-start
    from scipy.linalg import eig, eigh
    Ev, Evect = eigh(gl.todense())
    print Ev
    #print np.real(Ev)
|
|
6a13511db8401a17a5c6feb7071af821211c2836 | Create sitemap urls | opps/sitemaps/urls.py | opps/sitemaps/urls.py | Python | 0.000207 | @@ -0,0 +1,728 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from django.contrib.sitemaps import views as sitemap_views

from opps.sitemaps.sitemaps import GenericSitemap, InfoDisct


sitemaps = {
    'articles': GenericSitemap(InfoDisct(), priority=0.6),
}

sitemaps_googlenews = {
    'articles': GenericSitemap(InfoDisct(True), priority=0.6),
}

urlpatterns = patterns(
    '',
    url(r'^\.xml$', sitemap_views.index,
        {'sitemaps': sitemaps}),
    url(r'^-googlenews\.xml$', sitemap_views.sitemap,
        {'sitemaps': sitemaps_googlenews,
         'template_name': 'sitemap_googlenews.xml'}),
    url(r'^-(?P<section>.+)\.xml$', sitemap_views.sitemap,
        {'sitemaps': sitemaps}),

)
|
|
a2309df146fd9870f4ec4dd6f422af5a37a037c1 | Add a spacer pixel between images. [ci skip] | examples/utils.py | examples/utils.py | import climate
import pickle
import gzip
import numpy as np
import os
import tempfile
logging = climate.get_logger(__name__)
climate.enable_default_logging()
try:
import matplotlib.pyplot as plt
except ImportError:
logging.critical('please install matplotlib to run the examples!')
raise
try:
import skdata.mnist
import skdata.cifar10
except ImportError:
logging.critical('please install skdata to run the examples!')
raise
def load_mnist(labels=False):
'''Load the MNIST digits dataset.'''
mnist = skdata.mnist.dataset.MNIST()
mnist.meta # trigger download if needed.
def arr(n, dtype):
arr = mnist.arrays[n]
return arr.reshape((len(arr), -1)).astype(dtype)
train_images = arr('train_images', np.float32) / 128 - 1
train_labels = arr('train_labels', np.uint8)
test_images = arr('test_images', np.float32) / 128 - 1
test_labels = arr('test_labels', np.uint8)
if labels:
return ((train_images[:50000], train_labels[:50000, 0]),
(train_images[50000:], train_labels[50000:, 0]),
(test_images, test_labels[:, 0]))
return train_images[:50000], train_images[50000:], test_images
def load_cifar(labels=False):
cifar = skdata.cifar10.dataset.CIFAR10()
cifar.meta # trigger download if needed.
pixels = cifar._pixels.astype(np.float32).reshape((len(cifar._pixels), -1)) / 128 - 1
if labels:
labels = cifar._labels.astype(np.uint8)
return ((pixels[:40000], labels[:40000, 0]),
(pixels[40000:50000], labels[40000:50000, 0]),
(pixels[50000:], labels[50000:, 0]))
return pixels[:40000], pixels[40000:50000], pixels[50000:]
def plot_images(imgs, loc, title=None, channels=1):
'''Plot an array of images.
We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
that is, there are n^2 images along the first axis of the array, and each
image is c squares measuring s pixels on a side. Each row of the input will
be plotted as a sub-region within a single image array containing an n x n
grid of images.
'''
n = int(np.sqrt(len(imgs)))
assert n * n == len(imgs), 'images array must contain a square number of rows!'
s = int(np.sqrt(len(imgs[0]) / channels))
assert s * s == len(imgs[0]) / channels, 'images must be square!'
img = np.zeros((s * n, s * n, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * s:(r+1) * s, c * s:(c+1) * s] = pix.reshape((s, s, channels))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(loc)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
if title:
ax.set_title(title)
def plot_layers(weights, tied_weights=False, channels=1):
'''Create a plot of weights, visualized as "bottom-level" pixel arrays.'''
if hasattr(weights[0], 'get_value'):
weights = [w.get_value() for w in weights]
k = min(len(weights), 9)
imgs = np.eye(weights[0].shape[0])
for i, weight in enumerate(weights[:-1]):
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + i + 1,
channels=channels,
title='Layer {}'.format(i+1))
weight = weights[-1]
n = weight.shape[1] / channels
if int(np.sqrt(n)) ** 2 != n:
return
if tied_weights:
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + k,
channels=channels,
title='Layer {}'.format(k))
else:
plot_images(weight,
100 + 10 * k + k,
channels=channels,
title='Decoding weights')
def plot_filters(filters):
'''Create a plot of conv filters, visualized as pixel arrays.'''
imgs = filters.get_value()
N, channels, x, y = imgs.shape
n = int(np.sqrt(N))
assert n * n == N, 'filters must contain a square number of rows!'
assert channels == 1 or channels == 3, 'can only plot grayscale or rgb filters!'
img = np.zeros(((y+1) * n - 1, (x+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (y+1):(r+1) * (y+1) - 1,
c * (x+1):(c+1) * (x+1) - 1] = pix.transpose((1, 2, 0))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
| Python | 0.000003 | @@ -2397,20 +2397,36 @@
os((
-s * n, s * n
+(s+1) * n - 1, (s+1) * n - 1
, ch
@@ -2534,17 +2534,21 @@
img[r * 
-s
+(s+1)
:(r+1) * 
@@ -2552,26 +2552,58 @@
) * 
-s, c * s:(c+1) * s
+(s+1) - 1,
+            c * (s+1):(c+1) * (s+1) - 1
] = 
|
78d926434ff1ad6ade0764ac18cca2413a5beccb | Bump dev version in master | fabric/version.py | fabric/version.py | """
Current Fabric version constant plus version pretty-print method.
This functionality is contained in its own module to prevent circular import
problems with ``__init__.py`` (which is loaded by setup.py during installation,
which in turn needs access to this version information.)
"""
from subprocess import Popen, PIPE
from os.path import abspath, dirname
def git_sha():
loc = abspath(dirname(__file__))
p = Popen(
"cd \"%s\" && git log -1 --format=format:%%h" % loc,
shell=True,
stdout=PIPE,
stderr=PIPE
)
return p.communicate()[0]
VERSION = (1, 2, 0, 'final', 0)
def get_version(form='short'):
"""
Return a version string for this package, based on `VERSION`.
Takes a single argument, ``form``, which should be one of the following
strings:
* ``branch``: just the major + minor, e.g. "0.9", "1.0".
* ``short`` (default): compact, e.g. "0.9rc1", "0.9.0". For package
filenames or SCM tag identifiers.
* ``normal``: human readable, e.g. "0.9", "0.9.1", "0.9 beta 1". For e.g.
documentation site headers.
* ``verbose``: like ``normal`` but fully explicit, e.g. "0.9 final". For
tag commit messages, or anywhere that it's important to remove ambiguity
between a branch and the first final release within that branch.
"""
# Setup
versions = {}
branch = "%s.%s" % (VERSION[0], VERSION[1])
tertiary = VERSION[2]
type_ = VERSION[3]
final = (type_ == "final")
type_num = VERSION[4]
firsts = "".join([x[0] for x in type_.split()])
sha = git_sha()
sha1 = (" (%s)" % sha) if sha else ""
# Branch
versions['branch'] = branch
# Short
v = branch
if (tertiary or final):
v += "." + str(tertiary)
if not final:
v += firsts
if type_num:
v += str(type_num)
else:
v += sha1
versions['short'] = v
# Normal
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
versions['normal'] = v
# Verbose
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
else:
v += " final"
versions['verbose'] = v
try:
return versions[form]
except KeyError:
raise TypeError('"%s" is not a valid form specifier.' % form)
__version__ = get_version('short')
| Python | 0 | @@ -602,20 +602,20 @@
(1,
-2
+3
, 0, '
-final
+alpha
', 0
|
f18dc77d49a7c5154df11232f645dbb8e0f897dd | Remove bias | models/dual_encoder.py | models/dual_encoder.py | import tensorflow as tf
import numpy as np
from models import helpers
FLAGS = tf.flags.FLAGS
def get_embeddings(hparams):
if hparams.glove_path and hparams.vocab_path:
tf.logging.info("Loading Glove embeddings...")
vocab_array, vocab_dict = helpers.load_vocab(hparams.vocab_path)
glove_vectors, glove_dict = helpers.load_glove_vectors(hparams.glove_path, vocab=set(vocab_array))
initializer = helpers.build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, hparams.embedding_dim)
else:
tf.logging.info("No glove/vocab path specificed, starting with random embeddings.")
initializer = tf.random_uniform_initializer(-0.25, 0.25)
return tf.get_variable(
"word_embeddings",
shape=[hparams.vocab_size, hparams.embedding_dim],
initializer=initializer)
def dual_encoder_model(
hparams,
mode,
context,
context_len,
utterance,
utterance_len,
targets):
# Initialize embedidngs randomly or with pre-trained vectors if available
embeddings_W = get_embeddings(hparams)
# Embed the context and the utterance
context_embedded = tf.nn.embedding_lookup(
embeddings_W, context, name="embed_context")
utterance_embedded = tf.nn.embedding_lookup(
embeddings_W, utterance, name="embed_utterance")
# Build the RNN
with tf.variable_scope("rnn") as vs:
# We use an LSTM Cell
cell = tf.nn.rnn_cell.LSTMCell(
hparams.rnn_dim,
forget_bias=2.0,
use_peepholes=True,
state_is_tuple=True)
# Run the utterance and context through the RNN
rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
cell,
tf.concat(0, [context_embedded, utterance_embedded]),
sequence_length=tf.concat(0, [context_len, utterance_len]),
dtype=tf.float32)
encoding_context, encoding_utterance = tf.split(0, 2, rnn_states.h)
with tf.variable_scope("prediction") as vs:
M = tf.get_variable("M",
shape=[hparams.rnn_dim, hparams.rnn_dim],
initializer=tf.truncated_normal_initializer())
b = tf.get_variable("b", [hparams.rnn_dim])
# "Predict" a response: c * M
generated_response = tf.matmul(encoding_context, M) + b
generated_response = tf.expand_dims(generated_response, 2)
encoding_utterance = tf.expand_dims(encoding_utterance, 2)
# Dot product between generated response and actual response
# (c * M) * r
logits = tf.batch_matmul(generated_response, encoding_utterance, True)
logits = tf.squeeze(logits, [2])
# Apply sigmoid to convert logits to probabilities
probs = tf.sigmoid(logits)
# Calculate the binary cross-entropy loss
losses = tf.nn.sigmoid_cross_entropy_with_logits(logits, tf.to_float(targets))
# Mean loss across the batch of examples
mean_loss = tf.reduce_mean(losses, name="mean_loss")
return probs, mean_loss
| Python | 0.000017 | @@ -2035,16 +2035,18 @@
r())
+ #
b = tf.
@@ -2177,12 +2177,8 @@
, M)
- + b

|
04d122d88bb9f71843df924e048b12de1976b847 | Add missing migration | src/keybar/migrations/0008_entry_salt.py | src/keybar/migrations/0008_entry_salt.py | Python | 0.0002 | @@ -0,0 +1,439 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('keybar', '0007_remove_entry_key'),
    ]

    operations = [
        migrations.AddField(
            model_name='entry',
            name='salt',
            field=models.BinaryField(null=True, blank=True),
            preserve_default=True,
        ),
    ]
|
|
3ac5648f8f3ab9e2dd6d93002f63c65bedb3e637 | Patch beanstalkd collector | src/collectors/beanstalkd/beanstalkd.py | src/collectors/beanstalkd/beanstalkd.py | # coding=utf-8
"""
Collects the following from beanstalkd:
- Server statistics via the 'stats' command
- Per tube statistics via the 'stats-tube' command
#### Dependencies
* beanstalkc
"""
import re
import diamond.collector
try:
import beanstalkc
beanstalkc # workaround for pyflakes issue #13
except ImportError:
beanstalkc = None
class BeanstalkdCollector(diamond.collector.Collector):
COUNTERS_REGEX = re.compile(
r'^(cmd-.*|job-timeouts|total-jobs|total-connections)$')
def get_default_config_help(self):
config_help = super(BeanstalkdCollector,
self).get_default_config_help()
config_help.update({
'host': 'Hostname',
'port': 'Port',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(BeanstalkdCollector, self).get_default_config()
config.update({
'path': 'beanstalkd',
'host': 'localhost',
'port': 11300,
})
return config
def _get_stats(self):
stats = {}
try:
connection = beanstalkc.Connection(self.config['host'],
int(self.config['port']))
except beanstalkc.BeanstalkcException, e:
self.log.error("Couldn't connect to beanstalkd: %s", e)
return {}
stats['instance'] = connection.stats()
stats['tubes'] = []
for tube in connection.tubes():
tube_stats = connection.stats_tube(tube)
stats['tubes'].append(tube_stats)
return stats
def collect(self):
if beanstalkc is None:
self.log.error('Unable to import beanstalkc')
return {}
info = self._get_stats()
for stat, value in info['instance'].items():
if stat != 'version':
self.publish(stat, value,
metric_type=self.get_metric_type(stat))
for tube_stats in info['tubes']:
tube = tube_stats['name']
for stat, value in tube_stats.items():
if stat != 'name':
self.publish('tubes.%s.%s' % (tube, stat), value,
metric_type=self.get_metric_type(stat))
def get_metric_type(self, stat):
if self.COUNTERS_REGEX.match(stat):
return 'COUNTER'
return 'GAUGE'
| Python | 0.00001 | @@ -411,16 +411,62 @@
ector):
+    SKIP_LIST = ['version', 'id', 'hostname']
COUN
@@ -1996,20 +1996,29 @@
tat 
-!= 'version'
+not in self.SKIP_LIST
:
|
531da297c57c7b359c37a743095c10e7ad0592cf | Add test_container | tests/test_container.py | tests/test_container.py | Python | 0.000003 | @@ -0,0 +1,486 @@
import pdir


def test_acting_like_a_list():
    dadada = 1
    cadada = 1
    vadada = 1
    apple1 = 1
    xapple2 = 1
    result, correct = pdir(), dir()
    assert len(correct) == len(result)

    for x, y in zip(correct, result):
        assert x == y


def test_acting_like_a_list_when_search():
    dadada = 1
    cadada = 1
    vadada = 1
    apple1 = 1
    xapple2 = 1
    result = pdir().s('apple')
    assert len(result) == 2
    assert list(result) == ['apple1', 'xapple2']
|
|
ddf296682cce9e367bc73a33117c969a8def6ee7 | Fix PEP8 error | tests/test_operators.py | tests/test_operators.py | import unittest
from nose.tools import *
from gargoyle.operators.comparable import *
from gargoyle.operators.identity import *
from gargoyle.operators.misc import *
class BaseCondition(object):
def test_has_label(self):
ok_(self.condition.label)
def test_has_description(self):
ok_(self.condition.description)
def test_has_applies_to_method(self):
ok_(self.condition.applies_to)
class TestTruthyCondition(BaseCondition, unittest.TestCase):
@property
def condition(self):
return Truthy()
def test_applies_to_if_argument_is_truthy(self):
ok_(self.condition.applies_to(True))
ok_(self.condition.applies_to("hello"))
ok_(self.condition.applies_to(False) is False)
ok_(self.condition.applies_to("") is False)
class TestEqualsCondition(BaseCondition, unittest.TestCase):
@property
def condition(self):
return Equals(value='Fred')
def test_applies_to_if_argument_is_equal_to_value(self):
ok_(self.condition.applies_to('Fred'))
ok_(self.condition.applies_to('Steve') is False)
ok_(self.condition.applies_to('') is False)
ok_(self.condition.applies_to(True) is False)
@raises(TypeError)
def test_raises_error_if_not_provided_value(self):
Equals()
class TestEnumCondition(BaseCondition, unittest.TestCase):
@property
def condition(self):
return Enum(False, 2.0, '3')
def test_applies_to_if_argument_in_enum(self):
ok_(self.condition.applies_to(False))
ok_(self.condition.applies_to(2.0))
ok_(self.condition.applies_to(9) is False)
ok_(self.condition.applies_to("1") is False)
ok_(self.condition.applies_to(True) is False)
class TestBetweenCondition(BaseCondition, unittest.TestCase):
@property
def condition(self, lower=1, higher=100):
return Between(lower, higher)
def test_applies_to_if_between_lower_and_upper_bound(self):
ok_(self.condition.applies_to(0) is False)
ok_(self.condition.applies_to(1) is False)
ok_(self.condition.applies_to(2))
ok_(self.condition.applies_to(99))
ok_(self.condition.applies_to(100) is False)
ok_(self.condition.applies_to('steve') is False)
def test_applies_to_works_with_any_comparable(self):
animals = Between('cobra', 'orangatang')
ok_(animals.applies_to('dog'))
ok_(animals.applies_to('elephant'))
ok_(animals.applies_to('llama'))
ok_(animals.applies_to('aardvark') is False)
ok_(animals.applies_to('whale') is False)
ok_(animals.applies_to('zebra') is False)
class TestLessThanCondition(BaseCondition, unittest.TestCase):
@property
def condition(self, upper=500):
return LessThan(upper)
def test_applies_to_if_value_less_than_argument(self):
ok_(self.condition.applies_to(float("-inf")))
ok_(self.condition.applies_to(-50000))
ok_(self.condition.applies_to(-1))
ok_(self.condition.applies_to(0))
ok_(self.condition.applies_to(499))
ok_(self.condition.applies_to(500) is False)
ok_(self.condition.applies_to(10000) is False)
ok_(self.condition.applies_to(float("inf")) is False)
def test_works_with_any_comparable(self):
ok_(LessThan('giraffe').applies_to('aardvark'))
ok_(LessThan('giraffe').applies_to('zebra') is False)
ok_(LessThan(56.7).applies_to(56))
ok_(LessThan(56.7).applies_to(56.0))
ok_(LessThan(56.7).applies_to(57.0) is False)
ok_(LessThan(56.7).applies_to(56.71) is False)
class TestMoreThanCondition(BaseCondition, unittest.TestCase):
@property
def condition(self, lower=10):
return MoreThan(lower)
def test_applies_to_if_value_more_than_argument(self):
ok_(self.condition.applies_to(float("inf")))
ok_(self.condition.applies_to(10000))
ok_(self.condition.applies_to(11))
ok_(self.condition.applies_to(10) is False)
ok_(self.condition.applies_to(0) is False)
ok_(self.condition.applies_to(-100) is False)
ok_(self.condition.applies_to(float('-inf')) is False)
def test_works_with_any_comparable(self):
ok_(MoreThan('giraffe').applies_to('zebra'))
ok_(MoreThan('giraffe').applies_to('aardvark') is False)
ok_(MoreThan(56.7).applies_to(57))
ok_(MoreThan(56.7).applies_to(57.0))
ok_(MoreThan(56.7).applies_to(56.0) is False)
ok_(MoreThan(56.7).applies_to(56.71))
class TestPercentCondition(BaseCondition, unittest.TestCase):
class FalseyObject(object):
def __nonzero__(self):
return False
@property
def condition(self):
return Percent(50)
def test_applies_to_percentage_passed_in(self):
runs = map(self.condition.applies_to, range(1000))
successful_runs = filter(bool, runs)
self.assertAlmostEqual(len(successful_runs), 500, delta=50)
def test_returns_false_if_argument_is_falsey(self):
eq_(self.condition.applies_to(False), False)
eq_(self.condition.applies_to(self.FalseyObject()), False)
| Python | 0.000012 | @@ -3618,16 +3618,17 @@
False)

+
class Te
|
79ebedc800c31b47bd0cc340de06dafcd6ade7f9 | Add TwrOauth basic test | tests/test_twr_oauth.py | tests/test_twr_oauth.py | Python | 0 | @@ -0,0 +1,2641 @@
#!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - [email protected]

#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:

#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.

#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.

import sys

from gi.repository import GObject

sys.path.append("..")
from twitter.twr_oauth import TwrOauth
from twitter.twr_account import TwrAccount


consumer_key = ''
consumer_secret = ''

access_key = ''
access_secret = ''

TwrAccount.set_secrets(consumer_key, consumer_secret,
                       access_key, access_secret)


def __phase2_failed_cb(oauth, info):
    print '[FAILED] phase2: access-downloaded-failed, with %s' % info
    loop.quit()


def __phase1_failed_cb(oauth, info):
    print '[FAILED] phase1: request-downloaded-failed, with %s' % info
    loop.quit()


def __phase2_cb(oauth, info):
    print '[OK] phase2: access-downloaded, with %s' % info

    TwrAccount.set_secrets(consumer_key, consumer_secret,
                           info['oauth_token'], info['oauth_token_secret'])
    loop.quit()


def __phase1_cb(oauth, info):
    print '[OK] phase1: request-downloaded'

    url = TwrOauth.AUTHORIZATION_URL % info['oauth_token']
    print 'Please visit %s' % url
    verifier = raw_input('verifier: ')

    TwrAccount.set_secrets(consumer_key, consumer_secret,
                           info['oauth_token'], info['oauth_token_secret'])

    oauth.connect('access-downloaded', __phase2_cb)
    oauth.connect('access-downloaded-failed', __phase2_failed_cb)
    oauth.access_token(verifier)

oauth = TwrOauth()
oauth.connect('request-downloaded', __phase1_cb)
oauth.connect('request-downloaded-failed', __phase1_failed_cb)
oauth.request_token()

loop = GObject.MainLoop()
loop.run()
|
|
d15c8eaca5fb115b8600a8e743ae73a9edba9a5b | Initialize P04_datetimeModule | books/AutomateTheBoringStuffWithPython/Chapter15/P04_datetimeModule.py | books/AutomateTheBoringStuffWithPython/Chapter15/P04_datetimeModule.py | Python | 0.000008 | @@ -0,0 +1,678 @@
# This program uses the datetime module to manipulate dates and times.

# The datetime Module
import datetime, time
print(datetime.datetime.now())

dt = datetime.datetime(2015, 10, 21, 16, 29, 0)
print((dt.year, dt.month, dt.day))
print((dt.hour, dt.minute, dt.second))

print(datetime.datetime.fromtimestamp(1000000))
print(datetime.datetime.fromtimestamp(time.time()))

halloween2015 = datetime.datetime(2015, 10, 31, 0, 0, 0)
newyears2016 = datetime.datetime(2016, 1, 1, 0, 0, 0)
oct31_2015 = datetime.datetime(2015, 10, 31, 0, 0, 0)
print(halloween2015 == oct31_2015)
print(halloween2015 > newyears2016)
print(newyears2016 > halloween2015)
print(newyears2016 != oct31_2015)
|
|
8106c22a5c05f438eb9c6436af054fd1e72b103c | Add SK_IGNORE_FASTER_TEXT_FIX define for staging Skia change. | public/blink_skia_config.gyp | public/blink_skia_config.gyp | #
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This target is a dependency of Chromium's skia/skia_library.gyp.
# It only contains code suppressions which keep Webkit tests from failing.
{
'targets': [
{
'target_name': 'blink_skia_config',
'type': 'none',
'direct_dependent_settings': {
'defines': [
# Place defines here that require significant Blink rebaselining, or that
# are otherwise best removed in Blink and then rolled into Chromium.
# Defines should be in single quotes and a comma must appear after every one.
# DO NOT remove the define until you are ready to rebaseline, and
# AFTER the flag has been removed from skia.gyp in Chromium.
'SK_DEFERRED_CANVAS_USES_FACTORIES=1',
],
},
},
],
}
| Python | 0.000004 | @@ -2273,16 +2273,55 @@
IES=1',
+        'SK_IGNORE_FASTER_TEXT_FIX',
|
bb940826d78e44a4098023e83d788b3d915b9b1f | Revert "Add the GitHub-supported format extensions." | grip/constants.py | grip/constants.py | # The supported extensions, as defined by https://github.com/github/markup
supported_extensions = [
'.markdown', '.mdown', '.mkdn', '.md',
'.textile',
'.rdoc',
'.org',
'.creole',
'.mediawiki', '.wiki',
'.rst',
'.asciidoc', '.adoc', '.asc',
'.pod',
]
# The default filenames when no file is provided
default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
| Python | 0 | @@ -96,195 +96,27 @@
= [
-
-    '.markdown', '.mdown', '.mkdn', '.md',
-    '.textile',
-    '.rdoc',
-    '.org',
-    '.creole',
-    '.mediawiki', '.wiki',
-    '.rst',
-    '.asciidoc', '.adoc', '.asc',
-    '.pod',
-]
+'.md', '.markdown']

#
|
d1952cd6549e3136e802ac17ecee0fd182455729 | Revert "Add Projects Team name to Alert-Mail subject" | src/sentry/plugins/sentry_mail/models.py | src/sentry/plugins/sentry_mail/models.py | """
sentry.plugins.sentry_mail.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sentry
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry.models import User, UserOption
from sentry.plugins import register
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.utils.cache import cache
from sentry.utils.email import MessageBuilder, group_id_to_email
from sentry.utils.http import absolute_uri
NOTSET = object()
class MailPlugin(NotificationPlugin):
title = _('Mail')
conf_key = 'mail'
slug = 'mail'
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
project_default_enabled = True
project_conf_form = None
subject_prefix = settings.EMAIL_SUBJECT_PREFIX
def _send_mail(self, subject, template=None, html_template=None, body=None,
project=None, headers=None, context=None, fail_silently=False):
send_to = self.get_send_to(project)
if not send_to:
return
subject_prefix = self.get_option('subject_prefix', project) or self.subject_prefix
msg = MessageBuilder(
subject='%s%s' % (subject_prefix, subject),
template=template,
html_template=html_template,
body=body,
headers=headers,
context=context,
)
msg.send(send_to, fail_silently=fail_silently)
def send_test_mail(self, project=None):
self._send_mail(
subject='Test Email',
body='This email was requested as a test of Sentry\'s outgoing email',
project=project,
fail_silently=False,
)
def get_notification_settings_url(self):
return absolute_uri(reverse('sentry-account-settings-notifications'))
def on_alert(self, alert):
project = alert.project
team = project.team
subject = '[{0} - {1}] ALERT: {2}'.format(
team.name.encode('utf-8'),
project.name.encode('utf-8'),
alert.message.encode('utf-8'),
)
template = 'sentry/emails/alert.txt'
html_template = 'sentry/emails/alert.html'
context = {
'alert': alert,
'link': alert.get_absolute_url(),
'settings_link': self.get_notification_settings_url(),
}
headers = {
'X-Sentry-Project': project.name,
}
self._send_mail(
subject=subject,
template=template,
html_template=html_template,
project=project,
fail_silently=False,
headers=headers,
context=context,
)
def get_emails_for_users(self, user_ids, project=None):
email_list = set()
user_ids = set(user_ids)
# XXX: It's possible that options have been set to an empty value
if project:
alert_queryset = UserOption.objects.filter(
project=project,
user__in=user_ids,
key='mail:email',
)
for option in (o for o in alert_queryset if o.value):
user_ids.remove(option.user_id)
email_list.add(option.value)
if user_ids:
alert_queryset = UserOption.objects.filter(
user__in=user_ids,
key='alert_email',
)
for option in (o for o in alert_queryset if o.value):
user_ids.remove(option.user_id)
email_list.add(option.value)
if user_ids:
email_list |= set(User.objects.filter(
pk__in=user_ids, is_active=True
).values_list('email', flat=True))
return email_list
def get_send_to(self, project=None):
"""
Returns a list of email addresses for the users that should be notified of alerts.
The logic for this is a bit complicated, but it does the following:
The results of this call can be fairly expensive to calculate, so the send_to list gets cached
for 60 seconds.
"""
if project:
project_id = project.pk
else:
project_id = ''
conf_key = self.get_conf_key()
cache_key = '%s:send_to:%s' % (conf_key, project_id)
send_to_list = cache.get(cache_key)
if send_to_list is None:
send_to_list = set()
if project and project.team:
member_set = self.get_sendable_users(project)
send_to_list |= set(self.get_emails_for_users(
member_set, project=project))
send_to_list = filter(bool, send_to_list)
cache.set(cache_key, send_to_list, 60) # 1 minute cache
return send_to_list
def notify_users(self, group, event, fail_silently=False):
project = group.project
interface_list = []
for interface in event.interfaces.itervalues():
body = interface.to_email_html(event)
if not body:
continue
interface_list.append((interface.get_title(), mark_safe(body)))
subject = '[%s] %s: %s' % (
project.name.encode('utf-8'),
unicode(event.get_level_display()).upper().encode('utf-8'),
event.error().encode('utf-8').splitlines()[0])
link = group.get_absolute_url()
template = 'sentry/emails/error.txt'
html_template = 'sentry/emails/error.html'
context = {
'group': group,
'event': event,
'link': link,
'interfaces': interface_list,
'settings_link': self.get_notification_settings_url(),
}
headers = {
'X-Sentry-Logger': event.logger,
'X-Sentry-Logger-Level': event.get_level_display(),
'X-Sentry-Project': project.name,
'X-Sentry-Server': event.server_name,
'X-Sentry-Reply-To': group_id_to_email(group.pk),
}
self._send_mail(
subject=subject,
template=template,
html_template=html_template,
project=project,
fail_silently=fail_silently,
headers=headers,
context=context,
)
# Legacy compatibility
MailProcessor = MailPlugin
register(MailPlugin)
| Python | 0 | @@ -2115,36 +2115,8 @@
ect
-        team = project.team

@@ -2138,14 +2138,8 @@
[{0}
- - {1}
] AL
@@ -2144,17 +2144,17 @@
ALERT: {
-2
+1
}'.forma
@@ -2160,47 +2160,8 @@
at(
-            team.name.encode('utf-8'),

|
f5718764185ce1149ed291601e4fe28f9cd2be06 | add single list module for mini-stl (Python) | python/mini-stl/single_list.py | python/mini-stl/single_list.py | Python | 0 | @@ -0,0 +1,3082 @@
#!/usr/bin/python -e
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list ofconditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materialsprovided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

class SingleList(object):
    class ListNode(object):
        def __init__(self):
            self.next = None
            self.data = None
        def __del__(self):
            self.next = None
            self.data = None

    def __init__(self):
        self.front_ = None
        self.rear_ = None
        self.size_ = 0
        self.iter_ = None

    def __del__(self):
        self.clear()
        self.iter_ = None

    def __iter__(self):
        self.iter_ = self.front_
        return self

    def next(self):
        if self.iter_ == None:
            raise StopIteration
        else:
            data = self.iter_.data
            self.iter_ = self.iter_.next
            return data

    def clear(self):
        while self.front_ != None:
            node = self.front_
            self.front_ = self.front_.next
            del node
        self.front_ = None
        self.rear_ = None
        self.size_ = 0

    def empty(self):
        return self.front_ == None

    def size(self):
        return self.size_

    def push_back(self, x):
        node = self.ListNode()
        node.next = None
        node.data = x
        if self.front_ == None:
            self.front_ = node
            self.rear_ = node
        else:
            self.rear_.next = node
            self.rear_ = node
        self.size_ += 1

    def push_front(self, x):
        node = self.ListNode()
        node.next = self.front_
        node.data = x
        if self.front_ == None:
            self.rear_ = node
        self.front_ = node
        self.size_ += 1

    def pop_front(self):
        if self.front_ == None:
            return
        node = self.front_
        self.front_ = self.front_.next
        del node
        self.size_ -= 1

    def front(self):
        if self.front_ == None:
            return None
        return self.front_.data

    def back(self):
        if self.rear_ == None:
            return None
        return self.rear_.data
|
|
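For illustration only — this driver is not part of the commit above — the `SingleList` class might be exercised like so (Python 2, matching the file's `print` statements and `next()` iterator protocol):

```python
lst = SingleList()
for v in (1, 2, 3):
    lst.push_back(v)           # append at the tail
lst.push_front(0)              # prepend at the head

print lst.size()               # 4
print lst.front(), lst.back()  # 0 3
for v in lst:                  # iterates via the Py2 next() protocol
    print v
lst.pop_front()                # unlinks and deletes the head node
```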
0be7f2fe05588d93eb478a4fa648d310055b3ce7 | Add experimental generation code to make drafts from raster images | pyweaving/generators/raster.py | pyweaving/generators/raster.py | Python | 0 | @@ -0,0 +1,1232 @@
from .. import Draft
from PIL import Image


def point_threaded(im, warp_color=(0, 0, 0), weft_color=(255, 255, 255),
                   shafts=40, max_float=8, repeats=2):
    """
    Given an image, generate a point-threaded drawdown that attempts to
    represent the image. Results in a drawdown with bilateral symmetry from a
    non-symmetric source image.
    """
    draft = Draft(num_shafts=shafts, liftplan=True)

    im.thumbnail((shafts, im.size[1]), Image.ANTIALIAS)
    im = im.convert('1')

    w, h = im.size
    assert w == shafts
    warp_pattern_size = ((2 * shafts) - 2)
    for __ in range(repeats):
        for ii in range(warp_pattern_size):
            if ii < shafts:
                shaft = ii
            else:
                shaft = warp_pattern_size - ii
            draft.add_warp_thread(color=warp_color, shaft=shaft)

    imdata = im.getdata()

    for __ in range(repeats):
        for yy in range(h):
            offset = yy * w
            pick_shafts = set()
            for xx in range(w):
                pixel = imdata[offset + xx]
                if not pixel:
                    pick_shafts.add(xx)
            draft.add_weft_thread(color=weft_color, shafts=pick_shafts)

    return draft
|
|
de325dbe53bbd28eddcbbf188f2689474994249b | add migration for new version of storedmessages | osmaxx-py/osmaxx/third_party_apps/stored_messages/migrations/0002_message_url.py | osmaxx-py/osmaxx/third_party_apps/stored_messages/migrations/0002_message_url.py | Python | 0 | @@ -0,0 +1,402 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('stored_messages', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='message',
            name='url',
            field=models.URLField(blank=True, null=True),
        ),
    ]
|
|
1472011cb8cd323357626443f714284feedfed62 | add merge of ACIS provided data | scripts/climodat/use_acis.py | scripts/climodat/use_acis.py | Python | 0 | @@ -0,0 +1,1921 @@
"""Use data provided by ACIS to replace climodat data"""
import requests
import sys
import psycopg2
import datetime

SERVICE = "http://data.rcc-acis.org/StnData"


def safe(val):
    if val in ['M', 'S']:
        return None
    if val == 'T':
        return 0.0001
    try:
        return float(val)
    except:
        print("failed to convert %s to float, using None" % (repr(val),))
    return None


def main(station, acis_station):
    table = "alldata_%s" % (station[:2],)
    payload = {"sid": acis_station,
               "sdate": "1850-01-01",
               "edate": "2017-01-01",
               "elems": "maxt,mint,pcpn,snow,snwd"}
    req = requests.post(SERVICE, json=payload)
    j = req.json()
    pgconn = psycopg2.connect(database='coop', host='localhost', port=5555,
                              user='mesonet')
    cursor = pgconn.cursor()
    for row in j['data']:
        date = row[0]
        (high, low, precip, snow, snowd) = map(safe, row[1:])
        if all([a is None for a in (high, low, precip, snow, snowd)]):
            continue
        cursor.execute("""
            UPDATE """ + table + """ SET high = %s, low = %s, precip = %s,
            snow = %s, snowd = %s WHERE station = %s and day = %s
        """, (high, low, precip, snow, snowd, station, date))
        if cursor.rowcount == 0:
            date = datetime.datetime.strptime(date, '%Y-%m-%d')
            sday = "%02i%02i" % (date.month, date.day)
            print("Adding entry for %s" % (date,))
            cursor.execute("""INSERT into """ + table + """ (station, day,
            high, low, precip, snow, snowd, sday, year, month, estimated)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'f')
            """, (station, date, high, low, precip, snow, snowd, sday,
                  date.year, date.month))
    cursor.close()
    pgconn.commit()

if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2])
|
|
b333d95f3f4187b9d9b480ba8ff4985a62d65f41 | Add tests for nginx version | tests/pytests/unit/modules/test_nginx.py | tests/pytests/unit/modules/test_nginx.py | Python | 0.999999 | @@ -0,0 +1,736 @@
import pytest
import salt.modules.nginx as nginx
from tests.support.mock import patch


@pytest.fixture
def configure_loader_modules():
    return {nginx: {}}


@pytest.mark.parametrize(
    "expected_version,nginx_output",
    [
        ("1.2.3", "nginx version: nginx/1.2.3"),
        ("1", "nginx version: nginx/1"),
        ("9.1.100a1+abc123", "nginx version: nginx/9.1.100a1+abc123"),
        (
            "42.9.13.1111111111.whatever",
            "nginx version: nginx/42.9.13.1111111111.whatever",
        ),
    ],
)
def test_basic_nginx_version_output(expected_version, nginx_output):
    with patch.dict(nginx.__salt__, {"cmd.run": lambda *args, **kwargs: nginx_output}):
        assert nginx.version() == expected_version
|
|
e77b9a5dff36b3318759a18a786c7cc08bb8ac3e | Create Scramble_String.py | Array/Scramble_String.py | Array/Scramble_String.py | Python | 0.000081 | @@ -0,0 +1,1721 @@
"""
Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.

Below is one possible representation of s1 = "great":

    great
   /    \
  gr    eat
 / \    /  \
g   r  e   at
           / \
          a   t

To scramble the string, we may choose any non-leaf node and swap its two children.

For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat".

    rgeat
   /    \
  rg    eat
 / \    /  \
r   g  e   at
           / \
          a   t

We say that "rgeat" is a scrambled string of "great".

Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae".

    rgtae
   /    \
  rg    tae
 / \    /  \
r   g  ta  e
       / \
      t   a

We say that "rgtae" is a scrambled string of "great".

Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1.
"""

class Solution:
    # @return a boolean
    def isScramble(self, s1, s2):
        if len(s1) != len(s2):
            return False
        if s1 == s2:
            return True

        length = len(list(s1))
        if sorted(s1) != sorted(s2):
            return False

        for i in xrange(1,length):
            if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]):
                return True
            if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]):
                return True
        return False

# Note:
# Condition: 1) length_s1 != length_s2
# 2) s1 == s2, s1 and s2 are exactly equal
# 3) whether sorted(s1) and sorted(s2) are equal
# 4) compare s1[:i] with s2[:i] and s1[i:] with s2[i:]
# 5) compare s1[:i] with s2[length_s2-i:] and s1[i:] with s2[length_s2:-i]
|
|
f25b69a6ad6777576e31d0b01c4fc2c2bbe02788 | Create new.py | simple_mqtt/templates/new.py | simple_mqtt/templates/new.py | Python | 0.000001 | @@ -0,0 +1 @@
+
|
|
0ee5d568ddc1f37abedb94f32d6b7da0439e6a4d | Create title_retriever.py | solutions/title_retriever.py | solutions/title_retriever.py | Python | 0.000003 | @@ -0,0 +1,711 @@
'''
Script that will scrape the title of the given website
'''

import urllib
import re

def getstock(title):
    regex = '<title>(.+?)</title>' #find all contents within title braces
    pattern = re.compile(regex) #converts regex into a pattern that can be understood by re module
    htmlfile = urllib.urlopen(title) #takes a string arguement
    htmltext = htmlfile.read()
    titles = re.findall(pattern,htmltext)
    return titles

while True:
    try:
        title = str(raw_input("Please give me a url: "))
        if not "http" in title:
            title = "http://"+title
        break
    except IOError:
        print "Sorry that url is not valid. Please try another."

print getstock(title)[0]
|
|
071da9c0668d495e052baf5ad4d5bc9e068aa6a7 | Create dict2xml.py | dict2xml.py | dict2xml.py | Python | 0 | @@ -0,0 +1,931 @@
# Python Dictionary to XML converter
# Written by github.com/Pilfer
# @CodesStuff
class dict2xml:
    def __init__(self, debug = False):
        self.debug = debug
        if self.debug:
            print "json2xml class has been loaded"

    def genXML(self,xmldict):
        tag = xmldict['tag']
        attrs = []
        kidstack = []
        for attr in xmldict['attributes']:
            attrs.append(str("%s=\"%s\"") % (attr['name'],attr['value']))
        if xmldict['children'] != None:
            for child in xmldict['children']:
                tmp = self.genXML(child)
                kidstack.append(tmp)
            if(len(kidstack) == 0):
                children = None
            else:
                children = "\n\t".join(kidstack)
        else:
            children = None
        xmlout = str("<%s %s>%s</%s>") % (tag, ' '.join(attrs), children if children != None else '',tag)
        return xmlout
|
|
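The dictionary shape `genXML` expects (keys `tag`, `attributes`, `children`) is only implicit in the code above, so here is a hypothetical call; the sample dictionary is illustrative, not part of the commit:

```python
doc = {
    'tag': 'book',
    'attributes': [{'name': 'id', 'value': '42'}],
    'children': [
        {'tag': 'title',
         'attributes': [{'name': 'lang', 'value': 'en'}],
         'children': None},
    ],
}
print dict2xml().genXML(doc)
# -> <book id="42"><title lang="en"></title></book>
```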
320da5dcc192d654d09ea631e9684f26e97795c0 | add mitm script | reversing/400a-graphic/mitm.py | reversing/400a-graphic/mitm.py | Python | 0 | @@ -0,0 +1,1132 @@
vals = [0xdeadbeef,0xcafebabe,0xdeadbabe,0x8badf00d,0xb16b00b5,0xcafed00d,0xdeadc0de,0xdeadfa11,0xdefec8ed,0xdeadfeed,0xfee1dead,0xfaceb00b,0xfacefeed,0x000ff1ce,0x12345678,0x743029ab,0xdeed1234,0x00000000,0x11111111,0x11111112,0x11111113,0x42424242]

start = 0xdeadbeef
target = 0x764c648c

group1 = vals[:11]
group2 = vals[11:]
print(len(group1), len(group2))

def recur(begin, rest):
    ret = []
    if not rest:
        return [begin]

    for i in rest[0]:
        ret += recur(begin + [i], rest[1:])
    return ret

def all_possible(l):
    l = list(zip([0x0] * len(l), l))
    return recur([], l)

def xor_all(l, begin=0x0):
    for i in l:
        begin ^= i
    return begin

group1_xors = {}
group2_xors = {}

for i in all_possible(group1):
    group1_xors[xor_all(i, start)] = i

for i in all_possible(group2):
    group2_xors[xor_all(i, target)] = i

intersect = set(group1_xors.keys()) & set(group2_xors.keys())
print(intersect)
sol = intersect.pop()
print(hex(sol))

valsol = group1_xors[sol] + group2_xors[sol]
valsol = [i for i in valsol if i != 0]

print(hex(xor_all(valsol, start)))
print(list(map(hex, valsol)))
|
|
0733ef7d32f5a4731cdfce652733e67b2ed023b4 | Fix podnapisi download | subliminal/providers/podnapisi.py | subliminal/providers/podnapisi.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import logging
import re
import xml.etree.ElementTree
import zipfile
import babelfish
import bs4
import guessit
import requests
from . import Provider
from .. import __version__
from ..exceptions import InvalidSubtitle, ProviderNotAvailable, ProviderError
from ..subtitle import Subtitle, decode, is_valid_subtitle, compute_guess_matches
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
class PodnapisiSubtitle(Subtitle):
provider_name = 'podnapisi'
def __init__(self, language, id, releases, hearing_impaired, page_link, series=None, season=None, episode=None, # @ReservedAssignment
title=None, year=None):
super(PodnapisiSubtitle, self).__init__(language, hearing_impaired, page_link)
self.id = id
self.releases = releases
self.hearing_impaired = hearing_impaired
self.series = series
self.season = season
self.episode = episode
self.title = title
self.year = year
def compute_matches(self, video):
matches = set()
# episode
if isinstance(video, Episode):
# series
if video.series and self.series.lower() == video.series.lower():
matches.add('series')
# season
if video.season and self.season == video.season:
matches.add('season')
# episode
if video.episode and self.episode == video.episode:
matches.add('episode')
# guess
for release in self.releases:
matches |= compute_guess_matches(video, guessit.guess_episode_info(release + '.mkv'))
# movie
elif isinstance(video, Movie):
# title
if video.title and self.title.lower() == video.title.lower():
matches.add('title')
# guess
for release in self.releases:
matches |= compute_guess_matches(video, guessit.guess_movie_info(release + '.mkv'))
# year
if self.year == video.year:
matches.add('year')
return matches
class PodnapisiProvider(Provider):
languages = {babelfish.Language.frompodnapisi(l) for l in babelfish.get_language_converter('podnapisi').codes}
video_types = (Episode, Movie)
server = 'http://simple.podnapisi.net'
link_re = re.compile('^.*(?P<link>/ppodnapisi/download/i/\d+/k/.*$)')
def initialize(self):
self.session = requests.Session()
self.session.headers = {'User-Agent': 'Subliminal/%s' % __version__}
def terminate(self):
self.session.close()
def get(self, url, params=None, is_xml=True):
"""Make a GET request on `url` with the given parameters
:param string url: part of the URL to reach with the leading slash
:param dict params: params of the request
:param bool xml: whether the response content is XML or not
:return: the response
:rtype: :class:`xml.etree.ElementTree.Element` or :class:`bs4.BeautifulSoup`
:raise: :class:`~subliminal.exceptions.ProviderNotAvailable`
"""
try:
r = self.session.get(self.server + '/ppodnapisi' + url, params=params, timeout=10)
except requests.Timeout:
raise ProviderNotAvailable('Timeout after 10 seconds')
if r.status_code != 200:
raise ProviderNotAvailable('Request failed with status code %d' % r.status_code)
if is_xml:
return xml.etree.ElementTree.fromstring(r.content)
else:
return bs4.BeautifulSoup(r.content, ['permissive'])
def query(self, language, series=None, season=None, episode=None, title=None, year=None):
params = {'sXML': 1, 'sJ': language.podnapisi}
if series and season and episode:
params['sK'] = series
params['sTS'] = season
params['sTE'] = episode
elif title:
params['sK'] = title
else:
raise ValueError('Missing parameters series and season and episode or title')
if year:
params['sY'] = year
logger.debug('Searching episode %r', params)
subtitles = []
while True:
root = self.get('/search', params)
if not int(root.find('pagination/results').text):
logger.debug('No subtitle found')
break
if series and season and episode:
subtitles.extend([PodnapisiSubtitle(language, int(s.find('id').text), s.find('release').text.split(),
'h' in (s.find('flags').text or ''), s.find('url').text,
series=series, season=season, episode=episode,
year=s.find('year').text)
for s in root.findall('subtitle')])
elif title:
subtitles.extend([PodnapisiSubtitle(language, int(s.find('id').text), s.find('release').text.split(),
'h' in (s.find('flags').text or ''), s.find('url').text,
title=title, year=s.find('year').text)
for s in root.findall('subtitle')])
if int(root.find('pagination/current').text) >= int(root.find('pagination/count').text):
break
params['page'] = int(root.find('pagination/current').text) + 1
return subtitles
def list_subtitles(self, video, languages):
if isinstance(video, Episode):
return [s for l in languages for s in self.query(l, series=video.series, season=video.season,
episode=video.episode, year=video.year)]
elif isinstance(video, Movie):
return [s for l in languages for s in self.query(l, title=video.title, year=video.year)]
def download_subtitle(self, subtitle):
soup = self.get(subtitle.link[38:], is_xml=False)
link = soup.find('a', href=self.link_re)
if not link:
raise ProviderError('Cannot find the download link')
try:
r = self.session.get(self.server + self.link_re.match(link['href']).group('link'), timeout=10)
except requests.Timeout:
raise ProviderNotAvailable('Timeout after 10 seconds')
if r.status_code != 200:
raise ProviderNotAvailable('Request failed with status code %d' % r.status_code)
with zipfile.ZipFile(io.BytesIO(r.content)) as zf:
if len(zf.namelist()) > 1:
raise ProviderError('More than one file to unzip')
subtitle_bytes = zf.read(zf.namelist()[0])
subtitle_content = decode(subtitle_bytes, subtitle.language)
if not is_valid_subtitle(subtitle_content):
raise InvalidSubtitle
subtitle.content = subtitle_content
| Python | 0 | @@ -6133,16 +6133,21 @@
ubtitle.
+page_
link[38:
|
b46fa327b62d114cedefe1a38cf383189dcb5092 | Fix doc language | doc/conf.py | doc/conf.py | import sys
from os.path import abspath
from pathlib import Path
from json import loads as json_loads
ROOT = Path(__file__).resolve().parent
sys.path.insert(0, abspath("."))
# -- General configuration ------------------------------------------------
project = "pptrees"
version = "1.0.8"
copyright = "2020, tdene"
author = "tdene"
autodoc_member_order = "bysource"
extensions = [
"matplotlib.sphinxext.plot_directive",
"myst_parser",
"nbsphinx",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.githubpages",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinxcontrib.bibtex",
"sphinx_autodoc_typehints",
"sphinx_click",
"sphinx_markdown_tables",
]
bibtex_default_style = "plain"
bibtex_bibfiles = [str(ROOT / "refs.bib")]
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
todo_include_todos = True
templates_path = ["_templates"]
source_suffix = {
".rst": "restructuredtext",
".txt": "markdown",
".md": "markdown",
}
master_doc = "index"
project = "Parallel prefix tree generation and exploration"
author = "Teodor-Dumitru Ene"
copyright = "Teodor-Dumitru Ene"
version = "1.0.8"
release = version # The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = en
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"**.ipynb_checkpoints",
"build",
"extra",
]
# reST settings
prologPath = "prolog.inc"
try:
with open(prologPath, "r") as prologFile:
rst_prolog = prologFile.read()
except Exception as ex:
print("[ERROR:] While reading '{0!s}'.".format(prologPath))
print(ex)
rst_prolog = ""
# -- Options for HTML output ----------------------------------------------
myst_html_meta = {
"description lang=en": "metadata description",
"description lang=fr": "description des métadonnées",
"keywords": "Sphinx, MyST",
"property=og:locale": "en_US",
}
html_context = {}
ctx = ROOT / "context.json"
if ctx.is_file():
html_context.update(json_loads(ctx.open("r").read()))
if (ROOT / "_theme").is_dir():
html_theme_path = ["."]
html_theme = "_theme"
html_theme_options = {
"logo_only": True,
"home_breadcrumbs": True,
"vcs_pageview_mode": "blob",
}
else:
html_theme = "sphinx_rtd_theme"
htmlhelp_basename = "pptrees_doc"
# -- Sphinx.Ext.InterSphinx -----------------------------------------------
intersphinx_mapping = {"python": ("https://docs.python.org/3.7/", None)}
# -- Sphinx.Ext.ExtLinks --------------------------------------------------
extlinks = {"wikipedia": ("https://en.wikipedia.org/wiki/%s", None)}
| Python | 0 | @@ -1969,18 +1969,20 @@
guage = 
+"
en
+"

exclud
|
056bd290a4df08876109ef4e2da1115783a06f25 | Add examples for setting classes attribute | examples/classes.py | examples/classes.py | Python | 0 | @@ -0,0 +1,1756 @@
from flask_table import Table, Col


"""If we want to put an HTML class onto the table element, we can set
the "classes" attribute on the table class. This should be an iterable
of that are joined together and all added as classes. If none are set,
then no class is added to the table element.

"""


class Item(object):
    def __init__(self, name, description):
        self.name = name
        self.description = description


class ItemTableOneClass(Table):
    classes = ['class1']

    name = Col('Name')
    description = Col('Description')


class ItemTableTwoClasses(Table):
    classes = ['class1', 'class2']

    name = Col('Name')
    description = Col('Description')


def one_class(items):
    table = ItemTableOneClass(items)

    # or {{ table }} in jinja
    print(table.__html__())

    """Outputs:

    <table class="class1">
    <thead>
    <tr>
    <th>Name</th>
    <th>Description</th>
    </tr>
    </thead>
    <tbody>
    <tr>
    <td>Name1</td>
    <td>Description1</td>
    </tr>
    </tbody>
    </table>
    """


def two_classes(items):
    table = ItemTableTwoClasses(items)

    # or {{ table }} in jinja
    print(table.__html__())

    """Outputs:

    <table class="class1 class2">
    <thead>
    <tr>
    <th>Name</th>
    <th>Description</th>
    </tr>
    </thead>
    <tbody>
    <tr>
    <td>Name1</td>
    <td>Description1</td>
    </tr>
    </tbody>
    </table>
    """


def main():
    items = [Item('Name1', 'Description1')]

    # user ItemTableOneClass
    one_class(items)

    print('\n######################\n')

    # user ItemTableTwoClasses
    two_classes(items)


if __name__ == '__main__':
    main()
|
|
3e5587f087bdb24f1b13ec54725f27ddca629869 | Make sure remember_rejected sets the config modified flag on the right feed when entries expire. | flexget/plugins/filter/remember_rejected.py | flexget/plugins/filter/remember_rejected.py | import logging
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, Unicode, DateTime, ForeignKey, and_, Index
from sqlalchemy.orm import relation
from flexget import schema
from flexget.event import event
from flexget.plugin import register_plugin, register_parser_option, register_feed_phase
from flexget.utils.sqlalchemy_utils import table_columns, drop_tables, table_add_column
from flexget.utils.tools import parse_timedelta
log = logging.getLogger('remember_rej')
Base = schema.versioned_base('remember_rejected', 3)
@schema.upgrade('remember_rejected')
def upgrade(ver, session):
if ver is None:
columns = table_columns('remember_rejected_entry', session)
if 'uid' in columns:
# Drop the old table
log.info('Dropping old version of remember_rejected_entry table from db')
drop_tables(['remember_rejected_entry'], session)
# Create new table from the current model
Base.metadata.create_all(bind=session.bind)
# We go directly to version 2, as remember_rejected_entries table has just been made from current model
# TODO: Fix this somehow. Just avoid dropping tables?
ver = 3
else:
ver = 0
if ver == 0:
log.info('Adding reason column to remember_rejected_entry table.')
table_add_column('remember_rejected_entry', 'reason', String, session)
ver = 1
if ver == 1:
log.info('Adding `added` column to remember_rejected_entry table.')
table_add_column('remember_rejected_entry', 'added', DateTime, session, default=datetime.now)
ver = 2
if ver == 2:
log.info('Adding expires column to remember_rejected_entry table.')
table_add_column('remember_rejected_entry', 'expires', DateTime, session)
ver = 3
return ver
class RememberFeed(Base):
__tablename__ = 'remember_rejected_feeds'
id = Column(Integer, primary_key=True)
name = Column(Unicode)
entries = relation('RememberEntry', backref='feed', cascade='all, delete, delete-orphan')
class RememberEntry(Base):
__tablename__ = 'remember_rejected_entry'
id = Column(Integer, primary_key=True)
added = Column(DateTime, default=datetime.now)
expires = Column(DateTime)
title = Column(Unicode)
url = Column(String)
rejected_by = Column(String)
reason = Column(String)
feed_id = Column(Integer, ForeignKey('remember_rejected_feeds.id'), nullable=False)
Index('remember_feed_title_url', RememberEntry.feed_id, RememberEntry.title, RememberEntry.url)
class FilterRememberRejected(object):
"""Internal.
Rejects entries which have been rejected in the past.
This is enabled when item is rejected with remember=True flag.
Example:
feed.reject(entry, 'message', remember=True)
"""
def on_feed_start(self, feed, config):
"""Purge remembered entries if the config has changed."""
# See if the feed has changed since last run
old_feed = feed.session.query(RememberFeed).filter(RememberFeed.name == feed.name).first()
if old_feed and (feed.config_modified or feed.manager.options.forget_rejected):
if feed.manager.options.forget_rejected:
log.info('Forgetting previous rejections.')
feed.config_changed()
else:
log.debug('Feed config has changed since last run, purging remembered entries.')
feed.session.delete(old_feed)
old_feed = None
if not old_feed:
# Create this feed in the db if not present
feed.session.add(RememberFeed(name=feed.name))
# Delete expired items
deleted = feed.session.query(RememberEntry).filter(RememberEntry.expires < datetime.now()).delete()
if deleted:
log.debug('%s entries have expired from remember_rejected table.' % deleted)
feed.config_changed()
feed.session.commit()
# This runs before metainfo phase to avoid re-parsing metainfo for entries that will be rejected
def on_feed_prefilter(self, feed, config):
"""Reject any remembered entries from previous runs"""
(feed_id,) = feed.session.query(RememberFeed.id).filter(RememberFeed.name == feed.name).first()
reject_entries = feed.session.query(RememberEntry).filter(RememberEntry.feed_id == feed_id)
if reject_entries.count():
# Reject all the remembered entries
for entry in feed.entries:
if not entry.get('url'):
# We don't record or reject any entries without url
continue
reject_entry = reject_entries.filter(and_(RememberEntry.title == entry['title'],
RememberEntry.url == entry['url'])).first()
if reject_entry:
feed.reject(entry, 'Rejected on behalf of %s plugin: %s' %
(reject_entry.rejected_by, reject_entry.reason))
def on_entry_reject(self, feed, entry, remember=None, remember_time=None, **kwargs):
# We only remember rejections that specify the remember keyword argument
if not remember and not remember_time:
return
expires = None
if remember_time:
if isinstance(remember_time, basestring):
remember_time = parse_timedelta(remember_time)
expires = datetime.now() + remember_time
if not entry.get('title') or not entry.get('original_url'):
log.debug('Can\'t remember rejection for entry without title or url.')
return
message = 'Remembering rejection of `%s`' % entry['title']
if remember_time:
message += ' for %i minutes' % (remember_time.seconds / 60)
log.info(message)
(remember_feed_id,) = feed.session.query(RememberFeed.id).filter(RememberFeed.name == feed.name).first()
feed.session.add(RememberEntry(title=entry['title'], url=entry['original_url'], feed_id=remember_feed_id,
rejected_by=feed.current_plugin, reason=kwargs.get('reason'), expires=expires))
# The test stops passing when this is taken out for some reason...
feed.session.flush()
@event('manager.db_cleanup')
def db_cleanup(session):
# Remove entries older than 30 days
result = session.query(RememberEntry).filter(RememberEntry.added < datetime.now() - timedelta(days=30)).delete()
if result:
log.verbose('Removed %d entries from remember rejected table.' % result)
register_plugin(FilterRememberRejected, 'remember_rejected', builtin=True, api_ver=2)
register_feed_phase('prefilter', after='input')
register_parser_option('--forget-rejected', action='store_true', dest='forget_rejected',
help='Forget all previous rejections so entries can be processed again.')
| Python | 0.000003 | @@ -3694,16 +3694,34 @@
.name))%0A
+ else:%0A
@@ -3743,16 +3743,20 @@
d items%0A
+
@@ -3791,32 +3791,135 @@
(RememberEntry).
+filter(RememberEntry.feed_id == old_feed.id).%5C%0A
filter(RememberE
@@ -3966,16 +3966,20 @@
+
if delet
@@ -3978,24 +3978,28 @@
if deleted:%0A
+
@@ -4071,24 +4071,28 @@
%25 deleted)%0A
+
|
f16187d5943158d82fc87611f998283789b5ecdf | Add libarchive 3.1.2 | packages/libarchive.py | packages/libarchive.py | Python | 0.000004 | @@ -0,0 +1,231 @@
+Package ('libarchive', '3.1.2', sources = %5B'http://libarchive.org/downloads/%25%7Bname%7D-%25%7Bversion%7D.tar.gz'%5D,%0A%09configure_flags = %5B%0A%09'--enable-bsdtar=shared',%0A%09'--enable-bsdcpio=shared',%0A%09'--disable-silent-rules',%0A%09'--without-nettle'%5D%0A)%0A
|
|
b9b2b87f0d630de931765c1c9f448e295440e611 | Create fetch_qt_version.py | fetch_qt_version.py | fetch_qt_version.py | Python | 0 | @@ -0,0 +1,2146 @@
+%22%22%22Module to return the Qt version of a Qt codebase.%0A%0AThis module provides a function that returns the version of a Qt codebase, given%0Athe toplevel qt5 repository directory. Note, the %60qt5%60 directory applies to both%0AQt 5.x and Qt 6.x%0A%0AIf it is run standalone with a python interpreter and not as part of another%0APython module, it must be run from the toplevel directory of a qt5 repository%0Awith the qtbase git submodule cloned and checked out.%0A%22%22%22%0A%0Afrom __future__ import print_function # For python2 portability%0Aimport os%0Aimport sys%0Aimport re%0A%0Adef qt_version(qt5_dir: str) -%3E str:%0A %22%22%22Returns the Qt version of a Qt codebase%22%22%22%0A%0A if not os.path.exists(qt5_dir + %22/qtbase%22):%0A print(%22qtbase doesn't exist. Please pass the path to a qt5 repo. aborting.%22, file=sys.stderr)%0A return None%0A%0A changesFiles = os.listdir(qt5_dir + %22/qtbase/dist%22)%0A%0A # Every version released has a 'changes-%3Cversion #%3E' file describing what %0A # changed - we will use that to figure out the closest version number to %0A # this checked out code.%0A # Only include versions that have version numbers that conform to standard%0A # version numbering rules (major.minor.release)%0A regex = r%22%5Echanges-(%5B0-9.%5D*)%22%0A src = re.search%0A%0A versions = %5Bm.group(1) for changesFile in changesFiles for m in %5Bsrc(regex, changesFile)%5D if m%5D%0A%0A # Fetch version from qtbase/.cmake.conf%0A cmake_conf_path = qt5_dir + %22/qtbase/.cmake.conf%22%0A if os.path.exists(cmake_conf_path):%0A # Qt6 uses CMake, and we can determine version from .cmake.conf%0A cmake_conf_file = open(cmake_conf_path, 'r')%0A%0A qt6_version = %22%22%0A for line in cmake_conf_file:%0A if %22QT_REPO_MODULE_VERSION%22 in line:%0A qt6_version = line.split('%22')%5B1%5D%0A break%0A if qt6_version:%0A versions.append(qt6_version)%0A%0A versions.sort(key=lambda s: list(map(int, s.split('.'))))%0A%0A return versions%5B-1%5D%0A%0A%0Aif __name__ == %22__main__%22:%0A if not os.path.exists(%22qtbase%22):%0A print(%22qtbase doesn't exist. Please run from base of qt5 repo. aborting.%22, file=sys.stderr)%0A sys.exit(1)%0A print(qt_version(%22.%22))%0A
|
|
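One detail of the script above worth a closer look: it sorts version strings numerically, component by component, rather than lexically. The same key function in isolation (version numbers invented for illustration):

```python
# Lexical sorting would rank "5.9.2" above "5.12.0"; splitting on dots
# and comparing integer lists gives the intended order.
versions = ["5.9.2", "5.12.0", "5.10.1"]
versions.sort(key=lambda s: list(map(int, s.split("."))))
print(versions[-1])  # 5.12.0
```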
1ae4f976c22dd3c07de6882f9a18b0be606ff5e3 | Check for empty dataset | km3pipe/utils/streamds.py | km3pipe/utils/streamds.py | # coding=utf-8
"""
Access the KM3NeT StreamDS DataBase service.
Usage:
streamds
streamds list
streamds upload CSV_FILE
streamds info STREAM
streamds get STREAM [PARAMETERS...]
streamds (-h | --help)
streamds --version
Options:
STREAM Name of the stream.
CSV_FILE Tab separated data for the runsummary tables.
PARAMETERS List of parameters separated by space (e.g. detid=29).
-h --help Show this screen.
"""
from __future__ import division, absolute_import, print_function
import os
import json
import requests
import pandas as pd
import km3pipe as kp
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2017, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "[email protected]"
__status__ = "Development"
log = kp.logger.get("streamds")
RUNSUMMARY_URL = "https://km3netdbweb.in2p3.fr/jsonds/runsummarynumbers/i"
def print_streams():
"""Print all available streams with their full description"""
sds = kp.db.StreamDS()
sds.print_streams()
def print_info(stream):
"""Print the information about a stream"""
sds = kp.db.StreamDS()
sds.help(stream)
def get_data(stream, parameters):
"""Retrieve data for given stream and parameters, or None if not found"""
sds = kp.db.StreamDS()
if stream not in sds.streams:
log.error("Stream '{}' not found in the database.".format(stream))
return
fun = getattr(sds, stream)
params = {}
if parameters:
for parameter in parameters:
if '=' not in parameter:
log.error("Invalid parameter syntax '{}'\n"
"The correct syntax is 'parameter=value'"
.format(parameter))
continue
key, value = parameter.split('=')
params[key] = value
data = fun(**params)
if data is not None:
print(data)
else:
sds.help(stream)
def available_streams():
"""Show a short list of available streams."""
sds = kp.db.StreamDS()
print("Available streams: ")
print(', '.join(sorted(sds.streams)))
def upload_runsummary(csv_filename):
"""Reads the CSV file and uploads its contents to the runsummary table"""
print("Checking '{}' for consistency.".format(csv_filename))
if not os.path.exists(csv_filename):
log.critical("{} -> file not found.".format(csv_filename))
return
try:
df = pd.read_csv(csv_filename, sep='\t')
except pd.errors.EmptyDataError as e:
log.error(e)
return
required_columns = set(['run', 'det_id', 'source'])
cols = set(df.columns)
if not required_columns.issubset(cols):
log.error("Missing columns: {}."
.format(', '.join(str(c) for c in required_columns - cols)))
return
parameters = cols - required_columns
if len(parameters) < 1:
log.error("No parameter columns found.")
return
print("Found data for parameters: {}."
.format(', '.join(str(c) for c in parameters)))
print("Converting CSV data into JSON")
data = convert_runsummary_to_json(df)
print("We have {:.3f} MB to upload.".format(len(data) / 1024**2))
print("Requesting database session.")
db = kp.db.DBManager()
session_cookie = kp.config.Config().get('DB', 'session_cookie')
log.debug("Using the session cookie: {}".format(session_cookie))
cookie_key, sid = session_cookie.split('=')
print("Uploading the data to the database.")
r = requests.post(RUNSUMMARY_URL,
cookies={cookie_key: sid},
files={'datafile': data})
if r.status_code == 200:
log.debug("POST request status code: {}".format(r.status_code))
print("Database response:")
db_answer = json.loads(r.text)
for key, value in db_answer.items():
print(" -> {}: {}".format(key, value))
if db_answer['Result'] == 'OK':
print("Upload successful.")
else:
log.critical("Something went wrong.")
else:
log.error("POST request status code: {}".format(r.status_code))
log.critical("Something went wrong...")
return
def convert_runsummary_to_json(df, comment='Test Upload', prefix='TEST_'):
"""Convert a Pandas DataFrame with runsummary to JSON for DB upload"""
data_field = []
for det_id, det_data in df.groupby('det_id'):
runs_field = []
data_field.append({"DetectorId": det_id, "Runs": runs_field})
for run, run_data in det_data.groupby('run'):
parameters_field = []
runs_field.append({"Run":int(run), "Parameters": parameters_field})
parameter_dict = {}
for row in run_data.itertuples():
for parameter_name in run_data.columns[3:]:
if parameter_name not in parameter_dict:
entry = {'Name': prefix + parameter_name, 'Data': []}
parameter_dict[parameter_name] = entry
value = {'S': getattr(row, 'source'),
'D': float(getattr(row, parameter_name))}
parameter_dict[parameter_name]['Data'].append(value)
for parameter_data in parameter_dict.values():
parameters_field.append(parameter_data)
data_to_upload = {"Comment": comment, "Data": data_field}
file_data_to_upload = json.dumps(data_to_upload)
return file_data_to_upload
def main():
from docopt import docopt
args = docopt(__doc__)
if args['info']:
print_info(args['STREAM'])
elif args['list']:
print_streams()
elif args['upload']:
upload_runsummary(args['CSV_FILE'])
elif args['get']:
get_data(args['STREAM'], args['PARAMETERS'])
else:
available_streams()
| Python | 0.000015 | @@ -2995,24 +2995,104 @@
return
+%0A %0A if len(df) == 0:%0A log.critical(%22Empty dataset.%22)%0A return
%0A%0A print(
|
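The guard added by the hunk above covers a case `pd.errors.EmptyDataError` does not: that exception only fires when pandas finds nothing to parse, while a header-only file parses cleanly into a zero-row frame. A rough sketch of that failure mode (the inline CSV is invented):

```python
import io
import pandas as pd

# A file containing only the required header row parses without error
# but holds no data rows, so the explicit length check is needed.
df = pd.read_csv(io.StringIO("run\tdet_id\tsource\n"), sep="\t")
if len(df) == 0:
    print("Empty dataset.")  # the commit logs this and returns early
```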
f9b38f675df9752a4b5309df059c6d15a1e1b3c2 | Add module for range support. | ex_range.py | ex_range.py | Python | 0 | @@ -0,0 +1,1405 @@
+from collections import namedtuple%0A%0Afrom vintage_ex import EX_RANGE_REGEXP%0Aimport location%0A%0A%0AEX_RANGE = namedtuple('ex_range', 'left left_offset separator right right_offset')%0A%0A%0Adef get_range_parts(range):%0A parts = EX_RANGE_REGEXP.search(range).groups()%0A return EX_RANGE(%0A left=parts%5B1%5D,%0A left_offset=parts%5B3%5D or '0',%0A separator=parts%5B5%5D,%0A right=parts%5B7%5D,%0A right_offset=parts%5B9%5D or '0'%0A )%0A%0A%0Adef calculate_range(view, range):%0A parsed_range = get_range_parts(range)%0A if parsed_range.left == '%25':%0A left, left_offset = '1', '0'%0A right, right_offset = '$', '0'%0A elif parsed_range.separator:%0A left, left_offset = parsed_range.left, parsed_range.left_offset%0A right, right_offset = parsed_range.right, parsed_range.right_offset%0A %0A return calculate_range_part(view, left) + int(left_offset), %5C%0A calculate_range_part(view, right) + int(right_offset)%0A%0A%0Adef calculate_range_part(view, p):%0A if p.isdigit():%0A return int(p)%0A if p.startswith('/') or p.startswith('?'):%0A if p.startswith('?'):%0A return location.reverse_search(view, p%5B1:-1%5D,%0A end=view.sel()%5B0%5D.begin())%0A return location.search(view, p%5B1:-1%5D)%0A if p in ('$', '.'):%0A return location.calculate_relative_ref(view, p)%0A
|
|
15cf6b5d35e2fbaf39d419ddbe5da1b16247ccaa | add test_parse_table_options.py | tests/test_parse_table_options.py | tests/test_parse_table_options.py | Python | 0.000005 | @@ -0,0 +1,1766 @@
+#!/usr/bin/env python3%0A%22%22%22%0A%60header%60 and %60markdown%60 is checked by %60test_to_bool%60 instead%0A%22%22%22%0Afrom .context import pandoc_tables%0Aimport panflute%0A%0A%0Adef test_parse_table_options():%0A options = %7B%0A 'caption': None,%0A 'alignment': None,%0A 'width': None,%0A 'table-width': 1.0,%0A 'header': True,%0A 'markdown': True,%0A 'include': None%0A %7D%0A raw_table_list = %5B%5B'1', '2', '3', '4'%5D, %5B'5', '6', '7', '8'%5D%5D%0A # check init is preserved%0A assert pandoc_tables.parse_table_options(%0A options, raw_table_list) == options%0A # check caption%0A options%5B'caption'%5D = '**sad**'%0A assert str(pandoc_tables.parse_table_options(%0A options, raw_table_list%0A )%5B'caption'%5D%5B0%5D) == 'Strong(Str(sad))'%0A # check alignment%0A options%5B'alignment'%5D = 'LRCD'%0A assert pandoc_tables.parse_table_options(%0A options, raw_table_list%0A )%5B'alignment'%5D == %5B%0A 'AlignLeft',%0A 'AlignRight',%0A 'AlignCenter',%0A 'AlignDefault'%0A %5D%0A options%5B'alignment'%5D = 'LRC'%0A assert pandoc_tables.parse_table_options(%0A options, raw_table_list%0A )%5B'alignment'%5D == %5B%0A 'AlignLeft',%0A 'AlignRight',%0A 'AlignCenter',%0A 'AlignDefault'%0A %5D%0A # check width%0A options%5B'width'%5D = %5B0.1, 0.2, 0.3, 0.4%5D%0A assert pandoc_tables.parse_table_options(%0A options, raw_table_list%0A )%5B'width'%5D == %5B0.1, 0.2, 0.3, 0.4%5D%0A # auto-width%0A raw_table_list = %5B%0A %5B'asdfdfdfguhfdhghfdgkla', '334%5Cn2', '**la**', '4'%5D,%0A %5B'5', '6', '7', '8'%5D%0A %5D%0A options%5B'width'%5D = None%0A options%5B'table-width'%5D = 1.2%0A assert pandoc_tables.parse_table_options(%0A options, raw_table_list%0A )%5B'width'%5D == %5B22 / 32 * 1.2, 3 / 32 * 1.2, 6 / 32 * 1.2, 1 / 32 * 1.2%5D%0A return%0A
|
|
71dd485685a481f21e03af6db5a4bc1f91a64ce9 | Add service settings migration | nodeconductor/structure/migrations/0018_service_settings_plural_form.py | nodeconductor/structure/migrations/0018_service_settings_plural_form.py | Python | 0 | @@ -0,0 +1,441 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('structure', '0017_add_azure_service_type'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='servicesettings',%0A options=%7B'verbose_name': 'Service settings', 'verbose_name_plural': 'Service settings'%7D,%0A ),%0A %5D%0A
|
|
578f532bb7a6c75dd6526b9fe130879e0a7cc0e6 | Pick best out of two outputs | session2/select_best_output.py | session2/select_best_output.py | Python | 1 | @@ -0,0 +1,1393 @@
+import argparse, logging, codecs%0Afrom nltk.translate.bleu_score import sentence_bleu as bleu%0A%0A%0Adef setup_args():%0A parser = argparse.ArgumentParser()%0A parser.add_argument('out1', help = 'Output 1')%0A parser.add_argument('out2', help = 'Output 2')%0A parser.add_argument('input', help = 'Input')%0A parser.add_argument('output', help='Selected Output')%0A args = parser.parse_args()%0A return args%0A%0A%0Adef main():%0A logging.basicConfig(format='%25(asctime)s : %25(levelname)s : %25(message)s', level=logging.INFO)%0A args = setup_args()%0A logging.info(args)%0A%0A out1_lines = codecs.open(args.out1, 'r', 'utf-8').readlines()%0A out2_lines = codecs.open(args.out2, 'r', 'utf-8').readlines()%0A%0A picked_num1 = 0%0A picked_num2 = 0%0A%0A input_lines = codecs.open(args.input, 'r').readlines()%0A%0A fw = codecs.open(args.output, 'w', 'utf-8')%0A%0A for index, (out1, out2, input) in enumerate(zip(out1_lines, out2_lines, input_lines)):%0A q2 = input.split('END')%5B2%5D%0A bleu_1 = bleu(%5Bq2.split()%5D, out1, weights=(1.0,))%0A bleu_2 = bleu(%5Bq2.split()%5D, out2, weights=(1.0,))%0A logging.info('Index:%25d Bleu1: %25f Bleu2: %25f'%25 (index, bleu_1, bleu_2))%0A%0A if bleu_1 %3E bleu_2:%0A picked_num1 += 1%0A fw.write(out1.strip() + '%5Cn')%0A else:%0A picked_num2 += 1%0A fw.write(out2.strip() + '%5Cn')%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
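A caveat about the script above: `out1` and `out2` are passed to `sentence_bleu` as raw lines, and NLTK iterates the hypothesis as a token sequence, so individual characters become the tokens. With `weights=(1.0,)` the metric reduces to unigram precision times the brevity penalty. A word-level sketch for comparison (sentences invented):

```python
from nltk.translate.bleu_score import sentence_bleu

reference = "what is your name".split()
hypothesis = "what is her name".split()
# Equal lengths make the brevity penalty 1, so this is plain
# unigram precision: three of four words match.
score = sentence_bleu([reference], hypothesis, weights=(1.0,))
print(round(score, 2))  # 0.75
```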
3509585cd14bb51fb00b60df1dcb295bc561d679 | Add _version.py file | py/desidatamodel/_version.py | py/desidatamodel/_version.py | Python | 0.000004 | @@ -0,0 +1,28 @@
+__version__ = '0.2.0.dev71'%0A
|
|
b3383e6c428eccdd67ddc4cfa90e6d22da35412a | Add lib/sccache.py helper script | script/lib/sccache.py | script/lib/sccache.py | Python | 0.000001 | @@ -0,0 +1,564 @@
+import os%0Aimport sys%0A%0Afrom config import TOOLS_DIR%0A%0A%0AVERSION = '0.2.6'%0ASUPPORTED_PLATFORMS = %7B%0A 'cygwin': 'windows',%0A 'darwin': 'mac',%0A 'linux2': 'linux',%0A 'win32': 'windows',%0A%7D%0A%0A%0Adef is_platform_supported(platform):%0A return platform in SUPPORTED_PLATFORMS%0A%0A%0Adef get_binary_path():%0A platform = sys.platform%0A if not is_platform_supported(platform):%0A return None%0A%0A platform_dir = SUPPORTED_PLATFORMS%5Bplatform%5D%0A%0A path = os.path.join(TOOLS_DIR, 'sccache', VERSION, platform_dir, 'sccache')%0A if platform_dir == 'windows':%0A path += '.exe'%0A%0A return path%0A
|
|
8302536cafa07a078cfb6629b5e9cc85e1798e1e | Add Appalachian Regional Commission. | inspectors/arc.py | inspectors/arc.py | Python | 0 | @@ -0,0 +1,2227 @@
+#!/usr/bin/env python%0A%0Aimport datetime%0Aimport logging%0Aimport os%0Afrom urllib.parse import urljoin%0A%0Afrom bs4 import BeautifulSoup%0Afrom utils import utils, inspector%0A%0A# http://www.arc.gov/oig%0A# Oldest report: 2003%0A%0A# options:%0A# standard since/year options for a year range to fetch from.%0A#%0A# Notes for IG's web team:%0A#%0A%0AAUDIT_REPORTS_URL = %22http://www.arc.gov/about/OfficeofInspectorGeneralAuditandInspectionReports.asp%22%0ASEMIANNUAL_REPORTS_URL = %22http://www.arc.gov/about/OfficeofinspectorGeneralSemiannualReports.asp%22%0A%0Adef run(options):%0A year_range = inspector.year_range(options)%0A%0A # Pull the audit reports%0A for url in %5BAUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL%5D:%0A doc = BeautifulSoup(utils.download(url))%0A results = doc.select(%22table p %3E a%22)%0A for result in results:%0A report = report_from(result, url, year_range)%0A if report:%0A inspector.save_report(report)%0A%0Adef report_from(result, landing_url, year_range):%0A report_url = urljoin(landing_url, result.get('href'))%0A report_filename = report_url.split(%22/%22)%5B-1%5D%0A report_id, _ = os.path.splitext(report_filename)%0A try:%0A title = result.parent.find(%22em%22).text%0A except AttributeError:%0A title = result.parent.contents%5B0%5D%0A%0A estimated_date = False%0A try:%0A published_on_text = title.split(%22%E2%80%93%22)%5B-1%5D.strip()%0A published_on = datetime.datetime.strptime(published_on_text, '%25B %25d, %25Y')%0A except ValueError:%0A # For reports where we can only find the year, set them to Nov 1st of that year%0A published_on_year = int(result.find_previous(%22strong%22).text.replace(%22Fiscal Year %22, %22%22))%0A published_on = datetime.datetime(published_on_year, 11, 1)%0A estimated_date = True%0A%0A if published_on.year not in year_range:%0A logging.debug(%22%5B%25s%5D Skipping, not in requested range.%22 %25 report_url)%0A return%0A%0A report = %7B%0A 'inspector': 'arc',%0A 'inspector_url': 'http://www.arc.gov/oig',%0A 'agency': 'arc',%0A 'agency_name': 'Appalachian Regional Commission',%0A 'report_id': report_id,%0A 'url': report_url,%0A 'title': title,%0A 'published_on': datetime.datetime.strftime(published_on, %22%25Y-%25m-%25d%22),%0A %7D%0A if estimated_date:%0A report%5B'estimated_date'%5D = estimated_date%0A return report%0A%0Autils.run(run) if (__name__ == %22__main__%22) else None%0A
|
|
d834216bcc93eac7b324d95498d9580e3f769dfa | Add Government Printing Office. | inspectors/gpo.py | inspectors/gpo.py | Python | 0 | @@ -0,0 +1,2671 @@
+#!/usr/bin/env python%0A%0Aimport datetime%0Aimport logging%0Afrom urllib.parse import urljoin%0A%0Afrom bs4 import BeautifulSoup%0Afrom utils import utils, inspector%0A%0A# http://www.gpo.gov/oig/%0A# Oldest report: 2004%0A%0A# options:%0A# standard since/year options for a year range to fetch from.%0A#%0A# Notes for IG's web team:%0A#%0A%0AAUDIT_REPORTS_URL = %22http://www.gpo.gov/oig/audits.htm%22%0ASEMIANNUAL_REPORTS_URL = %22http://www.gpo.gov/oig/semi-anual.htm%22%0A%0AHEADER_TITLES = %5B%0A 'Report #',%0A 'Date',%0A%5D%0A%0Adef run(options):%0A year_range = inspector.year_range(options)%0A%0A # Pull the reports%0A for url in %5BAUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL%5D:%0A doc = BeautifulSoup(utils.download(url))%0A results = doc.select(%22div.section1 div.ltext %3E table tr%22)%0A if not results:%0A results = doc.select(%22td.three-col-layout-middle div.ltext %3E table tr%22)%0A if not results:%0A raise AssertionError(%22No report links found for %25s%22 %25 url)%0A for result in results:%0A if (not result.text.strip() or%0A result.find(%22th%22) or%0A result.find(%22strong%22) or%0A result.contents%5B1%5D.text in HEADER_TITLES%0A ):%0A # Skip header rows%0A continue%0A report = report_from(result, url, year_range)%0A if report:%0A inspector.save_report(report)%0A%0Adef report_from(result, landing_url, year_range):%0A title = result.select(%22td%22)%5B-1%5D.text%0A%0A if %22contains sensitive information%22 in title:%0A unreleased = True%0A report_url = None%0A report_id = %22-%22.join(title.split())%5B:50%5D%0A else:%0A unreleased = False%0A link = result.find(%22a%22)%0A report_id = link.text%0A report_url = urljoin(landing_url, link.get('href'))%0A%0A estimated_date = False%0A try:%0A published_on = datetime.datetime.strptime(report_id.strip(), '%25m.%25d.%25y')%0A except ValueError:%0A published_on_year_text = result.find_previous(%22th%22).text%0A published_on_year = int(published_on_year_text.replace(%22Fiscal Year %22, %22%22))%0A published_on = datetime.datetime(published_on_year, 11, 1)%0A estimated_date = True%0A%0A if published_on.year not in year_range:%0A logging.debug(%22%5B%25s%5D Skipping, not in requested range.%22 %25 report_url)%0A return%0A%0A report = %7B%0A 'inspector': 'gpo',%0A 'inspector_url': 'http://www.gpo.gov/oig/',%0A 'agency': 'gpo',%0A 'agency_name': 'Government Printing Office',%0A 'file_type': 'pdf',%0A 'report_id': report_id,%0A 'url': report_url,%0A 'title': title,%0A 'published_on': datetime.datetime.strftime(published_on, %22%25Y-%25m-%25d%22),%0A %7D%0A if estimated_date:%0A report%5B'estimated_date'%5D = estimated_date%0A if unreleased:%0A report%5B'unreleased'%5D = unreleased%0A report%5B'landing_url'%5D = landing_url%0A return report%0A%0Autils.run(run) if (__name__ == %22__main__%22) else None%0A
|
|
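Both inspector scrapers above share the same date strategy: parse an exact `published_on` when the page provides one, otherwise fall back to November 1 of the fiscal year and mark the record as estimated. The pattern in isolation (inputs invented):

```python
import datetime

def parse_report_date(text, fiscal_year):
    try:
        return datetime.datetime.strptime(text, "%B %d, %Y"), False
    except ValueError:
        # No parseable date; estimate the start of the fiscal year.
        return datetime.datetime(fiscal_year, 11, 1), True

print(parse_report_date("May 4, 2013", 2013))       # exact, not estimated
print(parse_report_date("Fiscal Year 2013", 2013))  # fallback, estimated
```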
d3c9a6bdc1b8cfb56f9ad408f5257b9ac518b2ac | Add preprocessor | scripts/preprocess.py | scripts/preprocess.py | Python | 0.000138 | @@ -0,0 +1,1553 @@
+#!/usr/bin/env python%0A%0Aimport argparse%0Aimport os%0A%0A%0Adef preprocess(path):%0A includes = set()%0A res = %5B%5D%0A%0A def preprocess_line(path, line):%0A if line.strip().startswith('#'):%0A line = line.strip()%0A if line.startswith('#include') and len(line.split('%22')) %3E= 3:%0A lx = line.split('%22')%0A relpath = ''.join(lx%5B1:len(lx) - 1%5D)%0A target_path = os.path.dirname(path) + '/' + relpath%0A if target_path.startswith('/'):%0A target_path = target_path%5B1:%5D%0A preprocess_path(os.path.normpath(target_path))%0A return '%5Cn'%0A elif line.startswith('#pragma'):%0A if ''.join(line.split(' ')%5B1:%5D).strip() == 'once':%0A return ''%0A return line%0A%0A def preprocess_path(path):%0A if path not in includes:%0A has_not_started = True%0A includes.add(path)%0A%0A for line in open(path):%0A s = preprocess_line(path, line)%0A if has_not_started and s.strip() is not %22%22:%0A prefix = '//===== %7B%7D =====%5Cn%5Cn'.format(os.path.basename(path))%0A res.append(prefix)%0A has_not_started = False%0A res.append(s.rstrip())%0A%0A preprocess_path(path)%0A print('%5Cn'.join(res))%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser()%0A parser.add_argument('filepath', nargs=1, help='cpp file')%0A args = parser.parse_args()%0A filepath = args.filepath%5B0%5D%0A preprocess(filepath)%0A
|
|
e285c097be60f9db5fae075f21b7450f403640d2 | add scaffold for an AvailabilityAssessment class | python/cvmfs/availability.py | python/cvmfs/availability.py | Python | 0 | @@ -0,0 +1,1100 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated by Ren%C3%A9 Meusel%0AThis file is part of the CernVM File System auxiliary tools.%0A%22%22%22%0A%0Aimport cvmfs%0A%0Aclass WrongRepositoryType(Exception):%0A def __init__(self, repo, expected_type):%0A assert repo.type != expected_type%0A self.repo = repo%0A self.expected_type = expected_type%0A%0A def __str__(self):%0A return self.repo.fqrn + %22 is of type '%22 + self.repo.type + %22' but '%22 + self.expected_type + %22' was expected%22%0A%0A%0Aclass AvailabilityAssessment:%0A def _check_repo_type(self, repo, expected_type):%0A if repo.has_repository_type() and repo.type != expected_type:%0A raise WrongRepositoryType(repo, expected_type)%0A return True;%0A%0A def __init__(self, stratum0_repository, stratum1_repositories = %5B%5D):%0A self._check_repo_type(stratum0_repository, 'stratum0')%0A for stratum1 in stratum1_repositories:%0A self._check_repo_type(stratum1, 'stratum1')%0A self.stratum0 = stratum0_repository%0A self.stratum1s = stratum1_repositories%0A%0A def assess(self):%0A pass%0A
|
|
0e9da5d0099b9c7b527250d6bf8051242e77103a | Add script for showing the results | triangular_lattice/distances_analyze.py | triangular_lattice/distances_analyze.py | Python | 0 | @@ -0,0 +1,1566 @@
+#!/usr/bin/env python%0A# -*- coding:utf-8 -*-%0A#%0A# written by Shotaro Fujimoto%0A# 2016-10-12%0A%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Afrom mpl_toolkits.mplot3d.axes3d import Axes3D%0A%0A%0Aif __name__ == '__main__':%0A%0A # result_data_path = %22./results/data/distances/beta=0.00_161012_171430.npz%22%0A # result_data_path = %22./results/data/distances/beta=5.00_161012_171649.npz%22%0A # result_data_path = %22./results/data/distances/beta=10.00_161012_172119.npz%22%0A # result_data_path = %22./results/data/distances/beta=15.00_161012_172209.npz%22%0A # result_data_path = %22./results/data/distances/beta=20.00_161012_172338.npz%22%0A%0A data = np.load(result_data_path)%0A beta = data%5B'beta'%5D%0A num_of_strings = data%5B'num_of_strings'%5D%0A L = data%5B'L'%5D%0A frames = data%5B'frames'%5D%0A distance_list = data%5B'distance_list'%5D%0A path_length = data%5B'path_length'%5D%0A%0A fig = plt.figure()%0A ax = fig.add_subplot(111, projection='3d')%0A hist, xedges, yedges = np.histogram2d(distance_list, path_length, bins=100)%0A xpos, ypos = np.meshgrid(xedges%5B:-1%5D + (xedges%5B1%5D - xedges%5B0%5D) / 2.,%0A yedges%5B:-1%5D + (yedges%5B1%5D - yedges%5B0%5D) / 2.)%0A zpos = hist.T%0A ax.plot_wireframe(xpos, ypos, zpos, rstride=1)%0A ax.plot(xpos%5B0%5D, xpos%5B0%5D, lw=2)%0A%0A ax.set_aspect('equal')%0A ax.set_xlim(xedges%5B0%5D, xedges%5B-1%5D)%0A ax.set_ylim(yedges%5B0%5D, yedges%5B-1%5D)%0A ax.set_xlabel('Distance')%0A ax.set_ylabel('Path length')%0A ax.set_title('Path length and distances between two points in the cluster'%0A + r'($%5Cbeta = %252.2f$)' %25 beta)%0A plt.show()%0A
|
|
29e170f9f92f8327c71a9dfc2b9fb9e18947db72 | create predictions on pre-trained models | source/generate_predictions.py | source/generate_predictions.py | Python | 0.000001 | @@ -0,0 +1,981 @@
+import numpy as np%0Aimport pandas as pd%0Afrom sklearn.externals import joblib%0A%0Afrom data_preprocessing import join_strings%0Afrom model import mlb, count_vectorizer_test_x, tfidf_vectorizer_test_x, file_cnt, file_tfidf%0A%0Acount_vectorizer_model, tfidf_vectorizer_model = joblib.load(file_cnt), joblib.load(file_tfidf)%0Aprint(%22Both the trained models have been imported successfully!%22)%0Aprint()%0Aprint(%22Making predictions...%22)%0Apred1 = count_vectorizer_model.predict(count_vectorizer_test_x.toarray())%0Apred2 = tfidf_vectorizer_model.predict(tfidf_vectorizer_test_x.toarray())%0A%0A# Combine predictions and map the labels if the values do not equal 0, else assign empty string%0Aarr = np.where((pred1 + pred2) != 0, mlb.classes_, %22%22)%0A# Load the array into a DataFrame constructor and join non-empty strings%0Apredictions = pd.DataFrame(arr).apply(join_strings, axis=1).to_frame(%22tags%22)%0A# Submit predictions%0Aprint(%22Submitting predictions...%22)%0Apredictions.to_csv(%22tags.tsv%22, index=False)%0Aprint(%22done%22)%0A
|
|
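The `np.where` line in the diff above takes the union of two binary multilabel predictions and maps set bits to label names in one step. A toy run with invented labels:

```python
import numpy as np

classes = np.array(["python", "sql", "git"])
pred1 = np.array([[1, 0, 0]])
pred2 = np.array([[1, 0, 1]])

# Non-zero wherever either model predicted the label; empty string otherwise.
arr = np.where((pred1 + pred2) != 0, classes, "")
print(arr)  # [['python' '' 'git']]
```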
c6358b282ea28dd113c9053dab0fe2fa66f4d59d | Allow metrics to start with a braces expression | webapp/graphite/render/grammar.py | webapp/graphite/render/grammar.py | from graphite.thirdparty.pyparsing import *
ParserElement.enablePackrat()
grammar = Forward()
expression = Forward()
# Literals
intNumber = Combine(
Optional('-') + Word(nums)
)('integer')
floatNumber = Combine(
Optional('-') + Word(nums) + Literal('.') + Word(nums)
)('float')
aString = quotedString('string')
# Use lookahead to match only numbers in a list (can't remember why this is necessary)
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
(floatNumber + afterNumber) |
(intNumber + afterNumber)
)('number')
boolean = Group(
CaselessKeyword("true") |
CaselessKeyword("false")
)('boolean')
# Function calls
arg = Group(
boolean |
number |
aString |
expression
)
args = delimitedList(arg)('args')
func = Word(alphas+'_', alphanums+'_')('func')
call = Group(
func + Literal('(').suppress() +
args + Literal(')').suppress()
)('call')
# Metric pattern (aka. pathExpression)
validMetricChars = alphanums + r'''!#$%&"'*+-.:;<=>?@[\]^_`|~'''
pathExpression = Combine(
Word(validMetricChars) +
Combine(
ZeroOrMore(
Group(
Literal('{') +
Word(validMetricChars + ',') +
Literal('}') + Optional( Word(validMetricChars) )
)
)
)
)('pathExpression')
expression << Group(call | pathExpression)('expression')
grammar << expression
def enableDebug():
for name,obj in globals().items():
try:
obj.setName(name)
obj.setDebug(True)
except:
pass
| Python | 0.000005 | @@ -1035,16 +1035,25 @@
bine(%0A
+Optional(
Word(val
@@ -1066,16 +1066,17 @@
icChars)
+)
+%0A Com
|
1fdd1f306d45f6aeee91c7f016f7c37286ee3b3b | clear signing | lang/python/examples/howto/clear-sign-file.py | lang/python/examples/howto/clear-sign-file.py | Python | 0 | @@ -0,0 +1,2027 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0Afrom __future__ import absolute_import, division, unicode_literals%0A%0A# Copyright (C) 2018 Ben McGinnes %[email protected]%3E%0A#%0A# This program is free software; you can redistribute it and/or modify it under%0A# the terms of the GNU General Public License as published by the Free Software%0A# Foundation; either version 2 of the License, or (at your option) any later%0A# version.%0A#%0A# This program is free software; you can redistribute it and/or modify it under%0A# the terms of the GNU Lesser General Public License as published by the Free%0A# Software Foundation; either version 2.1 of the License, or (at your option)%0A# any later version.%0A#%0A# This program is distributed in the hope that it will be useful, but WITHOUT%0A# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS%0A# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU%0A# Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License and the GNU%0A# Lesser General Public License along with this program; if not, see%0A# %3Chttp://www.gnu.org/licenses/%3E.%0A%0Aimport gpg%0Aimport sys%0A%0A%22%22%22%0AClear-signs a file with a specified key. If entering both the key and the%0Afilename on the command line, the key must be entered first.%0A%22%22%22%0A%0Aif len(sys.argv) %3E 3:%0A    logrus = sys.argv%5B1%5D%0A    filename = %22 %22.join(sys.argv%5B2:%5D)%0Aelif len(sys.argv) == 3:%0A    logrus = sys.argv%5B1%5D%0A    filename = sys.argv%5B2%5D%0Aelif len(sys.argv) == 2:%0A    logrus = sys.argv%5B1%5D%0A    filename = input(%22Enter the path and filename to sign: %22)%0Aelse:%0A    logrus = input(%22Enter the fingerprint or key ID to sign with: %22)%0A    filename = input(%22Enter the path and filename to sign: %22)%0A%0Awith open(filename, %22rb%22) as f:%0A    text = f.read()%0A%0Akey = list(gpg.Context().keylist(pattern=logrus))%0A%0Awith gpg.Context(armor=True, signers=key) as c:%0A    signed_data, result = c.sign(text, mode=gpg.constants.sig.mode.CLEAR)%0A    with open(%22%7B0%7D.asc%22.format(filename), %22wb%22) as f:%0A        f.write(signed_data)%0A
|
|
c199892e07217f164ae694d510b206bfa771090b | remove unused import | src/vmw/vco/components.py | src/vmw/vco/components.py | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from zope.interface import interface, implements, declarations
from zope.interface.adapter import AdapterRegistry
# The following is taken almost as-is from twisted.python.components
_vcoRegistry = AdapterRegistry()
def _registered(registry, required, provided):
"""
Return the adapter factory for the given parameters in the given
registry, or None if there is not one.
"""
return registry.get(required).selfImplied.get(provided, {}).get('')
def registerAdapter(adapterFactory, origInterface, *interfaceClasses):
"""Register an adapter class.
An adapter class is expected to implement the given interface, by
adapting instances implementing 'origInterface'. An adapter class's
__init__ method should accept one parameter, an instance implementing
'origInterface'.
"""
assert interfaceClasses, "You need to pass an Interface"
# deal with class->interface adapters:
if not isinstance(origInterface, interface.InterfaceClass):
origInterface = declarations.implementedBy(origInterface)
for interfaceClass in interfaceClasses:
factory = _registered(_vcoRegistry, origInterface, interfaceClass)
if factory is not None:
raise ValueError("an adapter (%s) was already registered." % (factory, ))
for interfaceClass in interfaceClasses:
_vcoRegistry.register([origInterface], interfaceClass, '', adapterFactory)
# add global adapter lookup hook for our newly created registry
def _hook(iface, ob, lookup=_vcoRegistry.lookup1):
factory = lookup(declarations.providedBy(ob), iface)
if factory is None:
return None
else:
return factory(ob)
interface.adapter_hooks.append(_hook)
| Python | 0.000001 | @@ -1144,20 +1144,8 @@
ace,
- implements,
dec
|
5fcdad9f147ec20f5b6427feb1a02e862f9bfbe9 | version bump | standard_form/__init__.py | standard_form/__init__.py | __version__ = '1.1.0' | Python | 0.000001 | @@ -16,6 +16,6 @@
1.1.
-0
+1
'
|
26e339f8f7a43d61a801a6dc82d55b70a3e6cc73 | add a withFlag param to extract_tags | jieba/analyse/tfidf.py | jieba/analyse/tfidf.py | # encoding=utf-8
from __future__ import absolute_import
import os
import jieba
import jieba.posseg
from operator import itemgetter
_get_module_path = lambda path: os.path.normpath(os.path.join(os.getcwd(),
os.path.dirname(__file__), path))
_get_abs_path = jieba._get_abs_path
DEFAULT_IDF = _get_module_path("idf.txt")
class KeywordExtractor(object):
STOP_WORDS = set((
"the", "of", "is", "and", "to", "in", "that", "we", "for", "an", "are",
"by", "be", "as", "on", "with", "can", "if", "from", "which", "you", "it",
"this", "then", "at", "have", "all", "not", "one", "has", "or", "that"
))
def set_stop_words(self, stop_words_path):
abs_path = _get_abs_path(stop_words_path)
if not os.path.isfile(abs_path):
raise Exception("jieba: file does not exist: " + abs_path)
content = open(abs_path, 'rb').read().decode('utf-8')
for line in content.splitlines():
self.stop_words.add(line)
def extract_tags(self, *args, **kwargs):
raise NotImplementedError
class IDFLoader(object):
def __init__(self, idf_path=None):
self.path = ""
self.idf_freq = {}
self.median_idf = 0.0
if idf_path:
self.set_new_path(idf_path)
def set_new_path(self, new_idf_path):
if self.path != new_idf_path:
self.path = new_idf_path
content = open(new_idf_path, 'rb').read().decode('utf-8')
self.idf_freq = {}
for line in content.splitlines():
word, freq = line.strip().split(' ')
self.idf_freq[word] = float(freq)
self.median_idf = sorted(
self.idf_freq.values())[len(self.idf_freq) // 2]
def get_idf(self):
return self.idf_freq, self.median_idf
class TFIDF(KeywordExtractor):
def __init__(self, idf_path=None):
self.tokenizer = jieba.dt
self.postokenizer = jieba.posseg.dt
self.stop_words = self.STOP_WORDS.copy()
self.idf_loader = IDFLoader(idf_path or DEFAULT_IDF)
self.idf_freq, self.median_idf = self.idf_loader.get_idf()
def set_idf_path(self, idf_path):
new_abs_path = _get_abs_path(idf_path)
if not os.path.isfile(new_abs_path):
raise Exception("jieba: file does not exist: " + new_abs_path)
self.idf_loader.set_new_path(new_abs_path)
self.idf_freq, self.median_idf = self.idf_loader.get_idf()
def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=()):
"""
Extract keywords from sentence using TF-IDF algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].
if the POS of w is not in this list,it will be filtered.
"""
if allowPOS:
allowPOS = frozenset(allowPOS)
words = self.postokenizer.cut(sentence)
else:
words = self.tokenizer.cut(sentence)
freq = {}
for w in words:
if allowPOS:
if w.flag not in allowPOS:
continue
else:
w = w.word
if len(w.strip()) < 2 or w.lower() in self.stop_words:
continue
freq[w] = freq.get(w, 0.0) + 1.0
total = sum(freq.values())
for k in freq:
freq[k] *= self.idf_freq.get(k, self.median_idf) / total
if withWeight:
tags = sorted(freq.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(freq, key=freq.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags
| Python | 0.000001 | @@ -2578,16 +2578,32 @@
owPOS=()
+, withFlag=False
):%0A
@@ -3059,16 +3059,220 @@
ltered.%0A
+ - withFlag: only work with allowPOS is not empty.%0A if True, return a list of pair(word, weight) like posseg.cut%0A if False, return a list of words%0A
@@ -3603,34 +3603,47 @@
el
-se
+if not withFlag
:%0A
@@ -3659,16 +3659,72 @@
w.word%0A
+ wc = w.word if allowPOS and withFlag else w%0A
@@ -3735,16 +3735,17 @@
if len(w
+c
.strip()
@@ -3754,16 +3754,17 @@
%3C 2 or w
+c
.lower()
@@ -3912,16 +3912,72 @@
n freq:%0A
+ kw = k.word if allowPOS and withFlag else k%0A
@@ -4010,16 +4010,17 @@
eq.get(k
+w
, self.m
|
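A usage sketch for the keyword argument the hunks above introduce; per the added docstring, `withFlag` only has an effect when `allowPOS` is non-empty (sentence chosen for illustration, requires the jieba package):

```python
import jieba.analyse

tags = jieba.analyse.extract_tags(
    "我爱北京天安门", topK=5, allowPOS=("ns", "n"), withFlag=True)
for pair in tags:
    # With withFlag=True each result is a posseg pair, not a bare string.
    print(pair.word, pair.flag)
```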
f76c06acf52094cd13cdf7087fa8d3914c2b992a | Add interactive module | sirius/interactive.py | sirius/interactive.py | Python | 0.000001 | @@ -0,0 +1,405 @@
+%0A%22%22%22Interactive sirius module%0A%0AUse this module to define variables and functions to be globally available when%0Ausing%0A%0A 'from sirius.interactive import *'%0A%22%22%22%0A%0Afrom pyaccel.interactive import *%0Aimport sirius.SI_V07 as si_model%0Aimport sirius.BO_V901 as bo_model%0A%0A%0A__all__ = %5Bname for name in dir() if not name.startswith('_')%5D%0A%0Aprint('Names defined in sirius.interactive: ' + ', '.join(__all__) + '.%5Cn')%0A
|
|
f1e6926f964877acc3bfe0d667a199861b431ed7 | add test_xadc | software/test_xadc.py | software/test_xadc.py | Python | 0.000001 | @@ -0,0 +1,319 @@
+def main(wb):%0A%09wb.open()%0A%09regs = wb.regs%0A%09# # #%0A%09print(%22temperature: %25f%C2%B0C%22 %25(regs.xadc_temperature.read()*503.975/4096 - 273.15))%0A%09print(%22vccint: %25fV%22 %25(regs.xadc_vccint.read()/4096*3))%0A%09print(%22vccaux: %25fV%22 %25(regs.xadc_vccaux.read()/4096*3))%0A%09print(%22vccbram: %25fV%22 %25(regs.xadc_vccbram.read()/4096*3))%0A%09# # #%0A%09wb.close()%0A
|
|
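The conversion constants in the script above match the Xilinx 7-series XADC transfer functions (UG480): the raw 12-bit code maps to Kelvin and then to Celsius. A worked reading (raw value invented):

```python
raw = 2570                               # example 12-bit XADC code
temp_c = raw * 503.975 / 4096 - 273.15
print(round(temp_c, 1))                  # 43.1
```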
c2dab85f24e648c66daae847f19b605271ed858b | Add more threader tests | spec/threader_spec.py | spec/threader_spec.py | from expects import expect, be
from doublex import Spy, Mock
from pysellus import threader
with description('the threader module'):
with it('should create as many threads as keys * values in the supplied dict'):
a_stream = Mock()
another_stream = Mock()
foo = Spy()
a_function = Spy()
another_function = Spy()
streams_to_observers = {
a_stream: [a_function],
another_stream: [a_function, another_function]
}
threads = threader.build_threads(streams_to_observers, foo)
expected_length = sum(
len(fn_list) for fn_list in streams_to_observers.values()
)
expect(len(threads)).to(be(expected_length))
| Python | 0 | @@ -1,12 +1,86 @@
+import queue%0Afrom functools import partial%0A%0Afrom doublex import Spy, Mock%0A
from expects
@@ -110,33 +110,48 @@
doublex
- import Spy, Mock
+_expects import have_been_called
%0A%0Afrom p
@@ -266,24 +266,33 @@
as
-keys *
+the sum of len(
values
- in
+) of
the
@@ -440,32 +440,33 @@
unction = Spy()%0A
+%0A
streams_
@@ -822,8 +822,1198 @@
ength))%0A
+%0A with it('should create a properly initialized thread'):%0A stream = Mock()%0A observer = Spy()%0A target = Spy().target_function%0A%0A thread = threader.make_thread(target, stream, observer)%0A%0A thread.start()%0A thread.join()%0A%0A expect(target).to(have_been_called)%0A%0A with it('should call the target function with the correct arguments'):%0A stream = Mock()%0A observer = Spy()%0A que = queue.Queue(maxsize=1)%0A%0A # Return a list with the stream and the observer fn%0A target_function = lambda s, o: %5Bs, o%5D%0A%0A # We can't return from a function running in another thread%0A # so we put the value on a queue%0A target_wrapper = lambda q, s, o: q.put(target_function(s, o))%0A%0A # We define a partial so that we don't have to pass the queue%0A # as a parameter to make_thread%0A target_partial = partial(target_wrapper, que)%0A%0A thread = threader.make_thread(target_partial, stream, observer)%0A%0A thread.start()%0A thread.join()%0A%0A result = que.get()%0A # result is %5Bstream, observer%5D%0A%0A expect(result%5B0%5D).to(be(stream))%0A expect(result%5B1%5D).to(be(observer))%0A
|
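The new test above smuggles a return value out of a worker thread through a queue, since `Thread` targets cannot return directly. The same pattern stripped to its core (names invented):

```python
import queue
import threading

def work(x):
    return x * 2

q = queue.Queue(maxsize=1)
t = threading.Thread(target=lambda: q.put(work(21)))
t.start()
t.join()
print(q.get())  # 42
```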
e6a137026ff9b84814199517a452d354e121a476 | Create quiz_3.py | laboratorios/quiz_3.py | laboratorios/quiz_3.py | Python | 0.001596 | @@ -0,0 +1,386 @@
+#given a time interval in seconds, calculate the remaining seconds %0A#needed to reach an exact number of minutes. This program must %0A#work for 5 attempts.%0A%0Achance = 0%0Asegundos_restantes = 0%0Awhile chance %3C 5:%0A%09segundos = int (input(%22Enter your seconds:%22))%0A%09chance +=1%0A%09if segundos / 60:%0A%09%09segundos_restantes =60-segundos%2560%0A%09%09print (segundos_restantes)%0A%0A
|
|
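Two quirks of the quiz solution above: `if segundos / 60:` is truthy for any non-zero input, and exact multiples of 60 report 60 remaining seconds instead of 0. A tighter remainder, shown separately from the record (value invented):

```python
segundos = 130
# Seconds still needed to reach the next full minute; multiples of 60 give 0.
remaining = (60 - segundos % 60) % 60
print(remaining)  # 50
```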
58ac46511964ca1dd3de25d2b6053eb785e3e281 | Add outlier detection util script. | util/detect-outliers.py | util/detect-outliers.py | Python | 0 | @@ -0,0 +1,2389 @@
+#!/usr/bin/env python2%0A#%0A# Detect outlier faces (not of the same person) in a directory%0A# of aligned images.%0A# Brandon Amos%0A# 2016/02/14%0A#%0A# Copyright 2015-2016 Carnegie Mellon University%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport time%0A%0Astart = time.time()%0A%0Aimport argparse%0Aimport cv2%0Aimport itertools%0Aimport os%0Aimport glob%0A%0Aimport numpy as np%0Anp.set_printoptions(precision=2)%0A%0Afrom sklearn.covariance import EllipticEnvelope%0Afrom sklearn.metrics.pairwise import euclidean_distances%0A%0Aimport openface%0A%0AfileDir = os.path.dirname(os.path.realpath(__file__))%0AmodelDir = os.path.join(fileDir, '..', 'models')%0AopenfaceModelDir = os.path.join(modelDir, 'openface')%0A%0Adef main():%0A parser = argparse.ArgumentParser()%0A%0A parser.add_argument('--networkModel', type=str, help=%22Path to Torch network model.%22,%0A default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))%0A parser.add_argument('--imgDim', type=int,%0A help=%22Default image dimension.%22, default=96)%0A parser.add_argument('--cuda', action='store_true')%0A parser.add_argument('--threshold', type=int, default=0.9)%0A parser.add_argument('directory')%0A%0A args = parser.parse_args()%0A%0A net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)%0A%0A reps = %5B%5D%0A paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))%0A for imgPath in paths:%0A reps.append(net.forwardPath(imgPath))%0A%0A mean = np.mean(reps, axis=0)%0A dists = euclidean_distances(reps, mean)%0A outliers = %5B%5D%0A for path, dist in zip(paths, dists):%0A dist = dist.take(0)%0A if dist %3E args.threshold:%0A outliers.append((path, dist))%0A%0A print(%22Found %7B%7D outlier(s) from %7B%7D images.%22.format(len(outliers), len(paths)))%0A for path, dist in outliers:%0A print(%22 + %7B%7D (%7B:0.2f%7D)%22.format(path, dist))%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
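The script above flags any face whose embedding lies farther than a threshold from the mean embedding. The same rule in plain NumPy (embeddings invented):

```python
import numpy as np

reps = np.array([[0.0, 0.0], [0.1, 0.0], [3.0, 4.0]])
mean = reps.mean(axis=0)
dists = np.linalg.norm(reps - mean, axis=1)
print(dists > 2.0)  # [False False  True]
```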
dad13d26aaf58ea186891e138ac9a10153363c8a | add vicon data extraction | script_r448_vicon_process.py | script_r448_vicon_process.py | Python | 0.000001 | @@ -0,0 +1,519 @@
+import pickle%0Aimport signal_processing as sig_proc%0A%0Adir_name = '../data/r448/r448_131022_rH/'%0A%0Aimg_ext = '.png'%0Asave_img = True%0Ashow = False%0Asave_obj = True%0A%0Asp = sig_proc.Signal_processing(save_img, show, img_ext)%0A%0Afilename='p0_3RW05'%0Afile_events=sp.load_csv(dir_name+filename+'_EVENTS.csv')%0Afile_analog=sp.load_csv(dir_name+filename+'_ANALOG.csv')%0Adata=sp.vicon_extract(file_events)%0Adata=sp.vicon_extract(file_analog,data)%0Adata=sp.synch_vicon_with_TDT(data)%0A%0A%0Aprint('%5Cn%5Cn#################')%0Aprint('#### END ####')
|
|
b46e7e31c5476c48e2a53d5a632354700d554174 | Add test_html_fetchers | tests/test_html_fetchers.py | tests/test_html_fetchers.py | Python | 0.000001 | @@ -0,0 +1,1604 @@
+import os%0Aimport sys%0Asys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))%0A%0Aimport unittest%0Afrom unittest import mock%0A%0Afrom web_scraper.core import html_fetchers%0A%0A%0Adef mocked_requests_get(*args, **kwargs):%0A%09%22%22%22this method will be used by the mock to replace requests.get%22%22%22%0A%09class MockResponse:%0A%09%09def __init__(self, html, status_code):%0A%09%09%09self.html = html%0A%09%09%09self.status_code = status_code%0A%0A%09%09def text(self):%0A%09%09%09return self.html%0A%0A%09%09def status_code(self):%0A%09%09%09return self.status_code%0A%0A%09if args%5B0%5D == 'http://example.com/':%0A%09%09return MockResponse(200, (200, 'html'))%0A%09%0A%09return MockResponse(404, (404, 'Not Found'))%0A%0A%0Aclass TestHtmlFetchersMethods(unittest.TestCase):%0A%[email protected]('web_scraper.core.html_fetchers.requests.get', side_effect=mocked_requests_get)%0A%09def test_fetch_html_document_200(self, mock_get):%0A%09%09%22%22%22fetch_html_document should return 200 and html%22%22%22%0A%09%09response = html_fetchers.fetch_html_document('http://example.com/') # reponse = tuple, MockResponse object%0A%09%09status_code = response%5B0%5D%5B0%5D%0A%09%09html = response%5B0%5D%5B1%5D%0A%0A%09%09self.assertEqual((status_code, html), (200, 'html'))%0A%[email protected]('web_scraper.core.html_fetchers.requests.get', side_effect=mocked_requests_get)%0A%09def test_fetch_html_document_404(self, mock_get):%0A%09%09%22%22%22fetch_html_document should return 404 and 'Not Found'%22%22%22%0A%09%09response = html_fetchers.fetch_html_document('http://example.com/nonexistantpath') # reponse = tuple, MockResponse object.%0A%09%09status_code = response%5B0%5D%5B0%5D%0A%09%09html = response%5B0%5D%5B1%5D%0A%0A%09%09self.assertEqual((status_code, html), (404, 'Not Found'))%0A%0A%0A%09%09%0A%0Aif __name__ == '__main__':%0A%09unittest.main()
|
|
b4f8e8d38636a52d3d4b199fdc670ff93eca33f6 | Add prototype for filters module. | mltils/filters.py | mltils/filters.py | Python | 0 | @@ -0,0 +1,192 @@
+# pylint: disable=missing-docstring, invalid-name, import-error%0A%0A%0Aclass VarianceFilter(object):%0A pass%0A%0A%0Aclass SimilarityFilter(object):%0A pass%0A%0A%0Aclass CorrelationFilter(object):%0A pass%0A
|
|
b0f5c33461d08325581cc0ad272c7f2b39b8dc66 | Fix typo. | metpy/calc/__init__.py | metpy/calc/__init__.py | import basic
from basic import *
__all__ == []
__all__.extend(basic.__all__)
| Python | 0.001604 | @@ -36,17 +36,16 @@
_all__ =
-=
%5B%5D%0A__al
|
167712a6640abca106bbcd50daf5dc22ba90083d | Fix log formatting | src/sentry/tasks/email.py | src/sentry/tasks/email.py | """
sentry.tasks.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.core.mail import get_connection
from sentry.tasks.base import instrumented_task
logger = logging.getLogger(__name__)
def _get_user_from_email(group, email):
from sentry.models import Project, User
# TODO(dcramer): we should encode the userid in emails so we can avoid this
for user in User.objects.filter(email__iexact=email):
# Make sure that the user actually has access to this project
if group.project not in Project.objects.get_for_user(
team=group.team, user=user):
logger.warning('User %r does not have access to group %r', (user, group))
continue
return user
@instrumented_task(
name='sentry.tasks.email.process_inbound_email',
queue='email')
def process_inbound_email(mailfrom, group_id, payload):
"""
"""
from sentry.models import Event, Group
from sentry.web.forms import NewNoteForm
try:
group = Group.objects.select_related('project', 'team').get(pk=group_id)
except Group.DoesNotExist:
logger.warning('Group does not exist: %d', group_id)
return
user = _get_user_from_email(group, mailfrom)
if user is None:
logger.warning('Inbound email from unknown address: %s', mailfrom)
return
event = group.get_latest_event() or Event()
Event.objects.bind_nodes([event], 'data')
event.group = group
event.project = group.project
form = NewNoteForm({'text': payload})
if form.is_valid():
form.save(event, user)
@instrumented_task(
name='sentry.tasks.email.send_email',
queue='email')
def send_email(message):
connection = get_connection()
connection.send_messages([message])
| Python | 0.000005 | @@ -838,17 +838,16 @@
up %25r',
-(
user, gr
@@ -850,17 +850,16 @@
, group)
-)
%0A
|
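The two hunks above fix a classic lazy-logging mistake: `%`-style logger calls take their format arguments positionally, not as a single tuple. A minimal reproduction (logger name invented):

```python
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("demo")

log.warning("got %r and %r", "a", "b")  # correct: got 'a' and 'b'
# Passing ("a", "b") as one argument instead makes the handler report
# "not enough arguments for format string" when the record is rendered.
```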
5e1c58db69adad25307d23c240b905eaf68e1671 | Add fade animation | src/fade_animation.py | src/fade_animation.py | Python | 0.000001 | @@ -0,0 +1,1950 @@
+import animation, colorsys%0A%0Adef colorunpack(color):%0A color = int(color)%0A return ((color %3E%3E 16) / 255,%0A ((color %3E%3E 8) & 255) / 0xff,%0A (color & 0xff) / 0xff)%0Adef colorpack(color):%0A return sum(int(color%5Bi%5D * 0xff) %3C%3C (16 - 8*i) for i in range(3))%0A%0Aclass FadeAnimation(animation.Animation):%0A %22%22%22%0A animation fades relevant section of light strip solid between two colors%0A for the duration of the animation%0A %22%22%22%0A%0A def __init__(self, start_time, stop_time, start_pos, stop_pos, start_color, stop_color):%0A %22%22%22%0A :param start_time: seconds since the epoch to start animation%0A :param stop_time: seconds since the epoch to stop animation%0A :param start_pos: number from 0 to 1 indicating start on strip%0A :param stop_pos: number from 0 to 1 indicating stop on strip%0A :param start_color: initial 24-bit integer RGB color%0A :param stop_color: final 24-bit integer RGB color%0A %22%22%22%0A self.set_start_time(start_time)%0A self.set_stop_time(stop_time)%0A self.set_start_pos(start_pos)%0A self.set_stop_pos(stop_pos)%0A %0A self.__start_hsv = colorsys.rgb_to_hsv(*colorunpack(start_color))%0A self.__stop_hsv = colorsys.rgb_to_hsv(*colorunpack(stop_color))%0A%0A def get_color(self, time, pos):%0A %22%22%22%0A :param time: current time as seconds since the epoch%0A :param pos: position from 0 to 1 to get color for%0A :return: 24-bit integer RGB color%0A %22%22%22%0A lerp = (time - self.get_start_time()) %5C%0A / (self.get_stop_time() - self.get_start_time())%0A lerp = max(0, min(1, lerp))%0A curr = (self.__start_hsv%5B0%5D + (self.__stop_hsv%5B0%5D-self.__start_hsv%5B0%5D)*lerp,%0A self.__start_hsv%5B1%5D + (self.__stop_hsv%5B1%5D-self.__start_hsv%5B1%5D)*lerp,%0A self.__start_hsv%5B2%5D + (self.__stop_hsv%5B2%5D-self.__start_hsv%5B2%5D)*lerp)%0A return colorpack(colorsys.hsv_to_rgb(*curr))%0A
|
|
f537abe2ff1826a9decd9dace5597cbc4f7f318b | Add 1.6 | 1_arrays_hashtables/string_compression.py | 1_arrays_hashtables/string_compression.py | Python | 0.999996 | @@ -0,0 +1,794 @@
+def compress(string):%0A count_array = %5B%5D%0A element_count = 1%0A for index, character in enumerate(string%5B1:%5D):%0A print(character, string%5Bindex%5D)%0A if string%5Bindex%5D == character:%0A element_count = element_count + 1%0A else:%0A count_array.append(element_count)%0A element_count = 1%0A count_array.append(element_count)%0A # if len(count_array) == len(string):%0A # return string%0A compressed_string = ''%0A string_position = 0%0A print(count_array)%0A for numbers in count_array:%0A if(numbers != 1):%0A compressed_string += str(numbers)%0A compressed_string += string%5Bstring_position%5D%0A string_position += numbers%0A return compressed_string%0A%0Aif __name__ == '__main__':%0A print(compress('aafbbcdaaaaa'))%0A
|
|
296efcc28e19fc76371496881a546f1ca52dc622 | add nagios check for iembot availability | nagios/check_iembot.py | nagios/check_iembot.py | Python | 0 | @@ -0,0 +1,404 @@
+%22%22%22Ensure iembot is up.%22%22%22%0Aimport sys%0A%0Aimport requests%0A%0A%0Adef main():%0A %22%22%22Go Main Go.%22%22%22%0A req = requests.get('http://iembot:9004/room/kdmx.xml')%0A if req.status_code == 200:%0A print(%22OK - len(kdmx.xml) is %25s%22 %25 (len(req.content), ))%0A return 0%0A print(%22CRITICAL - /room/kdmx.xml returned code %25s%22 %25 (req.status_code, ))%0A return 2%0A%0A%0Aif __name__ == '__main__':%0A sys.exit(main())%0A
|
|
1d0c0741f1605f3786a752288161c679ab271ea2 | Add a utility file for aggregating decorators | website/addons/osfstorage/decorators.py | website/addons/osfstorage/decorators.py | Python | 0.000001 | @@ -0,0 +1,1574 @@
+import functools%0A%0Afrom webargs import Arg%0Afrom webargs import core%0A%0Afrom framework.auth.decorators import must_be_signed%0A%0Afrom website.models import User%0Afrom framework.exceptions import HTTPError%0Afrom website.addons.osfstorage import utils%0Afrom website.project.decorators import (%0A must_not_be_registration, must_have_addon,%0A)%0A%0A%0Aclass JSONParser(core.Parser):%0A def __init__(self, data):%0A self._data = data%0A%0A def parse(self, args):%0A return super(JSONParser, self).parse(args, None, ('json',))%0A%0A def parse_json(self, _, name, arg):%0A if self._data:%0A return core.get_value(self._data, name, arg.multiple)%0A else:%0A return core.Missing%0A%0Adef path_validator(path):%0A return (%0A path.startswith('/') and%0A len(path.strip('/').split('/')) %3C 3%0A )%0A%0Afile_opt_args = %7B%0A 'source': Arg(%7B%0A 'path': Arg(str, required=True, validate=path_validator),%0A 'cookie': Arg(None, required=True, use=User.from_cookie, validate=lambda x: x is not None)%0A %7D),%0A 'destination': Arg(%7B%0A 'path': Arg(str, required=True, validate=path_validator),%0A 'cookie': Arg(None, required=True, use=User.from_cookie, validate=lambda x: x is not None)%0A %7D)%0A%7D%0A%0A%0Adef waterbutler_opt_hook(func):%0A%0A @must_be_signed%0A @utils.handle_odm_errors%0A @must_not_be_registration%0A @must_have_addon('osfstorage', 'node')%0A @functools.wraps(func)%0A def wrapped(payload, *args, **kwargs):%0A kwargs.update(JSONParser(payload).parse(file_opt_args))%0A return func(*args, **kwargs)%0A return wrapped%0A%0A
|
|
bb8e7ee023d678e68d1da3018bf6d1d3d36d55bd | Create new package (#6588) | var/spack/repos/builtin/packages/perl-statistics-descriptive/package.py | var/spack/repos/builtin/packages/perl-statistics-descriptive/package.py | Python | 0 | @@ -0,0 +1,1639 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass PerlStatisticsDescriptive(PerlPackage):%0A %22%22%22Module of basic descriptive statistical functions.%22%22%22%0A%0A homepage = %22http://search.cpan.org/~shlomif/Statistics-Descriptive-3.0612/lib/Statistics/Descriptive.pm%22%0A url = %22http://search.cpan.org/CPAN/authors/id/S/SH/SHLOMIF/Statistics-Descriptive-3.0612.tar.gz%22%0A%0A version('3.0612', 'e38cfbc1e3962d099b62a14a57a175f1')%0A
|
|
12f7dddcbe8c7c2160bf8de8f7a9c3082b950003 | Create longest-harmonious-subsequence.py | Python/longest-harmonious-subsequence.py | Python/longest-harmonious-subsequence.py | Python | 0.999951 | @@ -0,0 +1,448 @@
+# Time: O(n)%0A# Space: O(n)%0A%0Aclass Solution(object):%0A def findLHS(self, nums):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A lookup = collections.defaultdict(int)%0A result = 0%0A for num in nums:%0A lookup%5Bnum%5D += 1%0A for diff in %5B-1, 1%5D:%0A if (num + diff) in lookup:%0A result = max(result, lookup%5Bnum%5D + lookup%5Bnum + diff%5D)%0A return result%0A
|
|
d7cc3d6590d1d6d46bdf780b93e76ea6aa50334d | Create peak-index-in-a-mountain-array.py | Python/peak-index-in-a-mountain-array.py | Python/peak-index-in-a-mountain-array.py | Python | 0.032261 | @@ -0,0 +1,932 @@
+# Time: O(logn)%0A# Space: O(1)%0A%0A# Let's call an array A a mountain if the following properties hold:%0A#%0A# A.length %3E= 3%0A# There exists some 0 %3C i %3C A.length - 1%0A# such that A%5B0%5D %3C A%5B1%5D %3C ... A%5Bi-1%5D %3C A%5Bi%5D %3E A%5Bi+1%5D %3E ... %3E A%5BA.length - 1%5D%0A# Given an array that is definitely a mountain,%0A# return any i such that%0A# A%5B0%5D %3C A%5B1%5D %3C ... A%5Bi-1%5D %3C A%5Bi%5D %3E A%5Bi+1%5D %3E ... %3E A%5BA.length - 1%5D.%0A#%0A# Example 1:%0A#%0A# Input: %5B0,1,0%5D%0A# Output: 1%0A# Example 2:%0A#%0A# Input: %5B0,2,1,0%5D%0A# Output: 1%0A# Note:%0A#%0A# 3 %3C= A.length %3C= 10000%0A# 0 %3C= A%5Bi%5D %3C= 10%5E6%0A# A is a mountain, as defined above.%0A%0A%0Aclass Solution(object):%0A def peakIndexInMountainArray(self, A):%0A %22%22%22%0A :type A: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A left, right = 0, len(A)%0A while left %3C right:%0A mid = left + (right-left)//2%0A if A%5Bmid%5D %3E A%5Bmid+1%5D:%0A right = mid%0A else:%0A left = mid+1%0A return left%0A
|
|
64eab4beaf4e00d47423ea027ec6f40129ee2e95 | Create execi-3.py | execi-3.py | execi-3.py | Python | 0.000001 | @@ -0,0 +1,171 @@
+n1 = int(input(%22Digite um valor: %22))%0Aif n1 %3C 0:%0A print (n1 * -1)%0Aelif n1 %3E 10:%0A n2 = int(input(%22Digite outro valor: %22))%0A print (n1 - n2)%0Aelse:%0A print (n1/5.0)%0A
|
|
7dce21cc8fa3b81e150ed6586db8ca80cd537fc7 | Add compat module to test package | test/compat.py | test/compat.py | Python | 0 | @@ -0,0 +1,614 @@
+# -*- coding: utf-8 -*-%0A'''%0AA common module for compatibility related imports and%0Adefinitions used during testing%0A'''%0A%0Afrom __future__ import unicode_literals%0Aimport unittest%0A%0Afrom six import assertCountEqual, PY2%0A%0Atry:%0A from unittest.mock import Mock, MagicMock, patch # @NoMove%0Aexcept ImportError:%0A from mock import Mock, MagicMock, patch # @NoMove @UnusedImport%0A%0Aclass Py2TestCase(unittest.TestCase):%0A def assertCountEqual(self, expected_sequence, actual_sequence):%0A return assertCountEqual(self, expected_sequence, actual_sequence)%0A %0Aif PY2:%0A unittest.TestCase = Py2TestCase%0A
|
|
9a97847419ad569b1f9f3d302507aca8544944e2 | test file | test_scheme.py | test_scheme.py | Python | 0.000002 | @@ -0,0 +1,320 @@
+import unittest%0Aimport scheme_mongo%0Aclass TestScheme(unittest.TestCase):%0A def runTest(self):%0A mongo_uri = %22mongodb://localhost/test.in%22%0A wrapper = scheme_mongo.open(mongo_uri)%0A assert wrapper%0A for result in wrapper:%0A print result%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
fc95c998dc8c3caee3e0a4590b96c9ed7e0321a7 | add a test suite for Division | test_htmlgen/block.py | test_htmlgen/block.py | Python | 0 | @@ -0,0 +1,276 @@
+from unittest import TestCase%0A%0Afrom asserts import assert_equal%0A%0Afrom htmlgen import Division%0A%0A%0Aclass DivisionTest(TestCase):%0A%0A def test_render(self):%0A div = Division()%0A div.append(%22Test%22)%0A assert_equal(%5Bb%22%3Cdiv%3E%22, b%22Test%22, b%22%3C/div%3E%22%5D, list(iter(div)))%0A
|
|
e4980879f0f4a0d223cccc99a486fb62cbe5807f | change models.py | physics/models.py | physics/models.py | from django.db import models
class Student(models.Model):
"""Student Info"""
stu_id = models.CharField(u'学号', max_length=30, primary_key=True)
name = models.CharField(u'姓名', max_length=30)
password = models.CharField(u'密码', max_length=30)
def __unicode__(self):
return '{stu_id} {name}'.format(stu_id=self.stu_id, name=self.name)
class Teacher(models.Model):
"""Teacher Info"""
name = models.CharField(u'姓名', max_length=30)
def __unicode__(self):
return self.name
class Questoin(models.Model):
"""Question Info"""
title = models.TextField()
content = models.TextField()
answer = models.CharField(max_length=1)
def __unicode__(self):
return self.title
class Notification(self):
"""Notification Info"""
title = models.TextField()
content = models.TextField()
time = models.DateField()
def __unicode__(self):
return self.title
| Python | 0.000001 | @@ -588,32 +588,37 @@
odels.TextField(
+u'%E9%A2%98%E7%9B%AE'
)%0A content =
@@ -630,24 +630,29 @@
s.TextField(
+u'%E9%80%89%E9%A1%B9'
)%0A answer
@@ -671,16 +671,23 @@
arField(
+u'%E7%AD%94%E6%A1%88',
max_leng
@@ -827,24 +827,31 @@
s.TextField(
+u'%E9%80%9A%E7%9F%A5%E6%A0%87%E9%A2%98'
)%0A conten
@@ -871,16 +871,23 @@
xtField(
+u'%E9%80%9A%E7%9F%A5%E5%86%85%E5%AE%B9'
)%0A ti
@@ -908,16 +908,23 @@
teField(
+u'%E9%80%9A%E7%9F%A5%E6%97%B6%E9%97%B4'
)%0A%0A d
|
964d1f97df600308b23b6a91b9de8811795509a4 | Add a test for the @cachit decorator. | sympy/core/tests/test_cache.py | sympy/core/tests/test_cache.py | Python | 0 | @@ -0,0 +1,217 @@
+from sympy.core.cache import cacheit%0A%0Adef test_cacheit_doc():%0A @cacheit%0A def testfn():%0A %22test docstring%22%0A pass%0A%0A assert testfn.__doc__ == %22test docstring%22%0A assert testfn.__name__ == %22testfn%22%0A
|
|
a8ddae9343683ca69067eecbece5ecff6d4e5d1d | Add myStrom switch platform | homeassistant/components/switch/mystrom.py | homeassistant/components/switch/mystrom.py | Python | 0 | @@ -0,0 +1,3335 @@
+%22%22%22%0Ahomeassistant.components.switch.mystrom%0A~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%0ASupport for myStrom switches.%0A%0AFor more details about this component, please refer to the documentation at%0Ahttps://home-assistant.io/components/switch.mystrom/%0A%22%22%22%0Aimport logging%0Aimport requests%0A%0Afrom homeassistant.components.switch import SwitchDevice%0Afrom homeassistant.const import STATE_UNKNOWN%0A%0ADEFAULT_NAME = 'myStrom Switch'%0A%0A_LOGGER = logging.getLogger(__name__)%0A%0A%0A# pylint: disable=unused-argument, too-many-function-args%0Adef setup_platform(hass, config, add_devices, discovery_info=None):%0A %22%22%22 Find and return myStrom switches. %22%22%22%0A resource = config.get('resource')%0A%0A if resource is None:%0A _LOGGER.error('Missing required variable: resource')%0A return False%0A%0A try:%0A requests.get(resource, timeout=10)%0A except requests.exceptions.MissingSchema:%0A _LOGGER.error(%22Missing resource or schema in configuration. %22%0A %22Add http:// to your URL.%22)%0A return False%0A except requests.exceptions.ConnectionError:%0A _LOGGER.error(%22No route to device. %22%0A %22Please check the IP address in the configuration file.%22)%0A return False%0A%0A add_devices(%5BMyStromSwitch(%0A config.get('name', DEFAULT_NAME),%0A config.get('resource'))%5D)%0A%0A%0Aclass MyStromSwitch(SwitchDevice):%0A %22%22%22 Represents a myStrom switch. %22%22%22%0A def __init__(self, name, resource):%0A self._state = STATE_UNKNOWN%0A self._name = name%0A self._resource = resource%0A self.consumption = 0%0A%0A @property%0A def name(self):%0A %22%22%22 The name of the switch. %22%22%22%0A return self._name%0A%0A @property%0A def is_on(self):%0A %22%22%22 True if switch is on. %22%22%22%0A return self._state%0A%0A @property%0A def current_power_mwh(self):%0A %22%22%22 Current power consumption in mwh. %22%22%22%0A return self.consumption%0A%0A def turn_on(self, **kwargs):%0A %22%22%22 Turn the switch on. %22%22%22%0A request = requests.get('%7B%7D/relay'.format(self._resource),%0A params=%7B'state': '1'%7D,%0A timeout=10)%0A if request.status_code == 200:%0A self._state = True%0A else:%0A _LOGGER.error(%22Can't turn on %25s. Is device offline?%22,%0A self._resource)%0A%0A def turn_off(self, **kwargs):%0A %22%22%22 Turn the switch off. %22%22%22%0A request = requests.get('%7B%7D/relay'.format(self._resource),%0A params=%7B'state': '0'%7D,%0A timeout=10)%0A if request.status_code == 200:%0A self._state = False%0A else:%0A _LOGGER.error(%22Can't turn off %25s. Is device offline?%22,%0A self._resource)%0A%0A def update(self):%0A %22%22%22 Gets the latest data from REST API and updates the state. %22%22%22%0A try:%0A request = requests.get('%7B%7D/report'.format(self._resource),%0A timeout=10)%0A if request.json()%5B'relay'%5D is True:%0A self._state = True%0A else:%0A self._state = False%0A%0A self.consumption = request.json()%5B'power'%5D%0A except requests.exceptions.ConnectionError:%0A _LOGGER.error(%22No route to device '%25s'. Is device offline?%22,%0A self._resource)%0A
|
|
fbf5ecffb4249e7f881f53f30625a47a6e779592 | Create selective_array_reversing.py | selective_array_reversing.py | selective_array_reversing.py | Python | 0.000166 | @@ -0,0 +1,352 @@
+#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Selective Array Reversing%0A#Problem level: 6 kyu%0A%0Adef sel_reverse(arr,l):%0A li=%5B%5D%0A if not l:%0A return arr%0A for i in range(0,len(arr),l):%0A if i+l%3Elen(arr):%0A li+=(list(reversed(arr%5Bi:%5D)))%0A else: %0A li+=(list(reversed(arr%5Bi:i+l%5D)))%0A return li %0A
|
|
afe8e16be43b5e66df0f7bf14832f77009aab151 | Create __init__.py | oauth/__init__.py | oauth/__init__.py | Python | 0.000429 | @@ -0,0 +1,2553 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated by bu on 2017-05-10%0A%22%22%22%0Afrom __future__ import unicode_literals%0Aimport json as complex_json%0Aimport requests%0Afrom utils import verify_sign%0Afrom utils import get_sign%0A%0A%0Aclass RequestClient(object):%0A __headers = %7B%0A 'Content-Type': 'application/json; charset=utf-8',%0A 'Accept': 'application/json',%0A 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'%0A %7D%0A%0A def __init__(self, access_id, secret_key, headers=dict()):%0A self.access_id = access_id%0A self.secret_key = secret_key%0A self.headers = self.__headers%0A self.headers.update(headers)%0A%0A def set_authorization(self, params):%0A params%5B'access_id'%5D = self.access_id%0A self.headers%5B'access_id'%5D = self.access_id%0A self.headers%5B'AUTHORIZATION'%5D = get_sign(params, self.secret_key)%0A%0A def request(self, method, url, params=dict(), data='', json=dict()):%0A method = method.upper()%0A if method == 'GET':%0A self.set_authorization(params)%0A result = requests.request('GET', url, params=params, headers=self.headers)%0A else:%0A if data:%0A json.update(complex_json.loads(data))%0A self.set_authorization(json)%0A result = requests.request(method, url, json=json, headers=self.headers)%0A return result%0A%0A%0Aclass OAuthClient(object):%0A def __init__(self, request):%0A self.request = request%0A self._body = dict()%0A self._authorization = ''%0A%0A @property%0A def body(self):%0A raise NotImplementedError('extract body')%0A%0A @property%0A def authorization(self):%0A raise NotImplementedError('authorization')%0A%0A def verify_request(self, secret_key):%0A return verify_sign(self.body, secret_key, self.authorization)%0A%0A%0Aclass FlaskOAuthClient(OAuthClient):%0A @property%0A def body(self):%0A if self._body:%0A return self._body%0A%0A if self.request.method == 'GET':%0A self._body = self.request.args.to_dict()%0A elif self.request.is_json:%0A self._body = self.request.json%0A%0A access_id = self.request.headers.get('ACCESS_ID')%0A if access_id:%0A self._body%5B'access_id'%5D = access_id%0A return self._body%0A%0A @property%0A def authorization(self):%0A if self._authorization:%0A return self._authorization%0A%0A self._authorization = self.request.headers%5B'AUTHORIZATION'%5D%0A return self.authorization%0A%0A
|
|
a3bbd175ef5640843cb16b0166b462ffaed25242 | standardize logging interface for fs-drift | fsd_log.py | fsd_log.py | Python | 0 | @@ -0,0 +1,420 @@
+import logging%0A# standardize use of logging module in fs-drift%0A%0Adef start_log(prefix):%0A log = logging.getLogger(prefix)%0A h = logging.StreamHandler()%0A log_format = prefix + ' %25(asctime)s - %25(levelname)s - %25(message)s'%0A formatter = logging.Formatter(log_format)%0A h.setFormatter(formatter)%0A log.addHandler(h)%0A log.setLevel(logging.DEBUG)%0A return log%0A%0A #with open('/tmp/weights.csv', 'w') as w_f:%0A
|
|
52e71001b7e775daaaaf42280ebe06c31291b595 | Add a simplemeshtest variant where all AJ packets of one node are always dropped | tests/failmeshtest.py | tests/failmeshtest.py | Python | 0.000002 | @@ -0,0 +1,2447 @@
+#!/usr/bin/env python%0A%0Afrom twisted.internet import reactor%0Afrom mesh import Mesh, MeshNode, packet_type, ATTEMPT_JOIN%0Aimport sys%0A%0ANUMNODES = 5%0ANUMPACKETS = 10%0ADELAY = 0.1%0A%0A%0Anodes = %5B%5D%0A# We're optimists%0Asuccess = True%0A%0Aclass TestMeshNode(MeshNode):%0A nodes = 1%0A%0A def __init__ (self, name, mesh):%0A MeshNode.__init__(self, name, mesh)%0A%0A def node_connected(self):%0A MeshNode.node_connected(self)%0A print %22Connected%22%0A%0A def newNode (self, data):%0A MeshNode.newNode (self, data)%0A print %22node0 - Added %22 + data%0A self.nodes += 1%0A if self.nodes == NUMNODES - 1:%0A print %22Everybody who could joined%22%0A for x in xrange(0, NUMPACKETS):%0A reactor.callLater(0.1 * x, (lambda y: self.pushInput(str(y) + %22%5Cn%22)), x)%0A%0A def leftNode (self, data):%0A MeshNode.leftNode (self, data)%0A print data.rstrip() + %22 left%22%0A reactor.stop()%0A%0Aclass FailMeshNode (MeshNode):%0A%0A def __init__ (self, name, mesh):%0A MeshNode.__init__(self, name, mesh)%0A%0A def sendPacket (self, data):%0A if packet_type(data) != ATTEMPT_JOIN:%0A MeshNode.sendPacket(self, data)%0A%0A%0A%0Aclass TestMesh(Mesh):%0A expected = %7B%7D%0A done = 0%0A%0A def gotOutput(self, node, sender, data):%0A global success%0A%0A if self.expected.get(node) == None:%0A self.expected%5Bnode%5D = 0%0A%0A if (self.expected.get(node, int(data)) != int(data)):%0A print %22Got %22 + data.rstrip() + %22 instead of %22 + %5C%0A str(self.expected%5Bnode%5D) + %22 from %22 + node.name%0A%0A success = False%0A reactor.crash()%0A%0A if not sender in node.peers:%0A print %22Sender %22 + sender + %22 not in node peers%22%0A success = False%0A reactor.crash()%0A%0A self.expected%5Bnode%5D = int(data) + 1%0A%0A if self.expected%5Bnode%5D == 10:%0A self.done += 1%0A%0A if self.done == NUMNODES - 2:%0A for x in self.nodes:%0A x.stats()%0A self.nodes%5B-2%5D.disconnect()%0A%0Am = TestMesh()%0A%0A%0An = TestMeshNode(%22node0%22, m)%0Anodes.append(n)%0Am.addMeshNode(n)%0A%0Afor x in xrange(1, NUMNODES - 1):%0A nodes.append(m.addNode(%22node%22 + str(x)))%0A%0Ax = NUMNODES - 1%0An = FailMeshNode(%22node%22 + str(x), m)%0Anodes.append(n)%0Am.addMeshNode(n)%0A%0A%0A# Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0%25%0A# packet loss.. (bandwidth and delay aren't implemented just yet)%0Am.connect_full(1024, 50, 0.30)%0A%0Adef timeout():%0A global success%0A print %22TIMEOUT!%22%0A success = False%0A reactor.crash()%0A%0Areactor.callLater(60, timeout)%0A%0Areactor.run()%0A%0A%0Aif not success:%0A print %22FAILED%22%0A sys.exit(-1)%0A%0Aprint %22SUCCESS%22%0A
|
|
57fc053939702f4baf04604a9226873c98526ae5 | Add test for Moniker | tests/lsp/test_moniker.py | tests/lsp/test_moniker.py | Python | 0.000001 | @@ -0,0 +1,3557 @@
+############################################################################%0A# Copyright(c) Open Law Library. All rights reserved. #%0A# See ThirdPartyNotices.txt in the project root for additional notices. #%0A# #%0A# Licensed under the Apache License, Version 2.0 (the %22License%22) #%0A# you may not use this file except in compliance with the License. #%0A# You may obtain a copy of the License at #%0A# #%0A# http: // www.apache.org/licenses/LICENSE-2.0 #%0A# #%0A# Unless required by applicable law or agreed to in writing, software #%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, #%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #%0A# See the License for the specific language governing permissions and #%0A# limitations under the License. #%0A############################################################################%0Aimport unittest%0Afrom typing import List, Optional%0A%0Afrom pygls.lsp.methods import TEXT_DOCUMENT_MONIKER%0Afrom pygls.lsp.types import (Moniker, MonikerKind, MonikerOptions, MonikerParams, Position,%0A TextDocumentIdentifier, UniquenessLevel)%0A%0Afrom ..conftest import CALL_TIMEOUT, ClientServer%0A%0A%0Aclass TestMoniker(unittest.TestCase):%0A @classmethod%0A def setUpClass(cls):%0A cls.client_server = ClientServer()%0A cls.client, cls.server = cls.client_server%0A%0A @cls.server.feature(%0A TEXT_DOCUMENT_MONIKER,%0A MonikerOptions(),%0A )%0A def f(params: MonikerParams) -%3E Optional%5BList%5BMoniker%5D%5D:%0A if params.text_document.uri == 'file://return.list':%0A return %5B%0A Moniker(%0A scheme='test_scheme',%0A identifier='test_identifier',%0A unique=UniquenessLevel.Global,%0A kind=MonikerKind.Local,%0A ),%0A %5D%0A else:%0A return None%0A%0A cls.client_server.start()%0A%0A @classmethod%0A def tearDownClass(cls):%0A cls.client_server.stop()%0A%0A def test_capabilities(self):%0A capabilities = self.server.server_capabilities%0A%0A assert capabilities.moniker_provider%0A%0A def test_moniker_return_list(self):%0A response = self.client.lsp.send_request(%0A TEXT_DOCUMENT_MONIKER,%0A MonikerParams(%0A text_document=TextDocumentIdentifier(uri='file://return.list'),%0A position=Position(line=0, character=0),%0A )%0A ).result(timeout=CALL_TIMEOUT)%0A%0A assert response%0A%0A assert response%5B0%5D%5B'scheme'%5D == 'test_scheme'%0A assert response%5B0%5D%5B'identifier'%5D == 'test_identifier'%0A assert response%5B0%5D%5B'unique'%5D == 'global'%0A assert response%5B0%5D%5B'kind'%5D == 'local'%0A%0A def test_references_return_none(self):%0A response = self.client.lsp.send_request(%0A TEXT_DOCUMENT_MONIKER,%0A MonikerParams(%0A text_document=TextDocumentIdentifier(uri='file://return.none'),%0A position=Position(line=0, character=0),%0A )%0A ).result(timeout=CALL_TIMEOUT)%0A%0A assert response is None%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A%0A
|
|
20c4df8c61ee1f625ebd77c8613fc470a3e87438 | add another lazy function | lazy_function/another_lazy_class.py | lazy_function/another_lazy_class.py | Python | 0.000217 | @@ -0,0 +1,615 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Aclass lazy_property(object):%0A def __init__(self, func, name=None, doc=None):%0A self._func = func%0A self._name = name or func.func_name%0A self.__doc__ = doc or func.__doc__%0A%0A def __get__(self, obj, objtype=None):%0A if obj is None:%0A return self%0A value = self._func(obj)%0A setattr(obj, self._name, value)%0A return value%0A%0Aclass BaseRequest(object):%0A def form(self):%0A return 123%0A form = lazy_property(form)%0A%0Abb = BaseRequest()%0Aprint bb.form%0Aprint bb.form%0Abb = BaseRequest()%0Aprint bb.form%0Aprint bb.form
|
|
1d8fccf6943adf40c77d5d2df002330719dcfcd1 | test for S3Sync | tests/test_s3_sync.py | tests/test_s3_sync.py | Python | 0 | @@ -0,0 +1,931 @@
+import os%0Aimport unittest%0Afrom pathlib import Path%0A%0Aimport mock%0A%0Afrom taskcat._s3_sync import S3Sync%0A%0A%0Aclass TestS3Sync(unittest.TestCase):%0A def test_init(self):%0A m_s3_client = mock.Mock()%0A m_s3_client.list_objects_v2.return_value = %7B%0A %22Contents%22: %5B%7B%22Key%22: %22test_prefix/test_object%22, %22ETag%22: %22test_etag%22%7D%5D%0A %7D%0A m_s3_client.delete_objects.return_value = %7B%7D%0A m_s3_client.upload_file.return_value = None%0A prefix = %22test_prefix%22%0A base_path = %22./%22 if os.getcwd().endswith(%22/tests%22) else %22./tests/%22%0A base_path = Path(base_path + %22data/%22).resolve()%0A S3Sync(%0A m_s3_client,%0A %22test_bucket%22,%0A prefix,%0A str(base_path / %22lambda_build_with_submodules%22),%0A )%0A m_s3_client.list_objects_v2.assert_called_once()%0A m_s3_client.delete_objects.assert_called_once()%0A m_s3_client.upload_file.assert_called()%0A
|
|
0f1cf524c2b90d77e17d516a30d62632ebb5ed2f | Add pipeline for untar'ing GCS blobs. | datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py | datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py | Python | 0 | @@ -0,0 +1,1936 @@
+r%22%22%22Untar .tar and .tar.gz GCS files.%22%22%22%0A%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport argparse%0Aimport apache_beam as beam%0Afrom apache_beam.options.pipeline_options import PipelineOptions%0Afrom apache_beam.options.pipeline_options import SetupOptions%0Afrom datathon_etl_pipelines.dofns.read_tar_file import ReadTarFile%0Afrom datathon_etl_pipelines.utils import get_setup_file%0Aimport tensorflow as tf%0A%0A%0Adef write_file(element):%0A path, contents = element%0A with tf.io.gfile.GFile(path, 'wb') as fp:%0A fp.write(contents)%0A%0A%0Adef main():%0A %22%22%22Build and execute the Apache Beam pipeline using the commandline arguments.%22%22%22%0A parser = argparse.ArgumentParser(description=__doc__)%0A parser.add_argument(%0A '--input_tars',%0A required=True,%0A nargs='+',%0A help=%22%22%22One or more wildcard patterns that give the full paths to the%0A input tar files on GCS.%22%22%22)%0A%0A parser.add_argument(%0A '--output_dir',%0A required=True,%0A help=%22%22%22The output directory to write the untar'd files to.%22%22%22)%0A%0A args, pipeline_args = parser.parse_known_args()%0A beam_options = PipelineOptions(pipeline_args)%0A # serialize and provide global imports, functions, etc. to workers.%0A beam_options.view_as(SetupOptions).save_main_session = True%0A beam_options.view_as(SetupOptions).setup_file = get_setup_file()%0A%0A if args.output_dir.endswith('/'):%0A out_dir = args.output_dir%5B:-1%5D%0A else:%0A out_dir = args.output_dir%0A%0A def get_full_output_path(relative_path):%0A if relative_path.startswith('/'):%0A return out_dir + relative_path%0A else:%0A return '%7B%7D/%7B%7D'.format(out_dir, relative_path)%0A%0A with beam.Pipeline(options=beam_options) as p:%0A _ = %5C%0A (p%0A %7C beam.Create(tf.io.gfile.glob(args.input_tars))%0A %7C 'Untar' %3E%3E beam.ParDo(ReadTarFile(), get_full_output_path)%0A %7C 'Write' %3E%3E beam.Map(write_file))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
d68d4e8c1adfa1cdc9577d133c48717b504092e5 | Test extension | tests/testcallable.py | tests/testcallable.py | # Copyright (C) 2007-2011 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from tests.support import is_instance, unittest2, X
from mock import (
Mock, MagicMock, NonCallableMagicMock,
NonCallableMock, patch, create_autospec
)
"""
Note that NonCallableMock and NonCallableMagicMock still have the unused (and
unusable) attributes: return_value, side_effect, call_count, call_args and
call_args_list. These could be removed or raise errors on getting / setting.
They also have the assert_called_with and assert_called_once_with methods.
Removing these would be pointless as fetching them would create a mock
(attribute) that could be called without error.
"""
class TestCallable(unittest2.TestCase):
def test_non_callable(self):
for mock in NonCallableMagicMock(), NonCallableMock():
self.assertRaises(TypeError, mock)
self.assertFalse(hasattr(mock, '__call__'))
def test_attributes(self):
one = NonCallableMock()
self.assertTrue(issubclass(type(one.one), Mock))
two = NonCallableMagicMock()
self.assertTrue(issubclass(type(two.two), MagicMock))
def test_subclasses(self):
class MockSub(Mock):
pass
one = MockSub()
self.assertTrue(issubclass(type(one.one), MockSub))
class MagicSub(MagicMock):
pass
two = MagicSub()
self.assertTrue(issubclass(type(two.two), MagicSub))
def test_patch_spec(self):
patcher = patch('%s.X' % __name__, spec=True)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertTrue(is_instance(instance, NonCallableMagicMock))
self.assertRaises(TypeError, instance)
def test_patch_spec_instance(self):
patcher = patch('%s.X' % __name__, spec=X())
mock = patcher.start()
self.addCleanup(patcher.stop)
self.assertTrue(is_instance(mock, NonCallableMagicMock))
self.assertRaises(TypeError, mock)
def test_patch_spec_callable_class(self):
class CallableX(X):
def __call__(self):
pass
patcher = patch('%s.X' % __name__, spec=CallableX)
mock = patcher.start()
self.addCleanup(patcher.stop)
instance = mock()
mock.assert_called_once_with()
self.assertTrue(is_instance(instance, MagicMock))
instance()
instance.assert_called_once_with()
def test_create_autopsec(self):
mock = create_autospec(X)
instance = mock()
self.assertRaises(TypeError, instance)
mock = create_autospec(X())
self.assertRaises(TypeError, mock)
| Python | 0.000001 | @@ -191,16 +191,27 @@
test2, X
+, SomeClass
%0A%0Afrom m
@@ -2248,32 +2248,339 @@
pass%0A%0A
+ class Sub(CallableX):%0A pass%0A%0A class Multi(SomeClass, Sub):%0A pass%0A%0A class OldStyle:%0A def __call__(self):%0A pass%0A%0A class OldStyleSub(OldStyle):%0A pass%0A%0A for Klass in CallableX, Sub, Multi, OldStyle, OldStyleSub:%0A
patcher
@@ -2615,19 +2615,19 @@
pec=
-CallableX)%0A
+Klass)%0A
@@ -2653,32 +2653,36 @@
start()%0A
+
+
self.addCleanup(
@@ -2696,32 +2696,36 @@
.stop)%0A%0A
+
instance = mock(
@@ -2726,32 +2726,36 @@
mock()%0A
+
+
mock.assert_call
@@ -2770,32 +2770,36 @@
with()%0A%0A
+
+
self.assertTrue(
@@ -2832,32 +2832,36 @@
cMock))%0A
+
instance()%0A
@@ -2855,16 +2855,20 @@
tance()%0A
+
|
4302389b1e4e5ba753b2f76427408910c05f683c | replace our single use of assertEquals with assertEqual | tests/thirdparty_tests.py | tests/thirdparty_tests.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import unittest
import jsonpickle
RSS_DOC = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:base="http://example.org/" xml:lang="en">
<title type="text">Sample Feed</title>
<subtitle type="html">For documentation <em>only</em></subtitle>
<link rel="alternate" type="html" href="/"/>
<link rel="self" type="application/atom+xml" href="http://www.example.org/atom10.xml"/>
<rights type="html"><p>Copyright 2005, Mark Pilgrim</p><</rights>
<generator uri="http://example.org/generator/" version="4.0">Sample Toolkit</generator>
<id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml</id>
<updated>2005-11-09T11:56:34Z</updated>
<entry>
<title>First entry title</title>
<link rel="alternate" href="/entry/3"/>
<link rel="related" type="text/html" href="http://search.example.com/"/>
<link rel="via" type="text/html" href="http://toby.example.com/examples/atom10"/>
<link rel="enclosure" type="video/mpeg4" href="http://www.example.com/movie.mp4" length="42301"/>
<id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml:3</id>
<published>2005-11-09T00:23:47Z</published>
<updated>2005-11-09T11:56:34Z</updated>
<author>
<name>Mark Pilgrim</name>
<uri>http://diveintomark.org/</uri>
<email>[email protected]</email>
</author>
<contributor>
<name>Joe</name>
<uri>http://example.org/joe/</uri>
<email>[email protected]</email>
</contributor>
<contributor>
<name>Sam</name>
<uri>http://example.org/sam/</uri>
<email>[email protected]</email>
</contributor>
<summary type="text">Watch out for nasty tricks</summary>
<content type="xhtml" xml:base="http://example.org/entry/3" xml:lang="en-US">
<div xmlns="http://www.w3.org/1999/xhtml">Watch out for <span style="background: url(javascript:window.location='http://example.org/')"> nasty tricks</span></div>
</content>
</entry>
</feed>"""
class FeedParserTest(unittest.TestCase):
def setUp(self):
try:
import feedparser
except ImportError as e:
if hasattr(self, 'skipTest'):
doit = self.skipTest
else:
doit = self.fail
doit('feedparser module not available, please install')
self.doc = feedparser.parse(RSS_DOC)
def test(self):
pickled = jsonpickle.encode(self.doc)
unpickled = jsonpickle.decode(pickled)
self.assertEquals(self.doc['feed']['title'], unpickled['feed']['title'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FeedParserTest, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| Python | 0 | @@ -2746,17 +2746,16 @@
ertEqual
-s
(self.do
|
0bf6f0b6021b2ca3801b0d68c0ee63e39ddc36df | Make a ValueBuffer class | proj/avg_pdti8/util.py | proj/avg_pdti8/util.py | Python | 0 | @@ -0,0 +1,2358 @@
+#!/bin/env python%0A# Copyright 2021 Google LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# https://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0Afrom nmigen import Mux, Signal, signed%0Afrom nmigen_cfu import InstructionBase, SimpleElaboratable, TestBase, Cfu, CfuTestBase%0Afrom nmigen.sim import Delay, Settle%0Aimport unittest%0A%0A%0Aclass ValueBuffer(SimpleElaboratable):%0A %22%22%22Buffers a signal.%0A%0A Parameters:%0A inp: A Signal%0A The signal to buffer%0A%0A Interface:%0A capture: Signal()%0A Input.%0A When high, captures input while transparently placing on output.%0A When low, output is equal to last captured input.%0A output: Signal(like inp)%0A Output. The last captured input.%0A %22%22%22%0A%0A def __init__(self, inp):%0A self.capture = Signal()%0A self.input = inp%0A self.output = Signal.like(inp)%0A%0A def elab(self, m):%0A captured = Signal.like(self.input)%0A with m.If(self.capture):%0A m.d.sync += captured.eq(self.input)%0A m.d.comb += self.output.eq(Mux(self.capture, self.input, captured))%0A%0A%0Aclass ValueBufferTest(TestBase):%0A def create_dut(self):%0A self.in_signal = Signal(4)%0A return ValueBuffer(self.in_signal)%0A%0A def test(self):%0A DATA = %5B%0A ((0, 0), 0),%0A ((1, 5), 5),%0A ((0, 3), 5),%0A ((0, 2), 5),%0A ((0, 2), 5),%0A ((1, 2), 2),%0A ((0, 2), 2),%0A ((0, 2), 2),%0A %5D%0A%0A def process():%0A for n, ((capture, in_sig), expected_output) in enumerate(DATA):%0A yield self.in_signal.eq(in_sig)%0A yield self.dut.capture.eq(capture)%0A yield Settle()%0A self.assertEqual((yield self.dut.output), expected_output, f%22cycle=%7Bn%7D%22)%0A yield%0A self.run_sim(process, True)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
f2f4accf304cfe1aaed042f7df35bc0ee86a6c59 | Add enums for service/record/assignment/transaction type | netsgiro/enums.py | netsgiro/enums.py | Python | 0.000001 | @@ -0,0 +1,1291 @@
+from enum import IntEnum%0A%0A%0Aclass ServiceType(IntEnum):%0A NONE = 0%0A OCR_GIRO = 9%0A AVTALEGIRO = 21%0A%0A%0Aclass RecordType(IntEnum):%0A TRANSMISSION_START = 10%0A ASSIGNMENT_START = 20%0A TRANSACTION_AMOUNT_1 = 30%0A TRANSACTION_AMOUNT_2 = 31%0A TRANSACTION_AMOUNT_3 = 32 # Only for TransactionType 20 and 21%0A TRANSACTION_SPECIFICATION = 49%0A AGREEMENTS = 78 # TODO Better name?%0A ASSIGNMENT_END = 88%0A TRANSMISSION_END = 89%0A%0A%0Aclass AvtaleGiroAssignmentType(IntEnum):%0A PAYMENT_REQUEST = 0 # TODO Better name?%0A AGREEMENTS = 24 # TODO Better name?%0A CANCELATION = 36 # TODO Better name?%0A%0A%0Aclass AvtaleGiroTransactionType(IntEnum):%0A NO_NOTIFICATION_FROM_BANK = 2 # TODO Better name?%0A NOTIFICATION_FROM_BANK = 21 # TODO Better name?%0A CANCELATION = 93 # TODO Better name?%0A AGREEMENTS = 94 # TODO Better name?%0A%0A%0Aclass OcrGiroTransactionType(IntEnum):%0A FROM_GIRO_DEBITED_ACCOUNT = 10%0A FROM_STANDING_ORDERS = 11%0A FROM_DIRECT_REMITTANCE = 12%0A FROM_BUSINESS_TERMINAL_GIRO = 13%0A FROM_COUNTER_GIRO = 14%0A FROM_AVTALEGIRO = 15%0A FROM_TELEGIRO = 16%0A FROM_CASH_GIRO = 17%0A%0A REVERSING_WITH_KID = 18%0A PURCHASE_WITH_KID = 19%0A REVERSING_WITH_TEXT = 20%0A PURCHASE_WITH_TEXT = 21%0A
|
|
ca16e36b79e9c7dcd5cb31d899ef9c50ebf602c1 | add unit test for _nearest_neighbor() | urbanaccess/tests/test_network.py | urbanaccess/tests/test_network.py | Python | 0.000002 | @@ -0,0 +1,1333 @@
+import pytest%0Aimport pandas as pd%0Afrom urbanaccess import network%0A%0A%[email protected]%0Adef nearest_neighbor_dfs():%0A data = %7B%0A 'id': (1, 2, 3),%0A 'x': %5B-122.267546, -122.264479, -122.219119%5D,%0A 'y': %5B37.802919, 37.808042, 37.782288%5D%0A %7D%0A osm_nodes = pd.DataFrame(data).set_index('id')%0A%0A data = %7B%0A 'node_id_route': %5B'1_transit_a', '2_transit_a',%0A '3_transit_a', '4_transit_a'%5D,%0A 'x': %5B-122.265417, -122.266910, -122.269741, -122.238638%5D,%0A 'y': %5B37.806372, 37.802687, 37.799480, 37.797234%5D%0A %7D%0A transit_nodes = pd.DataFrame(data).set_index('node_id_route')%0A%0A data = %7B'node_id_route': %5B'1_transit_a', '2_transit_a',%0A '3_transit_a', '4_transit_a'%5D,%0A 'nearest_osm_node': %5B2, 1, 1, 3%5D%7D%0A index = range(4)%0A expected_transit_nodes = pd.concat(%0A %5Btransit_nodes, pd.DataFrame(data, index).set_index('node_id_route')%5D,%0A axis=1)%0A return osm_nodes, transit_nodes, expected_transit_nodes%0A%0A%0Adef test_nearest_neighbor(nearest_neighbor_dfs):%0A osm_nodes, transit_nodes, expected_transit_nodes = nearest_neighbor_dfs%0A transit_nodes%5B'nearest_osm_node'%5D = network._nearest_neighbor(%0A osm_nodes%5B%5B'x', 'y'%5D%5D,%0A transit_nodes%5B%5B'x', 'y'%5D%5D)%0A%0A assert expected_transit_nodes.equals(transit_nodes)%0A
|
|
6c4253c1ddf12c7255ed9c68b566b957d85b7048 | use closing output (test is not yet ready) | utest/reporting/test_reporting.py | utest/reporting/test_reporting.py | from StringIO import StringIO
import os
import unittest
from robot.reporting.resultwriter import ResultWriter, Result
from robot.output import LOGGER
from robot.result.executionresult import ExecutionResult
from robot.result.testsuite import TestSuite
from robot.utils.asserts import assert_true, assert_equals
LOGGER.disable_automatic_console_logger()
class TestReporting(unittest.TestCase, ResultWriter):
EXPECTED_SUITE_NAME = 'My Suite Name'
EXPECTED_TEST_NAME = 'My Test Name'
EXPECTED_KEYWORD_NAME = 'My Keyword Name'
EXPECTED_FAILING_TEST = 'My Failing Test'
EXPECTED_DEBUG_MESSAGE = '1111DEBUG777'
def setUp(self):
self._settings = lambda:0
self._settings.log = None
self._settings.log_config = None
self._settings.split_log = False
self._settings.report = None
self._settings.report_config = None
self._settings.output = None
self._settings.xunit = None
self._settings.status_rc = True
self._settings.suite_config = {}
self._settings.statistics_config = {}
self._create_default_suite()
def _create_default_suite(self):
root_suite = TestSuite(name=self.EXPECTED_SUITE_NAME)
root_suite.tests.create(name=self.EXPECTED_TEST_NAME).\
keywords.create(name=self.EXPECTED_KEYWORD_NAME,
status='PASS')
root_suite.tests.create(name=self.EXPECTED_FAILING_TEST).\
keywords.create(name=self.EXPECTED_KEYWORD_NAME).\
messages.create(message=self.EXPECTED_DEBUG_MESSAGE,
level='DEBUG',
timestamp='20201212 12:12:12.000')
self._result = Result(self._settings, None)
self._result._model = ExecutionResult(root_suite)
def test_generate_report_and_log(self):
self._settings.log = ClosingOutput()
self._settings.report = ClosingOutput()
self.write_results()
self._verify_log()
self._verify_report()
def _verify_log(self):
log = self._settings.log.getvalue()
assert_true(self.EXPECTED_KEYWORD_NAME in log)
assert_true(self.EXPECTED_SUITE_NAME in log)
assert_true(self.EXPECTED_TEST_NAME in log)
assert_true(self.EXPECTED_FAILING_TEST in log)
def _verify_report(self):
report = self._settings.report.getvalue()
assert_true(self.EXPECTED_KEYWORD_NAME not in report)
assert_true(self.EXPECTED_SUITE_NAME in report)
assert_true(self.EXPECTED_TEST_NAME in report)
assert_true(self.EXPECTED_FAILING_TEST in report)
def test_no_generation(self):
self._result._model = None
self.write_results()
assert_equals(self._result._model, None)
def test_only_log(self):
self._settings.log = ClosingOutput()
self.write_results()
self._verify_log()
def test_only_report(self):
self._settings.report = ClosingOutput()
self.write_results()
self._verify_report()
def test_only_xunit(self):
self._settings.xunit = ClosingOutput()
self.write_results()
self._verify_xunit()
def test_only_output_generation(self):
self._settings.output = ClosingOutput()
self.write_results()
self._verify_output()
def test_generate_all(self):
self._settings.log = ClosingOutput()
self._settings.report = ClosingOutput()
self._settings.xunit = ClosingOutput()
self._settings.output = ClosingOutput()
self.write_results()
self._verify_log()
self._verify_report()
self._verify_xunit()
self._verify_output()
def _verify_xunit(self):
xunit = self._settings.xunit.getvalue()
assert_true(self.EXPECTED_DEBUG_MESSAGE in xunit)
def _verify_output(self):
assert_true(self._settings.output.getvalue())
def _test_split_tests(self):
self._settings.split_log = True
self._settings.log = StringIO()
self.write_results()
self._verify_log()
if os.name == 'java':
import java.io.OutputStream
import java.lang.String
class ClosingOutput(java.io.OutputStream):
def __init__(self):
self._output = StringIO()
__enter__ = lambda *args: 0
__exit__ = lambda self, *args: self.close()
def write(self, *args):
self._output.write(java.lang.String(args[0]))
def close(self):
self.value = self._output.getvalue()
self._output.close()
def getvalue(self):
return self.value
else:
class ClosingOutput(object):
def __init__(self):
self._output = StringIO()
__enter__= lambda *args: 0
__exit__ = lambda self, *args: self.close()
def write(self, data):
self._output.write(data)
def close(self):
self.value = self._output.getvalue()
self._output.close()
def getvalue(self):
return self.value
if __name__ == '__main__':
unittest.main()
| Python | 0 | @@ -3969,24 +3969,64 @@
etvalue())%0A%0A
+ #TODO: Find a way to test split_log%0A
def _tes
@@ -4115,24 +4115,29 @@
s.log =
-StringIO
+ClosingOutput
()%0A
|
7c5dbbcd1de6376a025117fe8f00516f2fcbb40d | Add regressiontest for crypto_onetimeauth_verify | tests/unit/test_auth_verify.py | tests/unit/test_auth_verify.py | Python | 0.004908 | @@ -0,0 +1,1867 @@
+# Import nacl libs%0Aimport libnacl%0A%0A# Import python libs%0Aimport unittest%0A%0A%0Aclass TestAuthVerify(unittest.TestCase):%0A '''%0A Test onetimeauth functions%0A '''%0A def test_auth_verify(self):%0A msg = b'Anybody can invent a cryptosystem he cannot break himself. Except Bruce Schneier.'%0A key1 = libnacl.utils.rand_nonce()%0A key2 = libnacl.utils.rand_nonce()%0A%0A sig1 = libnacl.crypto_auth(msg, key1)%0A sig2 = libnacl.crypto_auth(msg, key2)%0A%0A self.assertTrue(libnacl.crypto_auth_verify(sig1, msg, key1))%0A with self.assertRaises(ValueError) as context:%0A libnacl.crypto_auth_verify(sig1, msg, key2)%0A self.assertTrue('Failed to auth msg' in context.exception)%0A%0A with self.assertRaises(ValueError) as context:%0A libnacl.crypto_auth_verify(sig2, msg, key1)%0A self.assertTrue('Failed to auth msg' in context.exception)%0A self.assertTrue(libnacl.crypto_auth_verify(sig2, msg, key2))%0A%0A '''%0A Test onetimeauth functions%0A '''%0A def test_onetimeauth_verify(self):%0A msg = b'Anybody can invent a cryptosystem he cannot break himself. Except Bruce Schneier.'%0A key1 = libnacl.utils.rand_nonce()%0A key2 = libnacl.utils.rand_nonce()%0A%0A sig1 = libnacl.crypto_onetimeauth(msg, key1)%0A sig2 = libnacl.crypto_onetimeauth(msg, key2)%0A%0A self.assertTrue(libnacl.crypto_onetimeauth_verify(sig1, msg, key1))%0A with self.assertRaises(ValueError) as context:%0A libnacl.crypto_onetimeauth_verify(sig1, msg, key2)%0A self.assertTrue('Failed to auth msg' in context.exception)%0A%0A with self.assertRaises(ValueError) as context:%0A libnacl.crypto_onetimeauth_verify(sig2, msg, key1)%0A self.assertTrue('Failed to auth msg' in context.exception)%0A self.assertTrue(libnacl.crypto_onetimeauth_verify(sig2, msg, key2))%0A
|
|
80ccffb269b04af02224c1121c41d4e7c503bc30 | Add unit test for intersperse | tests/util/test_intersperse.py | tests/util/test_intersperse.py | Python | 0.000001 | @@ -0,0 +1,482 @@
+# This file is part of rinohtype, the Python document preparation system.%0A#%0A# Copyright (c) Brecht Machiels.%0A#%0A# Use of this source code is subject to the terms of the GNU Affero General%0A# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.%0A%0A%0Afrom rinoh.util import intersperse%0A%0A%0Adef test_intersperse():%0A separator = %22.%22%0A letters = %5B127, 0, 0, 1%5D%0A localhost = list(intersperse(letters, separator))%0A assert %5B127, %22.%22, 0, %22.%22, 0, %22.%22, 1%5D == localhost%0A
|
|
8f18a1b75b68d8c97efd57673b160a9ceda608a3 | Add Manifest class | manifest.py | manifest.py | Python | 0 | @@ -0,0 +1,23 @@
+__author__ = 'fervent'%0A
|
|
e56d9337cc5c63ef61afe8ffdee2019e19af0963 | Add test for resolved issue 184 | test/test_issue184.py | test/test_issue184.py | Python | 0 | @@ -0,0 +1,893 @@
+from rdflib.term import Literal%0Afrom rdflib.term import URIRef%0Afrom rdflib.graph import ConjunctiveGraph%0A%0Adef test_escaping_of_triple_doublequotes():%0A %22%22%22%0A Issue 186 - Check escaping of multiple doublequotes.%0A A serialization/deserialization roundtrip of a certain class of %0A Literals fails when there are both, newline characters and multiple subsequent %0A quotation marks in the lexical form of the Literal. In this case invalid N3%0A is emitted by the serializer, which in turn cannot be parsed correctly.%0A %22%22%22%0A g=ConjunctiveGraph()%0A g.add((URIRef('http://foobar'), URIRef('http://fooprop'), Literal('abc%5Cndef%22%22%22%22%22')))%0A # assert g.serialize(format='n3') == '@prefix ns1: %3Chttp:// .%5Cn%5Cnns1:foobar ns1:fooprop %22%22%22abc%5Cndef%5C%5C%22%5C%5C%22%5C%5C%22%5C%5C%22%5C%5C%22%22%22%22 .%5Cn%5Cn'%0A g2=ConjunctiveGraph()%0A g2.parse(data=g.serialize(format='n3'), format='n3')%0A assert g.isomorphic(g2) is True
|
|
0988a2a18688a8b8e07d94e1609405c17bbe717d | Add test suite for the playlist plugin | test/test_playlist.py | test/test_playlist.py | Python | 0 | @@ -0,0 +1,2888 @@
+# -*- coding: utf-8 -*-%0A# This file is part of beets.%0A# Copyright 2016, Thomas Scholtes.%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining%0A# a copy of this software and associated documentation files (the%0A# %22Software%22), to deal in the Software without restriction, including%0A# without limitation the rights to use, copy, modify, merge, publish,%0A# distribute, sublicense, and/or sell copies of the Software, and to%0A# permit persons to whom the Software is furnished to do so, subject to%0A# the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be%0A# included in all copies or substantial portions of the Software.%0A%0Afrom __future__ import division, absolute_import, print_function%0A%0Aimport os%0Aimport tempfile%0Aimport unittest%0A%0Afrom test import _common%0Afrom test import helper%0A%0Aimport beets%0A%0A%0Aclass PlaylistTest(unittest.TestCase, helper.TestHelper):%0A def setUp(self):%0A self.setup_beets()%0A self.lib = beets.library.Library(':memory:')%0A%0A i1 = _common.item()%0A i1.path = beets.util.normpath('/a/b/c.mp3')%0A i1.title = u'some item'%0A i1.album = u'some album'%0A self.lib.add(i1)%0A self.lib.add_album(%5Bi1%5D)%0A%0A i2 = _common.item()%0A i2.path = beets.util.normpath('/d/e/f.mp3')%0A i2.title = 'another item'%0A i2.album = 'another album'%0A self.lib.add(i2)%0A self.lib.add_album(%5Bi2%5D)%0A%0A i3 = _common.item()%0A i3.path = beets.util.normpath('/x/y/z.mp3')%0A i3.title = 'yet another item'%0A i3.album = 'yet another album'%0A self.lib.add(i3)%0A self.lib.add_album(%5Bi3%5D)%0A%0A self.playlist_dir = tempfile.TemporaryDirectory()%0A with open(os.path.join(self.playlist_dir.name, 'test.m3u'), 'w') as f:%0A f.write('%7B0%7D%5Cn'.format(beets.util.displayable_path(i1.path)))%0A f.write('%7B0%7D%5Cn'.format(beets.util.displayable_path(i2.path)))%0A%0A self.config%5B'directory'%5D = '/'%0A self.config%5B'playlist'%5D%5B'relative_to'%5D = 'library'%0A self.config%5B'playlist'%5D%5B'playlist_dir'%5D = self.playlist_dir.name%0A self.load_plugins('playlist')%0A%0A def tearDown(self):%0A self.unload_plugins()%0A self.playlist_dir.cleanup()%0A self.teardown_beets()%0A%0A def test_query_name(self):%0A q = u'playlist:test'%0A results = self.lib.items(q)%0A self.assertEqual(set(%5Bi.title for i in results%5D), set(%5B%0A u'some item',%0A u'another item',%0A %5D))%0A%0A def test_query_path(self):%0A q = u'playlist:%7B0%7D/test.m3u'.format(self.playlist_dir.name)%0A results = self.lib.items(q)%0A self.assertEqual(set(%5Bi.title for i in results%5D), set(%5B%0A u'some item',%0A u'another item',%0A %5D))%0A%0A%0Adef suite():%0A return unittest.TestLoader().loadTestsFromName(__name__)%0A%0Aif __name__ == '__main__':%0A unittest.main(defaultTest='suite')%0A
|
|
d7c4e7bcf366965c808e7807d8f37640dc2af281 | Add tests for image_pull_policy | tests/test_objects.py | tests/test_objects.py | """
Test functions used to create k8s objects
"""
from kubespawner.objects import make_pod_spec, make_pvc_spec
def test_make_simplest_pod():
"""
Test specification of the simplest possible pod specification
"""
assert make_pod_spec(
name='test',
image_spec='jupyter/singleuser:latest',
env={},
volumes=[],
volume_mounts=[],
cpu_limit=None,
cpu_guarantee=None,
mem_limit=None,
mem_guarantee=None
) == {
"metadata": {
"name": "test"
},
"spec": {
"containers": [
{
"env": [],
"name": "notebook",
"image": "jupyter/singleuser:latest",
"ports": [{
"containerPort": 8888
}],
"volumeMounts": [],
"resources": {
"limits": {
"cpu": None,
"memory": None
},
"requests": {
"cpu": None,
"memory": None
}
}
}
],
"volumes": []
},
"kind": "Pod",
"apiVersion": "v1"
}
def test_make_pod_resources_all():
"""
Test specifying all possible resource limits & guarantees
"""
assert make_pod_spec(
name='test',
image_spec='jupyter/singleuser:latest',
env={},
volumes=[],
volume_mounts=[],
cpu_limit=2,
cpu_guarantee=1,
mem_limit='1Gi',
mem_guarantee='512Mi'
) == {
"metadata": {
"name": "test"
},
"spec": {
"containers": [
{
"env": [],
"name": "notebook",
"image": "jupyter/singleuser:latest",
"ports": [{
"containerPort": 8888
}],
"volumeMounts": [],
"resources": {
"limits": {
"cpu": 2,
"memory": '1Gi'
},
"requests": {
"cpu": 1,
"memory": '512Mi'
}
}
}
],
"volumes": []
},
"kind": "Pod",
"apiVersion": "v1"
}
def test_make_pod_with_env():
"""
Test specification of a pod with custom environment variables
"""
assert make_pod_spec(
name='test',
image_spec='jupyter/singleuser:latest',
env={
'TEST_KEY': 'TEST_VALUE'
},
volumes=[],
volume_mounts=[],
cpu_limit=None,
cpu_guarantee=None,
mem_limit=None,
mem_guarantee=None
) == {
"metadata": {
"name": "test"
},
"spec": {
"containers": [
{
"env": [{'name': 'TEST_KEY', 'value': 'TEST_VALUE'}],
"name": "notebook",
"image": "jupyter/singleuser:latest",
"ports": [{
"containerPort": 8888
}],
"volumeMounts": [],
"resources": {
"limits": {
"cpu": None,
"memory": None
},
"requests": {
"cpu": None,
"memory": None
}
}
}
],
"volumes": []
},
"kind": "Pod",
"apiVersion": "v1"
}
def test_make_pvc_simple():
"""
Test specification of the simplest possible pvc specification
"""
assert make_pvc_spec(
name='test',
storage_class='',
access_modes=[],
storage=None
) == {
'kind': 'PersistentVolumeClaim',
'apiVersion': 'v1',
'metadata': {
'name': 'test',
'annotations': {
'volume.beta.kubernetes.io/storage-class': ''
}
},
'spec': {
'accessModes': [],
'resources': {
'requests': {
'storage': None
}
}
}
}
def test_make_resources_all():
"""
Test specifying all possible resource limits & guarantees
"""
assert make_pvc_spec(
name='test',
storage_class='gce-standard-storage',
access_modes=['ReadWriteOnce'],
storage='10Gi'
) == {
'kind': 'PersistentVolumeClaim',
'apiVersion': 'v1',
'metadata': {
'name': 'test',
'annotations': {
'volume.beta.kubernetes.io/storage-class': 'gce-standard-storage'
}
},
'spec': {
'accessModes': ['ReadWriteOnce'],
'resources': {
'requests': {
'storage': '10Gi'
}
}
}
}
| Python | 0.000002 | @@ -469,33 +469,76 @@
m_guarantee=None
+,%0A image_pull_policy='IfNotPresent',
%0A
-
) == %7B%0A
@@ -777,32 +777,87 @@
leuser:latest%22,%0A
+ %22imagePullPolicy%22: %22IfNotPresent%22,%0A
@@ -1828,24 +1828,67 @@
ntee='512Mi'
+,%0A image_pull_policy='IfNotPresent',
%0A ) == %7B%0A
@@ -2132,32 +2132,87 @@
leuser:latest%22,%0A
+ %22imagePullPolicy%22: %22IfNotPresent%22,%0A
@@ -3228,24 +3228,67 @@
arantee=None
+,%0A image_pull_policy='IfNotPresent',
%0A ) == %7B%0A
@@ -3517,32 +3517,32 @@
e%22: %22notebook%22,%0A
-
@@ -3575,32 +3575,87 @@
leuser:latest%22,%0A
+ %22imagePullPolicy%22: %22IfNotPresent%22,%0A
|
71577ec62406c0119ea2282a3011ebbc368a3a04 | add test_pollbot.py | tests/test_pollbot.py | tests/test_pollbot.py | Python | 0.000007 | @@ -0,0 +1,412 @@
+#!/usr/bin/env python3%0A%0Aimport pytest%0Aimport poll_bot%0A%0Aclass TestPollBot:%0A%0A%09def test_extract_emoji(self):%0A%09%09lines_and_emojis = %7B%0A%09%09%09' M)-ystery meat': 'M',%0A%09%09%09'%F0%9F%90%95 dog sandwiches': '%F0%9F%90%95',%0A%09%09%09'3 blind mice': '3',%0A%09%09%09'%F0%9F%87%BA%F0%9F%87%B8 flags': '%F0%9F%87%BA%F0%9F%87%B8',%0A%09%09%09'%3C:python3:232720527448342530%3E python3!': '%3C:python3:232720527448342530%3E',%0A%09%09%7D%0A%09%09%0A%09%09for input, output in lines_and_emojis.items():%0A%09%09%09assert poll_bot.extract_emoji(input) == output
|
|
1e9980aff2370b96171011f7fa50d4517957fa86 | Add a script to check TOI coverage for a bbox and zoom range | tilepack/check_toi.py | tilepack/check_toi.py | Python | 0 | @@ -0,0 +1,1512 @@
+import mercantile%0Aimport argparse%0A%0Adef main():%0A parser = argparse.ArgumentParser()%0A parser.add_argument('min_lon',%0A type=float,%0A help='Bounding box minimum longitude/left')%0A parser.add_argument('min_lat',%0A type=float,%0A help='Bounding box minimum latitude/bottom')%0A parser.add_argument('max_lon',%0A type=float,%0A help='Bounding box maximum longitude/right')%0A parser.add_argument('max_lat',%0A type=float,%0A help='Bounding box maximum latitude/top')%0A parser.add_argument('min_zoom',%0A type=int,%0A help='The minimum zoom level to include')%0A parser.add_argument('max_zoom',%0A type=int,%0A help='The maximum zoom level to include')%0A args = parser.parse_args()%0A%0A print(%22zoom%5Ctmissing from toi%5Ctin aoi%22)%0A%0A for zoom in range(args.min_zoom, args.max_zoom + 1):%0A tiles_in_aoi = set(%5B%0A '%7B%7D/%7B%7D/%7B%7D'.format(z, x, y)%0A for x, y, z in mercantile.tiles(%0A args.min_lon, args.min_lat, args.max_lon, args.max_lat,%0A %5Bzoom%5D%0A )%0A %5D)%0A%0A with open('toi.z%7B%7D.txt'.format(zoom), 'r') as f:%0A tiles_in_toi = set(%5B%0A l.strip()%0A for l in f.readlines()%0A %5D)%0A%0A print(%22%7Bzoom:2d%7D%5Ct%7Btiles_not_in_toi%7D%5Ct%7Btiles_in_aoi%7D%22.format(%0A zoom=zoom,%0A tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi),%0A tiles_in_aoi=len(tiles_in_aoi),%0A ))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
14f9aa65f9a92f074846ea6861539f4f4fa18926 | Handle CalledProcessError when removing files from downloaded zip file | tools/download_jar.py | tools/download_jar.py | #!/usr/bin/python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from hashlib import sha1
from optparse import OptionParser
from os import link, makedirs, path, symlink
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from zipfile import ZipFile, BadZipfile, LargeZipFile

REPO_ROOTS = {
  'GERRIT': 'http://gerrit-maven.commondatastorage.googleapis.com',
  'MAVEN_CENTRAL': 'http://repo1.maven.org/maven2',
}

GERRIT_HOME = '~/.gerritcodereview'
LOCAL_PROPERTIES = 'local.properties'

def hashfile(p):
  d = sha1()
  with open(p, 'rb') as f:
    while True:
      b = f.read(8192)
      if not b:
        break
      d.update(b)
  return d.hexdigest()

def safe_mkdirs(d):
  if path.isdir(d):
    return
  try:
    makedirs(d)
  except OSError as err:
    if not path.isdir(d):
      raise err

def download_properties(root_dir):
  """ Get the download properties.

  First tries to find the properties file in the given root directory,
  and if not found there, tries in the Gerrit settings folder in the
  user's home directory.

  Returns a set of download properties, which may be empty.
  """
  p = {}
  local_prop = path.join(root_dir, LOCAL_PROPERTIES)
  if not path.isfile(local_prop):
    local_prop = path.join(path.expanduser(GERRIT_HOME), LOCAL_PROPERTIES)
  if path.isfile(local_prop):
    try:
      with open(local_prop) as fd:
        for line in fd:
          if line.startswith('download.'):
            d = [e.strip() for e in line.split('=', 1)]
            name, url = d[0], d[1]
            p[name[len('download.'):]] = url
    except OSError:
      pass
  return p

def cache_entry(root_dir, args):
  if args.v:
    h = args.v
  else:
    h = sha1(args.u).hexdigest()
  name = '%s-%s' % (path.basename(args.o), h)
  return path.join(root_dir, 'buck-cache', name)

def resolve_url(url, redirects):
  s = url.find(':')
  if s < 0:
    return url
  scheme, rest = url[:s], url[s+1:]
  if scheme not in REPO_ROOTS:
    return url
  if scheme in redirects:
    root = redirects[scheme]
  else:
    root = REPO_ROOTS[scheme]
  while root.endswith('/'):
    root = root[:-1]
  while rest.startswith('/'):
    rest = rest[1:]
  return '/'.join([root, rest])

opts = OptionParser()
opts.add_option('-o', help='local output file')
opts.add_option('-u', help='URL to download')
opts.add_option('-v', help='expected content SHA-1')
opts.add_option('-x', action='append', help='file to delete from ZIP')
opts.add_option('--exclude_java_sources', action='store_true')
args, _ = opts.parse_args()

root_dir = args.o
while root_dir:
  root_dir, n = path.split(root_dir)
  if n == 'buck-out':
    break

redirects = download_properties(root_dir)
cache_ent = cache_entry(root_dir, args)
src_url = resolve_url(args.u, redirects)

if not path.exists(cache_ent):
  try:
    safe_mkdirs(path.dirname(cache_ent))
    print('Download %s' % src_url, file=stderr)
    check_call(['curl', '--proxy-anyauth', '-sfo', cache_ent, src_url])
  except OSError as err:
    print('error creating directory %s: %s' %
          (path.dirname(cache_ent), err), file=stderr)
    exit(1)
  except CalledProcessError as err:
    print('error using curl: %s' % err, file=stderr)
    exit(1)

if args.v:
  have = hashfile(cache_ent)
  if args.v != have:
    o = cache_ent[len(root_dir) + 1:]
    print((
      '%s:\n' +
      'expected %s\n' +
      'received %s\n' +
      ' %s\n') % (src_url, args.v, have, o), file=stderr)
    exit(1)

exclude = []
if args.x:
  exclude += args.x
if args.exclude_java_sources:
  try:
    zf = ZipFile(cache_ent, 'r')
    try:
      for n in zf.namelist():
        if n.endswith('.java'):
          exclude.append(n)
    finally:
      zf.close()
  except (BadZipfile, LargeZipFile) as err:
    print("error opening %s: %s" % (cache_ent, err), file=stderr)
    exit(1)

safe_mkdirs(path.dirname(args.o))
if exclude:
  shutil.copyfile(cache_ent, args.o)
  check_call(['zip', '-d', args.o] + exclude)
else:
  try:
    link(cache_ent, args.o)
  except OSError as err:
    symlink(cache_ent, args.o)
| Python | 0.000023 | @@ -4503,16 +4503,25 @@
args.o)%0A
+ try:%0A
check_
@@ -4558,16 +4558,118 @@
xclude)%0A
+ except CalledProcessError as err:%0A print('error removing files from zip: %25s' %25 err, file=stderr)%0A
else:%0A
|
17654378a6039203ead1c711b6bb8f7fb3ad8680 | add Ermine ELF dumper. | tools/dump-ermine-elfs.py | tools/dump-ermine-elfs.py | Python | 0 | @@ -0,0 +1,2399 @@
+#!/usr/bin/env python%0A#%0A# Copyright (C) 2013 Mikkel Krautz %[email protected]%3E%0A#%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions%0A# are met:%0A#%0A# - Redistributions of source code must retain the above copyright notice,%0A# this list of conditions and the following disclaimer.%0A# - Redistributions in binary form must reproduce the above copyright notice,%0A# this list of conditions and the following disclaimer in the documentation%0A# and/or other materials provided with the distribution.%0A# - Neither the name of the Mumble Developers nor the names of its%0A# contributors may be used to endorse or promote products derived from this%0A# software without specific prior written permission.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS%0A# %60AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT%0A# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR%0A# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR%0A# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,%0A# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,%0A# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR%0A# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF%0A# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING%0A# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS%0A# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0A# dump-ermine-elfs.py is a simple script that dumps all embedded%0A# ELFs (executables and shared libraries) contained in an Ermine%0A# packed ELF binary.%0A%0Aimport os%0Aimport sys%0A%0Adef usage():%0A%09print 'dump-ermine-elfs.py %3Cfn%3E'%0A%09sys.exit(1)%0A%0Adef main():%0A%09if len(sys.argv) %3C 2:%0A%09%09usage()%0A%09fn = sys.argv%5B1%5D%0A%09f = open(fn, 'r')%0A%09all = f.read()%0A%09f.close()%0A%0A%09elfMagic = '%5Cx7fELF'%0A%09elfPairs = %5B%5D%0A%09for i in range(0, len(all)):%0A%09%09if i == 0: # skip binary itself%0A%09%09%09continue%0A%09%09if all%5Bi:i+len(elfMagic)%5D == elfMagic:%0A%09%09%09elfPairs.append(i)%0A%09elfPairs.append(len(all))%0A%0A%09for i, ofs in enumerate(elfPairs):%0A%09%09if i == len(elfPairs)-1: # done?%0A%09%09%09break%0A%09%09end = elfPairs%5Bi+1%5D%0A%09%09fn = 'dumped-%25i.elf' %25 i%0A%09%09print 'dumping elf @ 0x%25x to %25s' %25 (ofs, fn)%0A%09%09f = open(fn, 'w')%0A%09%09f.write(all%5Bofs:end%5D)%0A%09%09f.close()%0A%0Aif __name__ == '__main__':%0A%09main()%0A
|
|
654e2bf70b4a47adb53d8a0b17f0257e84c7bdf8 | read in data, check things look sensible. Note: need to change unknowns in group col so we have a more usable data type in the pandas dataframe. | main.py | main.py | Python | 0 | @@ -0,0 +1,231 @@
+# Data modelling challenge.%0A%0A__author__ = 'Remus Knowles %[email protected]%3E'%0A%0Aimport pandas as pd%0A%0AF_DATA = r'data challenge test.csv'%0A%0Adef main():%0A%09df = pd.read_csv(F_DATA)%0A%0A%09print df.head()%0A%0Aif __name__ == '__main__':%0A%09main()
|
|
0046f5276c9572fbc40080cc2201a89ee37b96b2 | Create mwis.py | mwis.py | mwis.py | Python | 0.000005 | @@ -0,0 +1,782 @@
+weights = %5Bint(l) for l in open('mwis.txt')%5D%5B1:%5D%0A%0Adef mwis(weights):%0A%0A n = len(weights)%0A%0A weights = %5B0%5D + weights%0A%0A maxsetweight = %5B0, weights%5B1%5D%5D%0A%0A for i in range(2, n + 1):%0A maxsetweight.append(max(maxsetweight%5Bi - 1%5D, maxsetweight%5Bi - 2%5D + weights%5Bi%5D ))%0A%0A i = n%0A maxset = %5B%5D%0A%0A while i %3E 1:%0A%0A if maxsetweight%5Bi-2%5D + weights%5Bi%5D %3E maxsetweight%5Bi-1%5D:%0A maxset.append(i)%0A i -= 2%0A%0A if i == 1:%0A maxset.append(1)%0A break%0A else:%0A i -= 1%0A%0A return (maxsetweight%5Bn%5D, maxset)%0A%0Aa, b = mwis(weights)%0Aprint %22The weight of the maximum weight independent set of the graph is :%22, a%0Aprint %22The vertices that constitute the maximum weight independent set of the path graph are :%22, b%0A
|
|
572a47ab8b05f8e93ec5e1b415cb56387d4279ca | add m_restart.py | pyscf/nao/m_restart.py | pyscf/nao/m_restart.py | Python | 0.000026 | @@ -0,0 +1,1872 @@
+%0A#An HDF5 file is a container for two kinds of objects: datasets (array-like collections of data), and groups (folder-like containers that hold datasets).%0A#Groups work like dictionaries, and datasets work like NumPy arrays%0A%0Adef read_rst_h5py (filename=None):%0A import h5py ,os%0A if filename is None: %0A path = os.getcwd()%0A filename =find('*.hdf5', path)%0A #filename= 'SCREENED_COULOMB.hdf5'%0A with h5py.File(filename, 'r') as f:%0A #print(%22Keys: %25s%22 %25 f.keys())%0A a_group_key = list(f.keys())%5B0%5D%0A # Get the data%0A data = list(f%5Ba_group_key%5D)%0A msg = 'RESTART: Full matrix elements of screened interactions (W_c) was read from %7B%7D'.format(filename)%0A return data, msg%0A%0A%0Adef write_rst_h5py(data, filename = None):%0A import h5py%0A if filename is None: filename= 'SCREENED_COULOMB.hdf5'%0A with h5py.File(filename, 'w') as data_file:%0A data_file.create_dataset('W_c', data=data)%0A data_file.close%0A msg = 'Full matrix elements of screened interactions (W_c) stored in %7B%7D'.format(filename)%0A return msg%0A%0A%0Adef write_rst_yaml (data , filename=None):%0A import yaml%0A if filename is None: filename= 'SCREENED_COULOMB.yaml'%0A with open(filename, 'w+', encoding='utf8') as outfile:%0A yaml.dump(data, outfile, default_flow_style=False, allow_unicode=True)%0A msg = 'Full matrix elements of screened interactions stored in %7B%7D'.format(filename)%0A return msg%0A%0A%0Adef read_rst_yaml (filename=None):%0A import yaml, os%0A if filename is None: %0A path = os.getcwd()%0A filename =find('*.yaml', path)%0A with open(filename, 'r') as stream:%0A try:%0A data = yaml.load(stream)%0A msg = 'RESTART: Full matrix elements of screened interactions (W_c) was read from %7B%7D'.format(filename)%0A return data, msg%0A except yaml.YAMLError as exc:%0A return exc%0A
|
|
a0c303e9c1f7ac75e078e6f3ae9586ba68a24f63 | add the solution | python/oj/mergeSort.py | python/oj/mergeSort.py | Python | 0.000256 | @@ -0,0 +1,736 @@
+#!/usr/bin/python%0A# coding:utf8%0A%0A'''%0A@author: shaoyuliang%0A@contact: [email protected]%0A@since: 7/16/14%0A%0A'''%0A%0A# https://oj.leetcode.com/problems/merge-sorted-array/%0A%0Aclass Solution:%0A # @param A a list of integers%0A # @param m an integer, length of A%0A # @param B a list of integers%0A # @param n an integer, length of B%0A # @return nothing%0A def merge(self, A, m, B, n):%0A for i in range(n):%0A A.append(B%5Bi%5D)%0A%0A a = 0%0A b = m%0A while a %3C b and b %3C m + n:%0A if A%5Ba%5D %3C A%5Bb%5D:%0A a += 1%0A continue%0A else:%0A c = A.pop(b)%0A b += 1%0A A.insert(a, c)%0A%0A%0AA = %5B1, 3, 5%5D%0ASolution().merge(A, 3, %5B2, 4%5D, 2)%0Aprint A
|