column        dtype          min-max / classes
commit        stringlengths  40-40
subject       stringlengths  1-3.25k
old_file      stringlengths  4-311
new_file      stringlengths  4-311
old_contents  stringlengths  0-26.3k
lang          stringclasses  3 values
proba         float64        0-1
diff          stringlengths  0-7.82k
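Note: the diff cells in the records below are stored percent-encoded (%0A for newline, %22 for a double quote, %25 for a literal percent sign, and so on). A minimal sketch of decoding one cell, assuming standard URL percent-encoding; the row dict is a hypothetical example shaped like the schema above, with values excerpted from the first record below:

from urllib.parse import unquote

# Hypothetical row shaped like the schema above; the diff value is a
# shortened excerpt of the first record's diff cell.
row = {
    "commit": "8080128d2ca5718ac971160bf964c3ca73b235b7",
    "subject": "add downloader",
    "new_file": "operators/downloader.py",
    "lang": "Python",
    "proba": 0.000001,
    "diff": "@@ -0,0 +1,904 @@ +import os%0A%0Afrom drivers import driver%0A",
}

# unquote() maps %0A to a newline, %22 to '"', %25 to '%', and so on,
# recovering readable unified-diff text. Plain unquote() (not
# unquote_plus()) is used so literal '+' diff markers are preserved.
print(unquote(row["diff"]))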
8080128d2ca5718ac971160bf964c3ca73b235b7
add downloader
operators/downloader.py
operators/downloader.py
Python
0.000001
@@ -0,0 +1,904 @@ +import os%0A%0Afrom drivers import driver%0Afrom file_utils import file_util%0A%0A%0Aclass Downloader(object):%0A def __init__(self, server_driver):%0A %22%22%22Init a Uploader object%0A%0A Args:%0A server_driver: a driver already connected to cloud service%0A %22%22%22%0A if not issubclass(driver.Driver, driver.Driver):%0A raise TypeError('Driver should be a subclass of drivers.driver.Driver')%0A self.driver = server_driver%0A%0A def download(self, remote_filename, local_filename=None, local_dir=None):%0A if local_filename is None and local_dir is None:%0A raise AttributeError('Need at least one of local_filename or local_dir.')%0A if local_dir:%0A local_filename = os.path.join(local_dir, file_util.path_leaf(remote_filename))%0A self.driver.download(local_filename=local_filename,%0A remote_filename=remote_filename)%0A
5be32f4022135a10585cf094b6fb8118dd87a2f6
Add files via upload (#396)
ciphers/Atbash.py
ciphers/Atbash.py
Python
0
@@ -0,0 +1,360 @@ +def Atbash():%0A inp=raw_input(%22Enter the sentence to be encrypted %22)%0A output=%22%22%0A for i in inp:%0A extract=ord(i)%0A if extract%3E=65 and extract%3C=90:%0A output+=(unichr(155-extract))%0A elif extract%3E=97 and extract%3C=122:%0A output+=(unichr(219-extract))%0A else:%0A output+=i%0A print output%0A%0AAtbash() ;
8d1016437e87794fb39b447b51427bae98a51bc2
Add one public IP provider
classes/jsonip.py
classes/jsonip.py
Python
0
@@ -0,0 +1,245 @@ +from json import load%0Afrom urllib2 import urlopen%0A%0Aclass JsonIp:%0A%0A def __init__(self):%0A url = 'https://jsonip.com/'%0A uri = urlopen(url)%0A response = load(uri)%0A self.ip = response%5B%22ip%22%5D%0A # self.ip = '1.1.1.1'%0A
b1be1bbf785406f4d286c7eb85ea459309ea03a2
Fix file hierarchy.
batch_image_resizer2/batch_image_resizer.py
batch_image_resizer2/batch_image_resizer.py
Python
0
@@ -0,0 +1,559 @@ +%22%22%22Resize images in a folder using imagemagick command line tools.%0A%0Ahttp://hakanu.net%0A%22%22%22%0A%0Aimport glob%0Aimport os%0A%0Adef main():%0A print 'Started'%0A images = glob.glob(%22/home/h/Desktop/all_karikatur_resized/*.jpg%22)%0A counter = 0%0A for image in images:%0A print 'Processing: ', image%0A index = image%5Bimage.rfind('/') + 1:image.rfind('.jpg')%5D%0A print 'index: ', index%0A os.system(%22convert %22 + index + %22.jpg -resize 128x128 resize_128_%22 + index + %22.jpg%22)%0A counter += 1%0A if counter %25 100 == 0:%0A print 'Completed: ', counter%0A print '%5Cn'%0A%0Amain()%0A
6cbc9230e241511ccc922eb179f62e08db78bf14
1689. Partitioning Into Minimum Number Of Deci-Binary Numbers
LeetCode/PartitioningIntoMinimumNumberOfDeciBinaryNumbers.py
LeetCode/PartitioningIntoMinimumNumberOfDeciBinaryNumbers.py
Python
0.999999
@@ -0,0 +1,180 @@ +%22%22%22 a convoluted way of describing finding the biggest decimal digit :D %22%22%22%0A%0Aclass Solution:%0A def minPartitions(self, n: str) -%3E int:%0A return max(int(d) for d in str(n))%0A
98b85a9fd8d5082e40996dfba0359b0ec32a9267
Add solution for "Cakes" kata https://www.codewars.com/kata/525c65e51bf619685c000059
codewars/cakes.py
codewars/cakes.py
Python
0.000191
@@ -0,0 +1,1176 @@ +# Cakes%0A# https://www.codewars.com/kata/525c65e51bf619685c000059%0A%0Aimport math%0Aimport unittest%0Afrom typing import Dict%0A%0A%0Adef cakes_1(recipe, available):%0A # type: (Dict%5Bstr, int%5D, Dict%5Bstr, int%5D) -%3E int%0A lowest_available = math.inf%0A%0A for i, a in recipe.items():%0A if i in available.keys():%0A av = available.get(i) / a%0A if lowest_available %3E av:%0A lowest_available = av%0A else:%0A lowest_available = 0%0A break%0A%0A return int(lowest_available)%0A%0A%0Adef cakes(recipe, available):%0A # type: (Dict%5Bstr, int%5D, Dict%5Bstr, int%5D) -%3E int%0A return min(available.get(i, 0) // recipe%5Bi%5D for i in recipe)%0A%0A%0Aclass TestCakes(unittest.TestCase):%0A def test(self):%0A recipe = %7B%22flour%22: 500, %22sugar%22: 200, %22eggs%22: 1%7D%0A available = %7B%22flour%22: 1200, %22sugar%22: 1200, %22eggs%22: 5, %22milk%22: 200%7D%0A self.assertEquals(cakes(recipe, available), 2, 'Wrong result for example #1')%0A%0A recipe = %7B%22apples%22: 3, %22flour%22: 300, %22sugar%22: 150, %22milk%22: 100, %22oil%22: 100%7D%0A available = %7B%22sugar%22: 500, %22flour%22: 2000, %22milk%22: 2000%7D%0A self.assertEquals(cakes(recipe, available), 0, 'Wrong result for example #2')%0A
5b97e6fc0446912d5b9b8da65e60d06165ed1b8b
Add profile tests
budgetsupervisor/users/tests/test_models.py
budgetsupervisor/users/tests/test_models.py
Python
0.000001
@@ -0,0 +1,464 @@ +from users.models import Profile%0A%0A%0Adef test_profile_is_created_when_user_is_created(user_foo):%0A assert len(Profile.objects.all()) == 1%0A assert hasattr(user_foo, %22profile%22)%0A%0A%0Adef test_profile_is_not_created_when_user_is_updated(user_foo):%0A assert len(Profile.objects.all()) == 1%0A user_foo.username = %22abc%22%0A user_foo.save()%0A assert len(Profile.objects.all()) == 1%0A%0A%0Adef test_profile_str(user_foo):%0A assert str(user_foo.profile) == str(user_foo)%0A
275adbea5477bbc6938e59edab23e1df182435ea
Create split-array-with-equal-sum.py
Python/split-array-with-equal-sum.py
Python/split-array-with-equal-sum.py
Python
0.009008
@@ -0,0 +1,933 @@ +# Time: O(n%5E2)%0A# Space: O(n)%0A%0Aclass Solution(object):%0A def splitArray(self, nums):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :rtype: bool%0A %22%22%22%0A if len(nums) %3C 7:%0A return False%0A %0A accumulated_sum = %5B0%5D * len(nums)%0A accumulated_sum%5B0%5D = nums%5B0%5D%0A for i in xrange(1, len(nums)):%0A accumulated_sum%5Bi%5D = accumulated_sum%5Bi-1%5D + nums%5Bi%5D%0A for j in xrange(3, len(nums)-3):%0A lookup = set()%0A for i in xrange(1, j-1):%0A if accumulated_sum%5Bi-1%5D == accumulated_sum%5Bj-1%5D - accumulated_sum%5Bi%5D:%0A lookup.add(accumulated_sum%5Bi-1%5D)%0A for k in xrange(j+2, len(nums)-1):%0A if accumulated_sum%5B-1%5D - accumulated_sum%5Bk%5D == accumulated_sum%5Bk-1%5D - accumulated_sum%5Bj%5D and %5C%0A accumulated_sum%5Bk - 1%5D - accumulated_sum%5Bj%5D in lookup:%0A return True%0A return False%0A
5e427315f46c026dd3b72b49349d3dcdbf04d138
add financial_insights
financial_insights.py
financial_insights.py
Python
0.000242
@@ -0,0 +1,1036 @@ +'''%0ACreated on Apr, 2017%0A%0A@author: hugo%0A%0A'''%0A%0Aimport numpy as np%0A%0Adef calc_ranks(x):%0A %22%22%22Given a list of items, return a list(in ndarray type) of ranks.%0A %22%22%22%0A n = len(x)%0A index = list(zip(*sorted(list(enumerate(x)), key=lambda d:d%5B1%5D, reverse=True))%5B0%5D)%0A rank = np.zeros(n)%0A rank%5Bindex%5D = range(1, n + 1)%0A return rank%0A%0Adef rank_bank_topic(bank_doc_map, doc_topic_dist):%0A %22%22%22Rank topics for banks%0A %22%22%22%0A bank_topic_ranks = %7B%7D%0A for each_bank in bank_doc_map:%0A rank = %5B%5D%0A for each_doc in bank_doc_map%5Beach_bank%5D:%0A rank.append(calc_ranks(doc_topic_dist%5Beach_doc%5D))%0A rank = np.r_%5Brank%5D%0A # compute ranking score%0A bank_topic_ranks%5Beach_bank%5D = np.sum(1. / rank, axis=0)%0A return bank_topic_ranks%0A%0Aif __name__ == '__main__':%0A n = 10%0A bank_doc_map = %7B'bank_0': %5B'doc_0', 'doc_1'%5D, 'bank_1': %5B'doc_2', 'doc_3', 'doc_4'%5D%7D%0A doc_topic_dist = dict(%5B('doc_%25s' %25 i, np.random.randn(n)) for i in range(5)%5D)%0A rank = rank_bank_topic(bank_doc_map, doc_topic_dist)%0A
877644f7325b8abb585c06b2b2bf77a2a59d4c8f
set default button in tx detail window
gui/qt/transaction_dialog.py
gui/qt/transaction_dialog.py
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys, time, datetime, re, threading from electrum.i18n import _, set_language from electrum.util import print_error, print_msg import os.path, json, ast, traceback import shutil import StringIO try: import PyQt4 except: sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'") from PyQt4.QtGui import * from PyQt4.QtCore import * import PyQt4.QtCore as QtCore from electrum import transaction from util import MyTreeWidget class TxDialog(QDialog): def __init__(self, tx, parent): self.tx = tx tx_dict = tx.as_dict() self.parent = parent self.wallet = parent.wallet QDialog.__init__(self) self.setMinimumWidth(600) self.setWindowTitle(_("Transaction")) self.setModal(1) vbox = QVBoxLayout() self.setLayout(vbox) vbox.addWidget(QLabel(_("Transaction ID:"))) self.tx_hash_e = QLineEdit() self.tx_hash_e.setReadOnly(True) vbox.addWidget(self.tx_hash_e) self.status_label = QLabel() vbox.addWidget(self.status_label) self.date_label = QLabel() vbox.addWidget(self.date_label) self.amount_label = QLabel() vbox.addWidget(self.amount_label) self.fee_label = QLabel() vbox.addWidget(self.fee_label) self.add_io(vbox) vbox.addStretch(1) buttons = QHBoxLayout() vbox.addLayout( buttons ) buttons.addStretch(1) self.sign_button = b = QPushButton(_("Sign")) b.clicked.connect(self.sign) buttons.addWidget(b) self.broadcast_button = b = QPushButton(_("Broadcast")) b.clicked.connect(self.broadcast) b.hide() buttons.addWidget(b) self.save_button = b = QPushButton(_("Save")) b.clicked.connect(self.save) buttons.addWidget(b) cancelButton = QPushButton(_("Close")) cancelButton.clicked.connect(lambda: self.done(0)) buttons.addWidget(cancelButton) self.update() def sign(self): tx_dict = self.tx.as_dict() input_info = json.loads(tx_dict["input_info"]) self.parent.sign_raw_transaction(self.tx, input_info) self.update() def save(self): fileName = self.parent.getSaveFileName(_("Select where to save your signed transaction"), 'signed_%s.txn' % (self.tx.hash()[0:8]), "*.txn") if fileName: with open(fileName, "w+") as f: f.write(json.dumps(self.tx.as_dict(),indent=4) + '\n') self.show_message(_("Transaction saved successfully")) def update(self): tx_hash = self.tx.hash() is_relevant, is_mine, v, fee = self.wallet.get_tx_value(self.tx) if self.tx.is_complete: status = _("Status: Signed") self.sign_button.hide() if tx_hash in self.wallet.transactions.keys(): conf, timestamp = self.wallet.verifier.get_confirmations(tx_hash) if timestamp: time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3] else: time_str = 'pending' status = _("Status: %d confirmations")%conf self.broadcast_button.hide() else: time_str = None conf = 0 self.broadcast_button.show() else: status = _("Status: Unsigned") time_str = None self.sign_button.show() 
self.broadcast_button.hide() self.tx_hash_e.setText(tx_hash) self.status_label.setText(status) if time_str is not None: self.date_label.setText(_("Date: %s")%time_str) self.date_label.show() else: self.date_label.hide() if is_relevant: if is_mine: if fee is not None: self.amount_label.setText(_("Amount sent:")+' %s'% self.parent.format_amount(v-fee) + ' ' + self.parent.base_unit()) self.fee_label.setText(_("Transaction fee:")+' %s'% self.parent.format_amount(fee) + ' ' + self.parent.base_unit()) else: self.amount_label.setText(_("Amount sent:")+' %s'% self.parent.format_amount(v) + ' ' + self.parent.base_unit()) self.fee_label.setText(_("Transaction fee: unknown")) else: self.amount_label.setText(_("Amount received:")+' %s'% self.parent.format_amount(v) + ' ' + self.parent.base_unit()) else: self.amount_label.setText(_("Transaction unrelated to your wallet")) def exec_menu(self, position,l): item = l.itemAt(position) if not item: return addr = unicode(item.text(0)) menu = QMenu() menu.addAction(_("Copy to clipboard"), lambda: self.parent.app.clipboard().setText(addr)) menu.exec_(l.viewport().mapToGlobal(position)) def add_io(self, vbox): vbox.addWidget(QLabel(_("Inputs"))) lines = map(lambda x: x.get('address') , self.tx.inputs ) i_text = QTextEdit('\n'.join(lines)) i_text.setReadOnly(True) i_text.setMaximumHeight(100) vbox.addWidget(i_text) vbox.addWidget(QLabel(_("Outputs"))) lines = map(lambda x: x[0] + u'\t\t' + self.parent.format_amount(x[1]), self.tx.outputs) o_text = QTextEdit() o_text.setText('\n'.join(lines)) o_text.setReadOnly(True) o_text.setMaximumHeight(100) vbox.addWidget(o_text) def broadcast(self): result, result_message = self.wallet.sendtx( self.tx ) if result: self.show_message(_("Transaction successfully sent:")+' %s' % (result_message)) if dialog: dialog.done(0) else: self.show_message(_("There was a problem sending your transaction:") + '\n %s' % (result_message)) def show_message(self, msg): QMessageBox.information(self, _('Message'), msg, _('OK'))
Python
0
@@ -2783,16 +2783,62 @@ Button)%0A + cancelButton.setDefault(True)%0A %0A
b566663291301bb3f84348d1208d8bf69b517421
update URL on language match only
flexget/plugins/urlrewrite_serienjunkies.py
flexget/plugins/urlrewrite_serienjunkies.py
from __future__ import unicode_literals, division, absolute_import import re import logging from flexget import plugin from flexget.event import event from flexget.plugins.plugin_urlrewriting import UrlRewritingError from flexget.utils import requests from flexget.utils.soup import get_soup log = logging.getLogger('serienjunkies') LANGUAGE = ['de', 'en', 'both'] HOSTER = ['ul', 'cz', 'so'] class UrlRewriteSerienjunkies(object): """ Serienjunkies urlrewriter Version 1.0.0 Language setting works like a whitelist, the selected is needed, but others are still possible. Configuration language: [de|en|both] default "en" hoster: [ul|cz|so] default "ul" """ schema = { 'type': 'object', 'properties': { 'language': {'type': 'string', 'enum': LANGUAGE, 'default': 'en'}, 'hoster': {'type': 'string', 'enum': HOSTER, 'default': 'ul'} }, 'additionalProperties': False } # urlrewriter API def url_rewritable(self, task, entry): url = entry['url'] if url.startswith('http://download.serienjunkies.org/'): return False if url.startswith('http://www.serienjunkies.org/') or url.startswith('http://serienjunkies.org/'): return True return False # urlrewriter API def url_rewrite(self, task, entry): series_url = entry['url'] download_title = entry['title'] search_title = re.sub('\[.*\] ', '', download_title) self.config = task.config.get('serienjunkies') download_url = self.parse_download(series_url, search_title, self.config, entry) log.debug('TV Show URL: %s' % series_url) log.debug('Episode: %s' % search_title) log.debug('Download URL: %s' % download_url) entry['url'] = download_url @plugin.internet(log) def parse_download(self, series_url, search_title, config, entry): page = requests.get(series_url).content try: soup = get_soup(page) except Exception as e: raise UrlRewritingError(e) config = config or {} config.setdefault('hoster', 'ul') config.setdefault('language', 'en') # find matching download episode_title = soup.find('strong', text=search_title) if not episode_title: raise UrlRewritingError('Unable to find episode') # find download container episode = episode_title.parent if not episode: raise UrlRewritingError('Unable to find episode container') # find episode language episode_lang = episode.find_previous('strong', text=re.compile('Sprache')).next_sibling if not episode_lang: raise UrlRewritingError('Unable to find episode language') # filter language if config['language'] in ['de', 'both']: if not re.search('german|deutsch', episode_lang, flags=re.IGNORECASE): log.verbose('Language doesn\'t match') elif config['language'] in ['en', 'both']: if not re.search('englisc?h', episode_lang, flags=re.IGNORECASE): log.verbose('Language doesn\'t match') # find download links links = episode.find_all('a') if not links: raise UrlRewritingError('Unable to find download links') for link in links: if not link.has_attr('href'): continue url = link['href'] pattern = 'http:\/\/download\.serienjunkies\.org.*%s_.*\.html' % config['hoster'] if re.match(pattern, url): return url else: log.verbose('Hoster doesn\'t match') continue raise UrlRewritingError('URL-Rewriting failed, enable verbose logging for details.') @event('plugin.register') def register_plugin(): plugin.register(UrlRewriteSerienjunkies, 'serienjunkies', groups=['urlrewriter'], api_ver=2)
Python
0.000001
@@ -1645,16 +1645,117 @@ entry)%0A + if download_url is None:%0A download_url = entry%5B'url'%5D %0A #Debug Information%0A @@ -3072,35 +3072,36 @@ -log.verbose +entry.reject ('Language d @@ -3105,34 +3105,71 @@ e doesn%5C't match -') + selected')%0A return None %0A elif co @@ -3298,27 +3298,28 @@ -log.verbose +entry.reject ('Langua @@ -3335,18 +3335,55 @@ 't match -') + selected')%0A return None %0A%0A @@ -3891,16 +3891,25 @@ 't match + selected ')%0A
6c133d4de6a79eab6bfc2da9ff9a0045e0a0994d
add problem hackerrank 009
hackerrank/009_sherlock_and_the_beast.py
hackerrank/009_sherlock_and_the_beast.py
Python
0.999803
@@ -0,0 +1,2162 @@ +#!/bin/python3%0D%0A%0D%0A%22%22%22%0D%0Ahttps://www.hackerrank.com/challenges/sherlock-and-the-beast?h_r=next-challenge&h_v=zen%0D%0A%0D%0ASherlock Holmes suspects his archenemy, Professor Moriarty, is once again plotting something diabolical. Sherlock's companion, Dr. Watson, suggests Moriarty may be responsible for MI6's recent issues with their supercomputer, The Beast.%0D%0A%0D%0AShortly after resolving to investigate, Sherlock receives a note from Moriarty boasting about infecting The Beast with a virus; however, he also gives him a clue%E2%80%94a number, N. Sherlock determines the key to removing the virus is to find the largest Decent Number having N digits.%0D%0A%0D%0AA Decent Number has the following properties:%0D%0A%0D%0AIts digits can only be 3's and/or 5's.%0D%0AThe number of 3's it contains is divisible by 5.%0D%0AThe number of 5's it contains is divisible by 3.%0D%0AIf there are more than one such number, we pick the largest one.%0D%0A%0D%0AMoriarty's virus shows a clock counting down to The Beast's destruction, and time is running out fast. Your task is to help Sherlock find the key before The Beast is destroyed!%0D%0A%0D%0A%0D%0AConstraints%0D%0A%0D%0A1%3C=T%3C=20%0D%0A1%3C=N%3C=100000%0D%0A%0D%0AInput Format%0D%0A%0D%0AThe first line is an integer, T, denoting the number of test cases.%0D%0A%0D%0AThe T subsequent lines each contain an integer, N, detailing the number of digits in the number.%0D%0A%0D%0AOutput Format%0D%0A%0D%0APrint the largest Decent Number having N digits; if no such number exists, tell Sherlock by printing -1.%0D%0A%0D%0ASample Input%0D%0A%0D%0A4%0D%0A1%0D%0A3%0D%0A5%0D%0A11%0D%0A%0D%0ASample Output%0D%0A%0D%0A-1%0D%0A555%0D%0A33333%0D%0A55555533333%0D%0A%0D%0AExplanation%0D%0A%0D%0AFor N = 1, there is no decent number having 1 digit (so we print -1).%0D%0AFor N = 3, 555 is the only possible number. The number 5 appears three times in this number, so our count of 5's is evenly divisible by 3 (Decent Number Property 3).%0D%0AFor N = 5, 33333 is the only possible number. The number 3 appears five times in this number, so our count of 3's is evenly divisible by 5 (Decent Number Property 2).%0D%0AFor N = 11, 5555533333 and all permutations of these digits are valid numbers; among them, the given number is the largest one.%0D%0A%0D%0A%22%22%22%0D%0A%0D%0Aimport sys%0D%0A%0D%0A%0D%0At = int(input().strip())%0D%0Afor a0 in range(t):%0D%0A n = int(input().strip())%0D%0A
ef1fa03d753f5d8a0b32831320a1b3e076ace363
Add a test runner for our jqplot demo too
moksha/apps/demo/MokshaJQPlotDemo/run_tests.py
moksha/apps/demo/MokshaJQPlotDemo/run_tests.py
Python
0
@@ -0,0 +1,152 @@ +#!/usr/bin/env python%0A%22%22%22%0Anose runner script.%0A%22%22%22%0A__requires__ = 'moksha'%0A%0Aimport pkg_resources%0Aimport nose%0A%0Aif __name__ == '__main__':%0A nose.main()%0A
d8c5fa6ebe1ae5d690d832a8e1d8403922a27403
create a tmpfs at /tmp for verify on prow
scenarios/kubernetes_verify.py
scenarios/kubernetes_verify.py
#!/usr/bin/env python # Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Need to figure out why this only fails on travis # pylint: disable=bad-continuation """Runs verify/test-go checks for kubernetes/kubernetes.""" import argparse import os import re import subprocess import sys BRANCH_VERSION = { '1.2': '1.4', '1.3': '1.4', 'master': '1.9', } VERSION_TAG = { '1.4': '1.4-v20161130-8958f82', '1.5': '1.5-v20161205-d664d14', '1.6': '1.6-v20161205-ad918bc', '1.7': '1.7-v20170713-c28e0556', '1.8': '1.8-v20170906-3a1c5ae5', '1.9': '1.9-v20171018-6ddbad97', } def check_output(*cmd): """Log and run the command, return output, raising on errors.""" print >>sys.stderr, 'Run:', cmd return subprocess.check_output(cmd) def check(*cmd): """Log and run the command, raising on errors.""" print >>sys.stderr, 'Run:', cmd subprocess.check_call(cmd) def retry(func, times=5): """call func until it returns true at most times times""" success = False for _ in range(0, times): success = func() if success: return success return success def try_call(cmds): """returns true if check(cmd) does not throw an exception over all cmds where cmds = [[cmd, arg, arg2], [cmd2, arg]]""" try: for cmd in cmds: check(*cmd) return True # pylint: disable=bare-except except: return False def get_git_cache(k8s): git = os.path.join(k8s, ".git") if not os.path.isfile(git): return None with open(git) as git_file: return git_file.read().replace("gitdir: ", "").rstrip("\n") def main(branch, script, force, on_prow): """Test branch using script, optionally forcing verify checks.""" # If branch has 3-part version, only take first 2 parts. verify_branch = re.match(r'master|release-(\d+\.\d+)', branch) if not verify_branch: raise ValueError(branch) # Extract version if any. ver = verify_branch.group(1) or verify_branch.group(0) tag = VERSION_TAG[BRANCH_VERSION.get(ver, ver)] force = 'y' if force else 'n' artifacts = '%s/_artifacts' % os.environ['WORKSPACE'] k8s = os.getcwd() if not os.path.basename(k8s) == 'kubernetes': raise ValueError(k8s) check('rm', '-rf', '.gsutil') remote = 'bootstrap-upstream' uri = 'https://github.com/kubernetes/kubernetes.git' current_remotes = check_output('git', 'remote') if re.search('^%s$' % remote, current_remotes, flags=re.MULTILINE): check('git', 'remote', 'remove', remote) check('git', 'remote', 'add', remote, uri) check('git', 'remote', 'set-url', '--push', remote, 'no_push') # If .git is cached between runs this data may be stale check('git', 'fetch', remote) if not os.path.isdir(artifacts): os.makedirs(artifacts) if on_prow: # TODO(bentheelder): on prow REPO_DIR should be /go/src/k8s.io/kubernetes # however these paths are brittle enough as is... 
git_cache = get_git_cache(k8s) cmd = [ 'docker', 'run', '--rm=true', '--privileged=true', '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v', '/etc/localtime:/etc/localtime:ro', '-v', '%s:/go/src/k8s.io/kubernetes' % k8s, ] if git_cache is not None: cmd.extend(['-v', '%s:%s' % (git_cache, git_cache)]) cmd.extend([ '-v', '/workspace/k8s.io/:/workspace/k8s.io/', '-v', '%s:/workspace/artifacts' % artifacts, '-e', 'KUBE_FORCE_VERIFY_CHECKS=%s' % force, '-e', 'KUBE_VERIFY_GIT_BRANCH=%s' % branch, '-e', 'REPO_DIR=%s' % k8s, # hack/lib/swagger.sh depends on this 'gcr.io/k8s-testimages/kubekins-test:%s' % tag, 'bash', '-c', 'cd kubernetes && %s' % script, ]) check(*cmd) else: check( 'docker', 'run', '--rm=true', '--privileged=true', '-v', '/var/run/docker.sock:/var/run/docker.sock', '-v', '/etc/localtime:/etc/localtime:ro', '-v', '%s:/go/src/k8s.io/kubernetes' % k8s, '-v', '%s:/workspace/artifacts' % artifacts, '-e', 'KUBE_FORCE_VERIFY_CHECKS=%s' % force, '-e', 'KUBE_VERIFY_GIT_BRANCH=%s' % branch, '-e', 'REPO_DIR=%s' % k8s, # hack/lib/swagger.sh depends on this 'gcr.io/k8s-testimages/kubekins-test:%s' % tag, 'bash', '-c', 'cd kubernetes && %s' % script, ) if __name__ == '__main__': PARSER = argparse.ArgumentParser( 'Runs verification checks on the kubernetes repo') PARSER.add_argument( '--branch', default='master', help='Upstream target repo') PARSER.add_argument( '--force', action='store_true', help='Force all verify checks') PARSER.add_argument( '--script', default='./hack/jenkins/test-dockerized.sh', help='Script in kubernetes/kubernetes that runs checks') PARSER.add_argument( '--prow', action='store_true', help='Force Prow mode' ) ARGS = PARSER.parse_args() main(ARGS.branch, ARGS.script, ARGS.force, ARGS.prow)
Python
0
@@ -4275,32 +4275,149 @@ depends on this%0A + '-e', 'TMPDIR=/tmp', # https://golang.org/src/os/file_unix.go%0A '--tmpfs /tmp:exec,mode=777',%0A 'gcr
b261704bc0ada9cfae773eaf1e40b18dc49d6ceb
add outline of background job processor and task interface
portality/background.py
portality/background.py
Python
0
@@ -0,0 +1,2207 @@ +from portality import models%0Afrom portality.core import app%0A%0Aclass BackgroundApi(object):%0A%0A @classmethod%0A def execute(self, background_task):%0A job = background_task.background_job%0A ctx = None%0A if job.user is not None:%0A ctx = app.test_request_context(%22/%22)%0A ctx.push()%0A%0A try:%0A background_task.run()%0A except:%0A background_task.log()%0A%0A try:%0A background_task.cleanup()%0A except:%0A background_task.log()%0A%0A background_task.report()%0A job.save()%0A%0A if ctx is not None:%0A ctx.pop()%0A%0Aclass BackgroundTask(object):%0A %22%22%22%0A All background tasks should extend from this object and override at least the following methods:%0A%0A - run%0A - cleanup%0A - report%0A - log%0A - prepare (class method)%0A%0A %22%22%22%0A def __init__(self, background_job):%0A self.background_job = background_job%0A%0A def run(self):%0A %22%22%22%0A Execute the task as specified by the background_jon%0A :return:%0A %22%22%22%0A raise NotImplementedError()%0A%0A def cleanup(self):%0A %22%22%22%0A Cleanup after a successful OR failed run of the task%0A :return:%0A %22%22%22%0A raise NotImplementedError()%0A%0A def report(self):%0A %22%22%22%0A Augment the background_job with information about the task run%0A :return:%0A %22%22%22%0A raise NotImplementedError()%0A%0A def log(self):%0A %22%22%22%0A Log any exceptions or other errors in running the task%0A :return:%0A %22%22%22%0A raise NotImplementedError()%0A%0A @classmethod%0A def prepare(cls, **kwargs):%0A %22%22%22%0A Take an arbitrary set of keyword arguments and return an instance of a BackgroundJob,%0A or fail with a suitable exception%0A%0A :param kwargs: arbitrary keyword arguments pertaining to this task type%0A :return: a BackgroundJob instance representing this task%0A %22%22%22%0A raise NotImplementedError()%0A%0A @classmethod%0A def submit(cls, background_job):%0A %22%22%22%0A Submit the specified BackgroundJob to the background queue%0A%0A :param background_job: the BackgroundJob instance%0A :return:%0A %22%22%22%0A pass
c8ec0689950a5fea0aff98afe54b172bd84e2ce9
Add example using Tom's registration code in scipy.
examples/coregister.py
examples/coregister.py
Python
0
@@ -0,0 +1,653 @@ +%22%22%22Example using Tom's registration code from scipy.%0A%0A%22%22%22%0A%0Afrom os import path%0Afrom glob import glob%0A%0Aimport scipy.ndimage._registration as reg%0A%0A# Data files%0Abasedir = '/Users/cburns/data/twaite'%0Aanatfile = path.join(basedir, 'ANAT1_V0001.img')%0Afuncdir = path.join(basedir, 'fMRIData')%0Afileglob = path.join(funcdir, 'FUNC1_V000?.img') # Get first 10 images%0A%0Aif __name__ == '__main__':%0A print 'Coregister anatomical:%5Cn', anatfile%0A print '%5CnWith these functional images:'%0A funclist = glob(fileglob)%0A for func in funclist:%0A print func%0A measures, imageF_anat, fmri_series = %5C%0A reg.demo_MRI_coregistration(anatfile, funclist%5B0:4%5D)%0A
114f8012a7faec4fe107c1d68c2ead10cdd88fbe
update zero.1flow.io settings for sparks 2.x.
oneflow/settings/zero_1flow_io.py
oneflow/settings/zero_1flow_io.py
# -*- coding: utf-8 -*- # Settings for zero.1flow.io, a master clone used to validate migrations. import os from sparks.django.settings import include_snippets include_snippets( os.path.dirname(__file__), ( '000_nobother', '00_production', '1flow_io', 'common', 'db_common', 'db_production', 'cache_common', 'cache_production', 'mail_production', 'raven_development', 'common_production', ), globals() ) # Overide real production settings, to be able to distinguish. SITE_DOMAIN = 'zero.1flow.io' ALLOWED_HOSTS += ['localhost', SITE_DOMAIN]
Python
0
@@ -96,18 +96,8 @@ s.%0A%0A -import os%0A from @@ -170,35 +170,8 @@ %0A - os.path.dirname(__file__), (%0A @@ -449,16 +449,26 @@ ),%0A + __file__, globals
db60219a1446bb75dd98bfbb12ee6ec4eda6d6bb
add structure for the pcaptotal API
web/api.py
web/api.py
Python
0
@@ -0,0 +1,1183 @@ +from flask import jsonify, abort, make_response%0Afrom flask.ext.httpauth import HTTPBasicAuth%0Aauth = HTTPBasicAuth()%0Afrom app import app%0Atasks = %5B%0A %7B%0A 'id': 1,%0A 'title': u'Buy groceries',%0A 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol', %0A 'done': False%0A %7D,%0A %7B%0A 'id': 2,%0A 'title': u'Learn Python',%0A 'description': u'Need to find a good Python tutorial on the web', %0A 'done': False%0A %7D%0A%5D%0A%[email protected]('/todo/api/v1.0/tasks', methods=%5B'GET'%5D)%0A#@auth.login_required%0Adef get_tasks():%0A return jsonify(%7B'tasks': tasks%7D)%0A%0A%[email protected]('/todo/api/v1.0/tasks/%3Cint:task_id%3E', methods=%5B'GET'%5D)%0A#@auth.login_required%0Adef get_task(task_id):%0A task = %5Btask for task in tasks if task%5B'id'%5D == task_id%5D%0A if len(task) == 0:%0A abort(404)%0A return jsonify(%7B'task': task%5B0%5D%7D)%0A%[email protected](404)%0Adef not_found(error):%0A return make_response(jsonify(%7B'error': 'Not found'%7D), 404)%0A%[email protected]_password%0Adef get_password(username):%0A if username == 'ask3m':%0A return 'ask3m'%0A return None%0A%[email protected]_handler%0Adef unauthorized():%0A return make_response(jsonify(%7B'error': 'Unauthorized access'%7D), 401)%0A%0A%0A%0A
62b01c3c1614d5719cc69be951b2f6c660e40faa
Add generic function for iterating arrays.
pyldap/libldap/tools.py
pyldap/libldap/tools.py
Python
0
@@ -0,0 +1,162 @@ +def iterate_array(arr, f=None):%0A i = 0%0A while True:%0A if not arr%5Bi%5D:%0A break%0A yield arr%5Bi%5D if f is None else f(arr%5Bi%5D)%0A i += 1
d8cd42940df8c1d2fc9ae28e9c5caa21995ca68c
Add word-counter.py
python3/word-counter.py
python3/word-counter.py
Python
0.002063
@@ -0,0 +1,1122 @@ +#!/usr/bin/env python3%0A%0Afrom collections import Counter%0Aimport argparse%0Aimport re%0Afrom itertools import islice%0Aimport operator%0A%0Aparser = argparse.ArgumentParser()%0Aparser.add_argument('--numWords',type=int,default=10)%0Aparser.add_argument('--maxTuples',type=int,default=4)%0Aparser.add_argument('--minWordLength',type=int,default=5)%0Aparser.add_argument('file',type=str)%0Aargs = parser.parse_args()%0A%0A# Inspired by http://stackoverflow.com/questions/6822725%0Adef window(seq, n):%0A it = iter(seq)%0A result = tuple(islice(it, n))%0A if len(result) == n:%0A yield result%0A for elem in it:%0A result = result%5B1:%5D + (elem,)%0A containsShortWord = False%0A for i in result:%0A if len(i) %3C args.minWordLength:%0A containsShortWord = True%0A break%0A if not containsShortWord:%0A yield result%0A%0Awith open(args.file,'r') as f:%0A content = f.read().replace('%5Cn',' ')%0A words = re.findall(r'%5CS+', content)%0A for i in range(1,args.maxTuples+1):%0A print(%22%5Cn=== Sliding Window: %7B%7D ===%22.format(i))%0A for tup in Counter(window(words,i)).most_common(args.numWords):%0A print(%22 %7B%7D: '%7B%7D'%22.format(tup%5B1%5D,%22 %22.join(tup%5B0%5D)))%0A
377a8be7c0b1e77f0e9c2dfd55f603e199727907
make pairs
files/v8/make_pairs.py
files/v8/make_pairs.py
Python
0.999365
@@ -0,0 +1,172 @@ +import fileinput%0A%0Apops=%5B%5D%0Afor line in fileinput.input():%0A%09pops.append(line%5B:-1%5D)%0A%0Afor i in range(len(pops)-1):%0A%09for j in range(i+1, len(pops)):%0A%09%09print pops%5Bi%5D+%22,%22+pops%5Bj%5D%0A
9c0ddb1c4fff5fb3f44ad77192a7f435bc7a22fe
Create AdafruitMotorHat4Pi.py
service/AdafruitMotorHat4Pi.py
service/AdafruitMotorHat4Pi.py
Python
0
@@ -0,0 +1,602 @@ +# Start the services needed%0Araspi = Runtime.start(%22raspi%22,%22RasPi%22)%0Ahat = Runtime.start(%22hat%22,%22AdafruitMotorHat4Pi%22)%0Am1 = Runtime.start(%22m1%22,%22MotorHat4Pi%22)%0A# Attach the HAT to i2c bus 1 and address 0x60%0Ahat.attach(%22raspi%22,%221%22,%220x60%22)%0A# Use the M1 motor port and attach the motor to the hat%0Am1.setMotor(%22M1%22)%0Am1.attach(%22hat%22)%0A# Now everything is wired up and we run a few tests%0A# Full speed forward%0Am1.move(1) %0Asleep(3)%0A# half speed forward%0Am1.move(.5)%0Asleep(3)%0A# Move backward at 60%25 speed%0Am1.move(-.6)%0Asleep(3)%0A# Stop%0Am1.move(0)%0A# Now you should be able to use the GUI or a script to control the motor%0A
2b2d711a5ba8be5cebe5913870c4dea1b9498af1
Remove misleading fileno method from NpipeSocket class
docker/transport/npipesocket.py
docker/transport/npipesocket.py
import functools import io import six import win32file import win32pipe cERROR_PIPE_BUSY = 0xe7 cSECURITY_SQOS_PRESENT = 0x100000 cSECURITY_ANONYMOUS = 0 RETRY_WAIT_TIMEOUT = 10000 def check_closed(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if self._closed: raise RuntimeError( 'Can not reuse socket after connection was closed.' ) return f(self, *args, **kwargs) return wrapped class NpipeSocket(object): """ Partial implementation of the socket API over windows named pipes. This implementation is only designed to be used as a client socket, and server-specific methods (bind, listen, accept...) are not implemented. """ def __init__(self, handle=None): self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT self._handle = handle self._closed = False def accept(self): raise NotImplementedError() def bind(self, address): raise NotImplementedError() def close(self): self._handle.Close() self._closed = True @check_closed def connect(self, address): win32pipe.WaitNamedPipe(address, self._timeout) try: handle = win32file.CreateFile( address, win32file.GENERIC_READ | win32file.GENERIC_WRITE, 0, None, win32file.OPEN_EXISTING, cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT, 0 ) except win32pipe.error as e: # See Remarks: # https://msdn.microsoft.com/en-us/library/aa365800.aspx if e.winerror == cERROR_PIPE_BUSY: # Another program or thread has grabbed our pipe instance # before we got to it. Wait for availability and attempt to # connect again. win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT) return self.connect(address) raise e self.flags = win32pipe.GetNamedPipeInfo(handle)[0] self._handle = handle self._address = address @check_closed def connect_ex(self, address): return self.connect(address) @check_closed def detach(self): self._closed = True return self._handle @check_closed def dup(self): return NpipeSocket(self._handle) @check_closed def fileno(self): return int(self._handle) def getpeername(self): return self._address def getsockname(self): return self._address def getsockopt(self, level, optname, buflen=None): raise NotImplementedError() def ioctl(self, control, option): raise NotImplementedError() def listen(self, backlog): raise NotImplementedError() def makefile(self, mode=None, bufsize=None): if mode.strip('b') != 'r': raise NotImplementedError() rawio = NpipeFileIOBase(self) if bufsize is None or bufsize <= 0: bufsize = io.DEFAULT_BUFFER_SIZE return io.BufferedReader(rawio, buffer_size=bufsize) @check_closed def recv(self, bufsize, flags=0): err, data = win32file.ReadFile(self._handle, bufsize) return data @check_closed def recvfrom(self, bufsize, flags=0): data = self.recv(bufsize, flags) return (data, self._address) @check_closed def recvfrom_into(self, buf, nbytes=0, flags=0): return self.recv_into(buf, nbytes, flags), self._address @check_closed def recv_into(self, buf, nbytes=0): if six.PY2: return self._recv_into_py2(buf, nbytes) readbuf = buf if not isinstance(buf, memoryview): readbuf = memoryview(buf) err, data = win32file.ReadFile( self._handle, readbuf[:nbytes] if nbytes else readbuf ) return len(data) def _recv_into_py2(self, buf, nbytes): err, data = win32file.ReadFile(self._handle, nbytes or len(buf)) n = len(data) buf[:n] = data return n @check_closed def send(self, string, flags=0): err, nbytes = win32file.WriteFile(self._handle, string) return nbytes @check_closed def sendall(self, string, flags=0): return self.send(string, flags) @check_closed def sendto(self, string, address): self.connect(address) return self.send(string) def setblocking(self, flag): if 
flag: return self.settimeout(None) return self.settimeout(0) def settimeout(self, value): if value is None: # Blocking mode self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER elif not isinstance(value, (float, int)) or value < 0: raise ValueError('Timeout value out of range') elif value == 0: # Non-blocking mode self._timeout = win32pipe.NMPWAIT_NO_WAIT else: # Timeout mode - Value converted to milliseconds self._timeout = value * 1000 def gettimeout(self): return self._timeout def setsockopt(self, level, optname, value): raise NotImplementedError() @check_closed def shutdown(self, how): return self.close() class NpipeFileIOBase(io.RawIOBase): def __init__(self, npipe_socket): self.sock = npipe_socket def close(self): super(NpipeFileIOBase, self).close() self.sock = None def fileno(self): return self.sock.fileno() def isatty(self): return False def readable(self): return True def readinto(self, buf): return self.sock.recv_into(buf) def seekable(self): return False def writable(self): return False
Python
0
@@ -2423,82 +2423,8 @@ e)%0A%0A - @check_closed%0A def fileno(self):%0A return int(self._handle)%0A%0A
b376b0dca7ec73451ff36ebae1718fa11ec159f0
Add utils.py for general purpose functions
manyfaced/common/utils.py
manyfaced/common/utils.py
Python
0.000003
@@ -0,0 +1,1353 @@ +import time%0Aimport pickle%0Afrom socket import error as socket_error%0A%0Afrom common.status import CLIENT_TIMEOUT%0A%0A%0Adef dump_file(data):%0A try:%0A with file('temp.db') as f:%0A string_file = f.read()%0A db = pickle.loads(string_file)%0A except:%0A db = list()%0A db.append(data)%0A with open('temp.db', %22w%22) as f:%0A f.write(str(pickle.dumps(db)))%0A%0A%0Adef recv_timeout(the_socket, timeout=CLIENT_TIMEOUT):%0A # make socket non blocking%0A the_socket.setblocking(0)%0A%0A # total data partwise in an array%0A total_data = %5B%5D%0A%0A # beginning time%0A begin = time.time()%0A while True:%0A # if you got some data, then break after timeout%0A if total_data and time.time() - begin %3E timeout:%0A break%0A%0A # if you got no data at all, wait a little longer, twice the timeout%0A elif time.time() - begin %3E timeout * 2:%0A break%0A%0A # recv something%0A try:%0A data = the_socket.recv(8192)%0A if data:%0A total_data.append(data)%0A # change the beginning time for measurement%0A begin = time.time()%0A else:%0A # sleep for sometime to indicate a gap%0A time.sleep(0.1)%0A except socket_error:%0A pass%0A%0A # join all parts to make final string%0A return ''.join(total_data)%0A
b9759f60c9f107c3d2c319f53ed2985ee58dc319
Write test for mock_pose generator.
src/tests/test_mock_pose.py
src/tests/test_mock_pose.py
Python
0
@@ -0,0 +1,1175 @@ +try:%0A from unittest.mock import patch, MagicMock%0Aexcept ImportError:%0A from mock import patch, MagicMock%0A%0Aimport pytest%0A%0Aimport rospy%0A%0AMockTf2 = MagicMock()%0Amodules = %7B%22tf2_ros%22: MockTf2%7D%0Apatcher = patch.dict(%22sys.modules%22, modules)%0Apatcher.start()%0A%0A%0Atry:%0A rospy.init_node(%22pytest%22, anonymous=True)%0Aexcept rospy.exceptions.ROSException:%0A pass%0A%0A%[email protected](scope=%22module%22)%0Adef teardown_module():%0A def fin():%0A patcher.stop()%0A%0A%0Aclass TestPoseGenerator(object):%0A def test_tf_and_pose_same(self):%0A from mock_pose import PoseGenerator%0A%0A pose = PoseGenerator.generate_pose()%0A transform = PoseGenerator.pose_to_tf(pose)%0A%0A assert transform.transform.translation.x == pose.pose.position.x%0A assert transform.transform.translation.y == pose.pose.position.y%0A assert transform.transform.translation.z == pose.pose.position.z%0A%0A assert transform.transform.rotation.x == pose.pose.orientation.x%0A assert transform.transform.rotation.y == pose.pose.orientation.y%0A assert transform.transform.rotation.z == pose.pose.orientation.z%0A assert transform.transform.rotation.w == pose.pose.orientation.w%0A
aed3fc48ba392f56441e35c2860be7319ee96822
Add default party accounts in customer/supplier
erpnext/patches/v4_2/party_model.py
erpnext/patches/v4_2/party_model.py
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): try: frappe.reload_doc("accounts", "doctype", "account") receivable_payable_accounts = create_receivable_payable_account() set_party_in_jv_and_gl_entry(receivable_payable_accounts) delete_individual_party_account(receivable_payable_accounts) remove_customer_supplier_account_report() except: print frappe.get_traceback() pass def link_warehouse_account(): frappe.db.sql("""update tabAccount set warehouse=master_name where ifnull(account_type, '') = 'Warehouse' and ifnull(master_name, '') != ''""") def create_receivable_payable_account(): receivable_payable_accounts = frappe._dict() def _create_account(args): account = frappe.new_doc("Account") account.group_or_ledger = "Ledger" account.update(args) account.insert() frappe.db.set_value("Company", args["company"], ("receivables_group" if args["account_type"]=="Receivable" else "payables_group"), account.name) receivable_payable_accounts.setdefault(args["company"], {}).setdefault(args["account_type"], account.name) for company in frappe.db.sql_list("select name from tabCompany"): _create_account({ "account_name": "Debtors", "account_type": "Receivable", "company": company, "parent_account": get_parent_account(company, "Customer") }) _create_account({ "account_name": "Creditors", "account_type": "Payable", "company": company, "parent_account": get_parent_account(company, "Supplier") }) return receivable_payable_accounts def get_parent_account(company, master_type): parent_account = frappe.db.get_value("Company", company, "receivables_group" if master_type=="Customer" else "payables_group") if not parent_account: parent_account = frappe.db.get_value("Account", {"company": company, "account_name": "Accounts Receivable" if master_type=="Customer" else "Accounts Payable"}) if not parent_account: parent_account = frappe.db.sql_list("""select parent_account from tabAccount where company=%s and ifnull(master_type, '')=%s and ifnull(master_name, '')!='' limit 1""", (company, master_type)) parent_account = parent_account[0][0] if parent_account else None return parent_account def set_party_in_jv_and_gl_entry(receivable_payable_accounts): accounts = frappe.db.sql("""select name, master_type, master_name, company from `tabAccount` where ifnull(master_type, '') in ('Customer', 'Supplier') and ifnull(master_name, '') != ''""", as_dict=1) account_map = frappe._dict() for d in accounts: account_map.setdefault(d.name, d) if not account_map: return for dt in ["Journal Voucher Detail", "GL Entry"]: records = frappe.db.sql("""select name, account from `tab%s` where account in (%s)""" % (dt, ", ".join(['%s']*len(account_map))), tuple(account_map.keys())) for d in records: account_details = account_map.get(d.account, {}) account_type = "Receivable" if account_details.get("master_type")=="Customer" else "Payable" new_account = receivable_payable_accounts[account_details.get("company")][account_type] frappe.db.sql("update `tab{0}` set account=%s, party_type=%s, party=%s where name=%s".format(dt), (new_account, account_details.get("master_type"), account_details.get("master_name"), d.name)) def delete_individual_party_account(): frappe.db.sql("""delete from `tabAccount` where ifnull(master_type, '') in ('Customer', 'Supplier') and ifnull(master_name, '') != ''""") def remove_customer_supplier_account_report(): for d in ["Customer Account Head", "Supplier Account Head"]: 
frappe.delete_doc("Report", d)
Python
0
@@ -198,15 +198,8 @@ ():%0A -%09try:%0A%09 %09fra @@ -247,17 +247,16 @@ count%22)%0A -%09 %09receiva @@ -314,16 +314,49 @@ count()%0A +%09if receivable_payable_accounts:%0A %09%09set_pa @@ -445,35 +445,8 @@ unt( -receivable_payable_accounts )%0A%09%09 @@ -492,53 +492,67 @@ ()%0A%09 -except:%0A%09%09print frappe.get_traceback()%0A%09%09pass +%09add_default_accounts_in_party(receivable_payable_accounts) %0A%0A%0Ad @@ -3739,12 +3739,499 @@ Report%22, d)%0A +%0Adef add_default_accounts_in_party(receivable_payable_accounts):%0A%09for dt in %5B%22Customer%22, %22Supplier%22%5D:%0A%09%09for p in frappe.db.sql(%22%22%22select name from %60tab%7B0%7D%60 where docstatus %3C 2%22%22%22.format(dt)):%0A%09%09%09try:%0A%09%09%09%09party = frappe.get_doc(dt, p%5B0%5D)%0A%09%09%09%09for company, accounts in receivable_payable_accounts.items():%0A%09%09%09%09%09party.append(%22party_accounts%22, %7B%0A%09%09%09%09%09%09%22company%22: company,%0A%09%09%09%09%09%09%22account%22: accounts%5B%22Receivable%22 if dt == %22Customer%22 else %22Payable%22%5D%0A%09%09%09%09%09%7D)%0A%09%09%09%09party.save()%0A%09%09%09except:%0A%09%09%09%09pass%0A
1ce12ab6eb2a3b5578eba253929275bb3b394b76
Create line_follow.py
line_follow.py
line_follow.py
Python
0.000005
@@ -0,0 +1,951 @@ +from Myro import *%0Ainit(%22/dev/tty.scribbler%22)%0A%0A# To stop the Scribbler, wave your hand/something in front of the fluke%0Awhile getObstacle('center') %3C 6300:%0A # Get the reading from the line sensors on the bottom of Scribbler%0A left, right = getLine()%0A %0A # If both left and right sensors are on track%0A if left == 1 and right == 1:%0A motors(-.1, -.1)%0A %0A # If just the right is on track, turn left%0A elif right == 1:%0A motors(.1,-.1)%0A %0A # If just the left is on track, turn right%0A elif left == 1:%0A motors(-.1,.1)%0A %0A # If both are off track, go backwards in a random direction.%0A # randomNumber returns a number between 0 and 1, so I scale that to go slower%0A elif left == 0 and right == 0:%0A motors(.1*randomNumber(),.1*randomNumber())%0A %0A# When it's done, stop and beep happily %0Astop()%0Abeep(.1,600)%0Abeep(.1,650)%0Abeep(.1,700)%0Abeep(.1,750)%0Abeep(.1,800)%0Abeep(.1,850)%0A
5fa39bb65f88fa3596cc3831890cd258cc5768e1
Add the module file
lantz/drivers/ni/__init__.py
lantz/drivers/ni/__init__.py
Python
0.000002
@@ -0,0 +1,345 @@ +%EF%BB%BF# -*- coding: utf-8 -*-%0A%22%22%22%0A lantz.drivers.ni%0A ~~~~~~~~~~~~~~~~%0A%0A :company: National Instruments%0A :description: %0A :website: http://www.ni.com/%0A%0A ----%0A%0A :copyright: 2012 by Lantz Authors, see AUTHORS for more details.%0A :license: BSD, see LICENSE for more details.%0A%22%22%22%0A%0Afrom .daqe import NI6052E%0A%0A__all__ = %5B'NI6052E', %5D%0A%0A
b3dcbe95d766d902d22a0c4c171cbbe5ce207571
Add test for longevity testing: both LEDs ON at the same time for extended periods of time
python/tests/stress_test.py
python/tests/stress_test.py
Python
0.000001
@@ -0,0 +1,790 @@ +#!/usr/bin/env python%0Aimport time%0Aimport sys%0Aimport os%0Afrom random import randint%0A%0A# Hack to import from a parent dir%0A# http://stackoverflow.com/a/11158224/401554%0Aparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))%0Asys.path.insert(0, parentdir)%0A%0Afrom octo import Octo%0A%0Aocto = Octo('/dev/ttyACM0')%0Aocto.reset()%0A%0A# Test that the LEDs don't burn out or short or what knot during continious active state%0Awhile True:%0A time.sleep(1)%0A octo.led0(randint(0,255),randint(0,255),randint(0,255))%0A time.sleep(1)%0A octo.led1(randint(0,255),randint(0,255),randint(0,255))%0A time.sleep(randint(60,120))%0A octo.led1(randint(0,255),randint(0,255),randint(0,255))%0A time.sleep(1)%0A octo.led0(randint(0,255),randint(0,255),randint(0,255))%0A time.sleep(randint(60,120))%0A
3063099427f29915fae70b453d5ff5f0200a8869
Correct import in loading module
oscar/core/loading.py
oscar/core/loading.py
import sys import traceback from django.conf import settings from django.db.models import get_model as django_get_model from core.exceptions import (ModuleNotFoundError, ClassNotFoundError, AppNotFoundError) def get_class(module_label, classname): """ Dynamically import a single class from the given module. This is a simple wrapper around `get_classes` for the case of loading a single class. Args: module_label (str): Module label comprising the app label and the module name, separated by a dot. For example, 'catalogue.forms'. classname (str): Name of the class to be imported. Returns: The requested class object or `None` if it can't be found """ return get_classes(module_label, [classname])[0] def get_classes(module_label, classnames): """ Dynamically import a list of classes from the given module. This works by looping over ``INSTALLED_APPS`` and looking for a match against the passed module label. If the requested class can't be found in the matching module, then we attempt to import it from the corresponding core Oscar app (assuming the matched module isn't in Oscar). This is very similar to ``django.db.models.get_model`` function for dynamically loading models. This function is more general though as it can load any class from the matching app, not just a model. Args: module_label (str): Module label comprising the app label and the module name, separated by a dot. For example, 'catalogue.forms'. classname (str): Name of the class to be imported. Returns: The requested class object or ``None`` if it can't be found Examples: Load a single class: >>> get_class('dashboard.catalogue.forms', 'ProductForm') oscar.apps.dashboard.catalogue.forms.ProductForm Load a list of classes: >>> get_classes('dashboard.catalogue.forms', ... ['ProductForm', 'StockRecordForm']) [oscar.apps.dashboard.catalogue.forms.ProductForm, oscar.apps.dashboard.catalogue.forms.StockRecordForm] Raises: AppNotFoundError: If no app is found in ``INSTALLED_APPS`` that matches the passed module label. ImportError: If the attempted import of a class raises an ``ImportError``, it is re-raised """ # e.g. split 'dashboard.catalogue.forms' in 'dashboard.catalogue', 'forms' package, module = module_label.rsplit('.', 1) # import from Oscar package (should succeed in most cases) # e.g. 'oscar.apps.dashboard.catalogue.forms' oscar_module_label = "oscar.apps.%s" % module_label oscar_module = _import_oscar_module(oscar_module_label, classnames) # returns e.g. 'oscar.apps.dashboard.catalogue', # 'yourproject.apps.dashboard.catalogue' or 'dashboard.catalogue' installed_apps_entry = _get_installed_apps_entry(package) if not installed_apps_entry.startswith('oscar.apps.'): # Attempt to import the classes from the local module # e.g. 'yourproject.dashboard.catalogue.forms' local_module_label = installed_apps_entry + '.' + module local_module = _import_local_module(local_module_label, classnames) else: # The entry is obviously an Oscar one, we don't import again local_module = None if oscar_module is local_module is None: # This intentionally doesn't rise an ImportError, because it would get # masked by in some circular import scenarios. raise ModuleNotFoundError( "The module with label '%s' could not be imported. This either" "means that it indeed does not exist, or you might have a problem" " with a circular import." 
% module_label ) # return imported classes, giving preference to ones from the local package return _pluck_classes([local_module, oscar_module], classnames) def _import_local_module(local_module_label, classnames): try: return __import__(local_module_label, fromlist=classnames) except ImportError: # There are 2 reasons why there is ImportError: # 1. local_app does not exist # 2. local_app exists but is corrupted (ImportError inside of the app) # # Obviously, for the reason #1 we want to fall back to use Oscar app. # For the reason #2 we want to propagate error (the dev obviously wants # to override app and not use Oscar app) # # ImportError does not provide easy way to distinguish those two cases. # Fortunately, the traceback of the ImportError starts at __import__ # statement. If the traceback has more than one frame, it means that # application was found and ImportError originates within the local app __, __, exc_traceback = sys.exc_info() frames = traceback.extract_tb(exc_traceback) if len(frames) > 1: raise def _import_oscar_module(oscar_module_label, classnames): try: return __import__(oscar_module_label, fromlist=classnames) except ImportError: # Oscar does not have this application, can't fallback to it return None def _pluck_classes(modules, classnames): klasses = [] for classname in classnames: klass = None for module in modules: if hasattr(module, classname): klass = getattr(module, classname) break if not klass: packages = [m.__name__ for m in modules if m is not None] raise ClassNotFoundError("No class '%s' found in %s" % ( classname, ", ".join(packages))) klasses.append(klass) return klasses def _get_installed_apps_entry(app_name): """ Walk through INSTALLED_APPS and return the first match. This does depend on the order of INSTALLED_APPS and will break if e.g. 'dashboard.catalogue' comes before 'catalogue' in INSTALLED_APPS. """ for installed_app in settings.INSTALLED_APPS: if installed_app.endswith(app_name): return installed_app raise AppNotFoundError("No app found matching '%s'" % app_name) def get_profile_class(): """ Return the profile model class """ setting = getattr(settings, 'AUTH_PROFILE_MODULE', None) if setting is None: return None app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.') return get_model(app_label, model_name) def feature_hidden(feature_name): """ Test if a certain Oscar feature is disabled. """ return (feature_name is not None and feature_name in settings.OSCAR_HIDDEN_FEATURES) def get_model(app_label, model_name, *args, **kwargs): """ Gets a model class by it's app label and model name. Fails loudly if the model class can't be imported. This is merely a thin wrapper around Django's get_model function. """ model = django_get_model(app_label, model_name, *args, **kwargs) if model is None: raise ImportError( "{app_label}.{model_name} could not be imported.".format( app_label=app_label, model_name=model_name)) return model
Python
0.000001
@@ -114,21 +114,28 @@ t_model%0A +%0A from +oscar. core.exc @@ -191,16 +191,22 @@ dError,%0A +
9bc154d662464a0073b8b7cd3bcf39312a4ac1d7
add ifttt notification
ifttt_notification.py
ifttt_notification.py
Python
0
@@ -0,0 +1,312 @@ +import requests%0A%0A%0Adef Air_alert():%0A report = %7B%7D%0A report%5B%22value1%22%5D = %22test%22%0A report%5B%22value2%22%5D = %22second%22%0A report%5B%22value3%22%5D = %22third%22%0A requests.post(%0A %22https://maker.ifttt.com/trigger/Air_Test/with/key/%7Buser_key%7D%22.format(user_key=%22%22), data=report)%0A%0Aif __name__ == %22__main__%22:%0A Air_alert()%0A
407c08899eccea60a2ae534ab0c1b000c58708ab
Implement some tests for AgentAPI
tests/test_agent_api.py
tests/test_agent_api.py
Python
0.000002
@@ -0,0 +1,3543 @@ +# No shebang line, this module is meant to be imported%0A#%0A# Copyright 2013 Oliver Palmer%0A# Copyright 2013 Ambient Entertainment GmbH & Co. KG%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport os%0Afrom utcore import ModelTestCase%0Afrom pyfarm.master.utility import dumps%0Atry:%0A from json import loads%0Aexcept ImportError:%0A from simplejson import loads%0A%0Aclass TestAgentAPI(ModelTestCase):%0A def test_agents_schema(self):%0A response = self.client.get(%22/api/v1/agents/schema%22)%0A self.assert200(response)%0A self.assertEquals(response.json, %7B%22ram%22: %22INTEGER%22,%0A %22free_ram%22: %22INTEGER%22,%0A %22use_address%22: %22INTEGER%22,%0A %22ip%22: %22IPv4Address%22,%0A %22hostname%22: %22VARCHAR(255)%22,%0A %22cpus%22: %22INTEGER%22,%0A %22port%22: %22INTEGER%22,%0A %22state%22: %22INTEGER%22,%0A %22ram_allocation%22: %22FLOAT%22,%0A %22cpu_allocation%22: %22FLOAT%22,%0A %22id%22: %22INTEGER%22,%0A %22remote_ip%22: %22IPv4Address%22%7D)%0A%0A def test_agent_read_write(self):%0A response1 = self.client.post(%22/api/v1/agents%22,%0A content_type=%22application/json%22,%0A data = dumps(%7B%22cpu_allocation%22: 1.0,%0A %22cpus%22: 16,%0A %22free_ram%22: 133,%0A %22hostname%22: %22testagent1%22,%0A %22ip%22: %2210.0.200.1%22,%0A %22port%22: 64994,%0A %22ram%22: 2048,%0A %22ram_allocation%22: 0.8,%0A %22state%22: 8%0A %7D))%0A self.assertStatus(response1, 201)%0A id = loads(response1.data)%5B'id'%5D%0A%0A response2 = self.client.get(%22/api/v1/agents/%25d%22 %25 id)%0A self.assert200(response2)%0A agent_data = loads(response2.data)%0A assert len(agent_data) == 12%0A assert response2.json == %7B%0A %22ram%22: 2048,%0A %22cpu_allocation%22: 1.0,%0A %22use_address%22: 22,%0A %22ip%22: %2210.0.200.1%22,%0A %22hostname%22: %22testagent1%22,%0A %22cpus%22: 16,%0A %22ram_allocation%22: 0.8,%0A %22port%22: 64994,%0A %22state%22: 8,%0A %22free_ram%22: 133,%0A %22id%22: id,%0A %22remote_ip%22: None%0A %7D%0A # TODO Test updating an agent%0A
2c2e73cb6e0f9baee0bbc0ad5338d09b5665d573
Remove useless import.
pymatgen/entries/compatibility.py
pymatgen/entries/compatibility.py
#!/usr/bin/env python

"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""

from __future__ import division

__author__ = "Shyue Ping Ong, Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 19, 2012"

import os
import ConfigParser

from collections import defaultdict

from pymatgen.core.composition import Composition
from pymatgen.entries.post_processors_abc import EntryPostProcessor
from pymatgen.io.vaspio_set import VaspInputSet
from pymatgen.util.decorators import cached_class


class Compatibility(EntryPostProcessor):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. This is a base class from which other specific compatibility
    schemes are implemented.
    """

    def __init__(self, input_set_name, compat_type):
        """
        Args:
            input_set_name:
                The name of the input set to use. Can be either
                MaterialsProject or MITMatgen.
            compat_type:
                Two options, GGA or Advanced. GGA means all GGA+U entries are
                excluded. Advanced means mixing scheme is implemented to make
                entries compatible with each other, but entries which are
                supposed to be done in GGA+U will have the equivalent GGA
                entries excluded. For example, Fe oxides should have a U value
                under the Advanced scheme. A GGA Fe oxide run will therefore
                be excluded under the scheme.
        """
        self.compat_type = compat_type
        self.input_set_name = input_set_name
        self.input_set = VaspInputSet(input_set_name)
        module_dir = os.path.dirname(os.path.abspath(__file__))
        self._config = ConfigParser.SafeConfigParser()
        self._config.optionxform = str
        self._config.readfp(open(os.path.join(module_dir,
                                              "Compatibility.cfg")))

        u_corrections = {}
        for el in self.input_set.incar_settings["LDAUU"].keys():
            name = "{}{}UCorrections{}".format(input_set_name, compat_type,
                                               el)
            if name in self._config.sections():
                corr = dict(self._config.items(name))
                u_corrections[el] = {k: float(v) for k, v in corr.items()}

        cpd_energies = dict(self._config.items("{}{}CompoundEnergies"
                                               .format(input_set_name,
                                                       compat_type)))

        self.u_corrections = u_corrections
        self.cpd_energies = {k: float(v) for k, v in cpd_energies.items()}
        self.valid_potcars = set(self.input_set.potcar_settings.values())
        self.u_settings = self.input_set.incar_settings["LDAUU"]

        if compat_type == "GGA":
            self.u_corrections = {}
            self.u_settings = {}

    def requires_hubbard(self, comp):
        """
        Check if a particular composition requies U parameters to be set.

        Args:
            comp:
                Composition

        Returns:
            True if hubbard U parameter required. False otherwise.
        """
        comp = Composition(comp)
        elements = sorted([el for el in comp.elements if comp[el] > 0],
                          key=lambda el: el.X)
        most_electroneg = elements[-1].symbol
        usettings = self.u_settings.get(most_electroneg, {})
        return any([usettings.get(el.symbol, 0) for el in comp.elements])

    def process_entry(self, entry):
        """
        Process a single entry with the chosen Compatibility scheme.

        Args:
            entry:
                A ComputedEntry object.

        Returns:
            An adjusted entry if entry is compatible, otherwise None is
            returned.
        """
        if entry.parameters.get("run_type", "GGA") == "HF":
            return None

        cpdenergies = self.cpd_energies
        calc_u = entry.parameters["hubbards"]
        calc_u = defaultdict(int) if calc_u is None else calc_u
        comp = entry.composition

        #Check that POTCARs are valid
        rform = comp.reduced_formula
        if rform not in cpdenergies:
            psp_settings = set([sym.split(" ")[1]
                                for sym in entry.parameters["potcar_symbols"]])
            if not self.valid_potcars.issuperset(psp_settings):
                return None

        #correct all compounds that are wrong, e.g. O2 molecule
        if rform in cpdenergies:
            entry.structureid = -comp.keys()[0].Z
            entry.correction = cpdenergies[rform] * comp.num_atoms \
                - entry.uncorrected_energy
        else:
            elements = sorted([el for el in comp.elements if comp[el] > 0],
                              key=lambda el: el.X)
            most_electroneg = elements[-1].symbol
            correction = 0
            ucorr = self.u_corrections.get(most_electroneg, {})
            usettings = self.u_settings.get(most_electroneg, {})

            for el in comp.elements:
                sym = el.symbol
                #Check for bad U values
                if calc_u.get(sym, 0) != usettings.get(sym, 0):
                    return None
                if sym in ucorr:
                    correction += float(ucorr[sym]) * comp[el]

            entry.correction = correction
        return entry

    def process_entries(self, entries):
        """
        Process a sequence of entries with the chosen Compatibility scheme.

        Args:
            entries - A sequence of entries.

        Returns:
            An list of adjusted entries.  Entries in the original list which
            are not compatible are excluded.
        """
        return filter(None, map(self.process_entry, entries))

    @property
    def corrected_compound_formulas(self):
        return self.cpd_energies.keys()

    def __str__(self):
        return "{} {} Compatibility".format(self.input_set_name,
                                            self.compat_type)


class MaterialsProjectCompatibility(Compatibility):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Note that this should only be used for VASP calculations using
    the MaterialsProject parameters (see pymatgen.io.vaspio_set
    MaterialsProjectVaspInputSet). Using this compatibility scheme on runs
    with different parameters is not valid.
    """

    def __init__(self, compat_type="Advanced"):
        """
        Args:
            compat_type:
                Two options, GGA or Advanced. GGA means all GGA+U entries are
                excluded. Advanced means mixing scheme is implemented to make
                entries compatible with each other, but entries which are
                supposed to be done in GGA+U will have the equivalent GGA
                entries excluded. For example, Fe oxides should have a U value
                under the Advanced scheme. A GGA Fe oxide run will therefore
                be excluded under the scheme.
        """
        Compatibility.__init__(self, "MaterialsProject", compat_type)


class MITCompatibility(MaterialsProjectCompatibility):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Note that this should only be used for VASP calculations using
    the MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
    this compatibility scheme on runs with different parameters is not valid.
    """

    def __init__(self, compat_type="Advanced"):
        """
        Args:
            compat_type:
                Two options, GGA or Advanced. GGA means all GGA+U entries are
                excluded. Advanced means mixing scheme is implemented to make
                entries compatible with each other, but entries which are
                supposed to be done in GGA+U will have the equivalent GGA
                entries excluded. For example, Fe oxides should have a U value
                under the Advanced scheme. A GGA Fe oxide run will therefore
                be excluded under the scheme.
        """
        Compatibility.__init__(self, "MITMatgen", compat_type)
Python
0
@@ -596,58 +596,8 @@ Set%0A -from pymatgen.util.decorators import cached_class%0A %0A%0Acl
369676cfacd35c7b3321edaef97bf64f063e7d50
Add nephrectomy model
radar/radar/models/nephrectomy.py
radar/radar/models/nephrectomy.py
Python
0.000007
@@ -0,0 +1,1221 @@ +from sqlalchemy import Column, Integer, ForeignKey, Date, Index%0Afrom sqlalchemy.orm import relationship%0A%0Afrom radar.database import db%0Afrom radar.models.common import MetaModelMixin, IntegerLookupTable%0A%0ANEPHRECTOMY_SIDES = OrderedDict(%5B%0A ('LEFT', 'Left'),%0A ('RIGHT', 'Right'),%0A ('BILATERAL', 'Bilateral'),%0A%5D)%0A%0ANEPHRECTOMY_KIDNEY_TYPES = OrderedDict(%5B%0A ('TRANSPLANT', 'Transplant'),%0A ('NATURAL', 'Natural'),%0A%5D)%0A%0ANEPHRECTOMY_ENTRY_TYPES = OrderedDict(%5B%0A ('O', 'Open'),%0A ('HA', 'Hand Assisted'),%0A ('TPL', 'Transperitoneal Laparoscopic'),%0A ('RPL', 'Retroperitoneal Laparoscopic'),%0A%5D)%0A%0A%0Aclass Nephrectomy(db.Model, MetaModelMixin):%0A __tablename__ = 'nephrectomy'%0A%0A id = Column(Integer, primary_key=True)%0A%0A patient_id = Column(Integer, ForeignKey('patients.id'), nullable=False)%0A patient = relationship('Patient')%0A%0A data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False)%0A data_source = relationship('DataSource')%0A%0A date = Column(Date, nullable=False)%0A kidney_side = Column(String, nullable=False)%0A kidney_type = Column(String, nullable=False)%0A entry_type = Column(String, nullable=False%0A%0AIndex('nephrectomy_patient_id_idx', Dialysis.patient_id)%0A
0378a225c5519ad39fee6a132c455e1848151a44
Create run_test.py
recipes/django-braces/run_test.py
recipes/django-braces/run_test.py
Python
0.000004
@@ -0,0 +1,187 @@ +import django%0Afrom django.conf import settings%0Asettings.configure(INSTALLED_APPS=%5B'braces', 'django.contrib.contenttypes', 'django.contrib.auth'%5D) %0Adjango.setup() %0A %0Aimport braces%0A
7e9c90c179df8666a75eef1610dbda764add1408
Create elec_temp_join.py
elec_temp_join.py
elec_temp_join.py
Python
0.000001
@@ -0,0 +1,912 @@ +import numpy as np%0Aimport pandas as pd%0Aimport geopandas as gpd%0Afrom geopandas import tools%0A%0Autility = '/home/akagi/Desktop/electricity_data/Electric_Retail_Service_Ter.shp'%0Autil = gpd.read_file(utility) %0A%0Aurbarea = '/home/akagi/GIS/census/cb_2013_us_ua10_500k/cb_2013_us_ua10_500k.shp'%0Aua = gpd.read_file(urbarea)%0A%0Aua = ua.to_crs(util.crs)%0A%0Aj = tools.sjoin(util, ua)%0A%0Agrid = '/home/akagi/gridcells.shp'%0Ag = gpd.read_file(grid)%0Acoords = g.centroid.apply(lambda x: x.coords%5B0%5D)%0Acoordstr = coords.apply(lambda x: 'data_%25s_%25s' %25 (x%5B1%5D, x%5B0%5D))%0Ag%5B'coordstr'%5D = coordstr%0A%0Aua_g = tools.sjoin(ua, g)%0Aua_g%5B'grid_geom'%5D = ua_g%5B'index_right'%5D.map(g%5B'geometry'%5D)%0Aua_g.apply(lambda x: (x%5B'geometry'%5D.centroid).distance(x%5B'grid_geom'%5D.centroid), axis=1)%0A%0Aua_g = ua_g.reset_index().loc%5Bua_g.reset_index().groupby('index').idxmin('dist')%5B'FID'%5D.values%5D.set_index('index')%0A%0Aj%5B'grid_cell'%5D = j%5B'index_right'%5D.map(ua_g%5B'coordstr'%5D)%0A
e9135583af7a862bd426b4a068743765c4604da3
add test for dials.convert_to_cbf (only works on dls computers)
test/command_line/test_convert_to_cbf.py
test/command_line/test_convert_to_cbf.py
Python
0
@@ -0,0 +1,763 @@ +from __future__ import absolute_import, division, print_function%0A%0Aimport glob%0Aimport os%0Aimport pytest%0A%0Aimport procrunner%0A%0Apytestmark = pytest.mark.skipif(%0A not os.access(%22/dls/i04/data/2019/cm23004-1/20190109/Eiger%22, os.R_OK),%0A reason=%22Test images not available%22,%0A)%0A%0A%[email protected](%0A %22master_h5%22,%0A %5B%0A %22/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1_master.h5%22,%0A %22/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1.nxs%22,%0A %5D,%0A)%0Adef test_convert_to_cbf(master_h5):%0A result = procrunner.run(%5B%22dials.convert_to_cbf%22, master_h5%5D)%0A assert result%5B%22exitcode%22%5D == 0%0A assert result%5B%22stderr%22%5D == %22%22%0A g = glob.glob(%22as_cbf_*.cbf%22)%0A assert len(g) == 900 # need a smaller test set!%0A
12731a74be889eff48e0e505de666ef3180794fe
add missing file
rmake/plugins/plugin.py
rmake/plugins/plugin.py
Python
0.000003
@@ -0,0 +1,2653 @@ +#%0A# Copyright (c) 2006 rPath, Inc.%0A#%0A# This program is distributed under the terms of the Common Public License,%0A# version 1.0. A copy of this license should have been distributed with this%0A# source file in a file called LICENSE. If it is not present, the license%0A# is always available at http://www.opensource.org/licenses/cpl.php.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# without any warranty; without even the implied warranty of merchantability%0A# or fitness for a particular purpose. See the Common Public License for%0A# full details.%0A#%0A%22%22%22%0ADefinition of plugins available for rmake plugins.%0A%0APlugin writers should derive from one of these classes.%0A%0AThe plugin will be called with the hooks described here, if the%0Acorrect program is being run. For example, when running rmake-server,%0Athe server hooks will be run.%0A%22%22%22%0Afrom rmake.lib.pluginlib import Plugin%0A%0ATYPE_CLIENT = 0%0ATYPE_SERVER = 1%0ATYPE_SUBSCRIBER = 2%0A%0Aclass ClientPlugin(Plugin):%0A%0A types = %5BTYPE_CLIENT%5D%0A%0A def client_preInit(self, main):%0A %22%22%22%0A Called right after plugins have been loaded.%0A %22%22%22%0A pass%0A%0A def client_preCommand(self, main, client):%0A %22%22%22%0A Called after the command-line client has instantiated, %0A but before the command has been executed.%0A %22%22%22%0A pass%0A%0Aclass ServerPlugin(Plugin):%0A%0A types = %5BTYPE_SERVER%5D%0A%0A def server_preConfig(self, main):%0A %22%22%22%0A Called before the configuration file has been read in.%0A %22%22%22%0A pass%0A%0A def server_preInit(self, main, argv):%0A %22%22%22%0A Called before the server has been instantiated.%0A %22%22%22%0A pass%0A%0A def server_postInit(self, server):%0A %22%22%22%0A Called after the server has been instantiated but before%0A serving is done.%0A %22%22%22%0A pass%0A%0A def server_pidDied(self, pid, status):%0A %22%22%22%0A Called when the server collects a child process that has died.%0A %22%22%22%0A pass%0A%0A def server_loop(self, server):%0A %22%22%22%0A Called once per server loop, between requests.%0A %22%22%22%0A pass%0A%0A def server_builderInit(self, server, builder):%0A %22%22%22%0A Called when the server instantiates a builder for a job.%0A %22%22%22%0A pass%0A%0A def server_shutDown(self, server):%0A %22%22%22%0A Called when the server is halting.%0A %22%22%22%0A pass%0A%0Aclass SubscriberPlugin(Plugin):%0A%0A types = %5BTYPE_SUBSCRIBER%5D%0A protocol = None%0A%0A def subscriber_get(self, uri, name):%0A %22%22%22%0A Should return a child of the StatusSubscirber class.%0A %22%22%22%0A pass%0A
3e0e898d0d3ab494edc5dbc65ccde4020f427be8
Create quiz-eliecer.py
laboratorios/quiz-eliecer.py
laboratorios/quiz-eliecer.py
Python
0.000001
@@ -0,0 +1,408 @@ + %0Abase=5%0Aaltura=7%0A%0Aperimetro=2*5+2*7%0Aprint (%22mi perimetro es%22 + str(perimetro))%0A%0Aarea=5*7%0Aprint (%22mi area es%22 + str (area))%0A%0Ametrop=perimetro/100%0Aprint (%22mi perimetro en metro es%22 + str(metrop))%0A%0Apulgadap=perimetro/2.54%0Aprint (%22mi perimetro en pulgada es%22 + str(pulgadap))%0A%0Ametroa=area/100%0Aprint (%22mi area en metro es%22 + str(metroa))%0A%0Apulgadaa=area/2.54%0Aprint (%22mi area en pulgada es%22 + str(pulgadaa))%0A%0A%0A%0A%0A%0A%0A
ee554d89d1c822537345ce4d03d2bff8783d7f1b
Disable nacl_integration due to #261724.
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import subprocess
import sys


def Main(args):
  pwd = os.environ.get('PWD', '')
  is_integration_bot = 'nacl-chrome' in pwd

  # This environment variable check mimics what
  # buildbot_chrome_nacl_stage.py does.
  is_win64 = (sys.platform in ('win32', 'cygwin') and
              ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
               '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))

  # On the main Chrome waterfall, we may need to control where the tests are
  # run.
  # If there is serious skew in the PPAPI interface that causes all of
  # the NaCl integration tests to fail, you can uncomment the
  # following block.  (Make sure you comment it out when the issues
  # are resolved.)  *However*, it is much preferred to add tests to
  # the 'tests_to_disable' list below.
  #if not is_integration_bot:
  #  return

  tests_to_disable = []

  # In general, you should disable tests inside this conditional.  This turns
  # them off on the main Chrome waterfall, but not on NaCl's integration bots.
  # This makes it easier to see when things have been fixed NaCl side.
  if not is_integration_bot:
    # http://code.google.com/p/nativeclient/issues/detail?id=2511
    tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')

  if sys.platform == 'darwin':
    # TODO(mseaborn) fix
    # http://code.google.com/p/nativeclient/issues/detail?id=1835
    tests_to_disable.append('run_ppapi_crash_browser_test')

  if sys.platform in ('win32', 'cygwin'):
    # This one is only failing for nacl_glibc on x64 Windows
    # but it is not clear how to disable only that limited case.
    # See http://crbug.com/132395
    tests_to_disable.append('run_inbrowser_test_runner')

  script_dir = os.path.dirname(os.path.abspath(__file__))
  nacl_integration_script = os.path.join(script_dir,
                                         'buildbot_chrome_nacl_stage.py')
  cmd = [sys.executable,
         nacl_integration_script,
         # TODO(ncbray) re-enable.
         # https://code.google.com/p/chromium/issues/detail?id=133568
         '--disable_glibc',
         '--disable_tests=%s' % ','.join(tests_to_disable)]
  cmd += args
  sys.stdout.write('Running %s\n' % ' '.join(cmd))
  sys.stdout.flush()
  return subprocess.call(cmd)


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
Python
0.000002
@@ -239,16 +239,134 @@ (args):%0A + if sys.platform == 'darwin':%0A print %3E%3E sys.stderr, %22SKIPPING NACL INTEGRATION DUE TO BUG #261724.%22%0A return 0%0A%0A pwd =
33448340d278da7e0653701d78cbab317893279d
Add a simple analysis tool to get some structural properties about an AG's specfile.
AG/datasets/analyze.py
AG/datasets/analyze.py
Python
0
@@ -0,0 +1,1776 @@ +#!/usr/bin/python%0A%0Aimport os%0Aimport sys%0Aimport lxml %0Afrom lxml import etree%0Aimport math %0A%0Aclass StatsCounter(object):%0A%0A prefixes = %7B%7D%0A cur_tag = None%0A%0A def start( self, tag, attrib ):%0A self.cur_tag = tag%0A%0A def end( self, tag ):%0A pass%0A #self.cur_tag = None %0A%0A def data( self, _data ):%0A if self.cur_tag != %22File%22 and self.cur_tag != %22Dir%22:%0A return %0A%0A data = _data.rstrip(%22/%22)%0A if data == %22%22:%0A return %0A%0A dir_name = os.path.dirname( data )%0A if dir_name == %22%22:%0A return %0A%0A if not self.prefixes.has_key( dir_name ):%0A self.prefixes%5B dir_name %5D = 0%0A%0A self.prefixes%5B dir_name %5D += 1%0A%0A def close( self ):%0A return %22closed!%22%0A%0Aif __name__ == %22__main__%22:%0A%0A counter = StatsCounter()%0A parser = etree.XMLParser( target=counter )%0A%0A fd = open( sys.argv%5B1%5D, %22r%22 )%0A %0A while True:%0A buf = fd.read( 32768 )%0A if len(buf) == 0:%0A break%0A%0A parser.feed( buf )%0A%0A result = parser.close()%0A%0A order = counter.prefixes.keys()%0A order.sort()%0A%0A size_bins = %7B%7D%0A%0A for path in order:%0A count = counter.prefixes%5Bpath%5D%0A print %22%25 15s %25s%22 %25 (count, path)%0A%0A size_bin = int(math.log(count, 10))%0A %0A if not size_bins.has_key( size_bin ):%0A size_bins%5B size_bin %5D = 1%0A%0A else:%0A size_bins%5B size_bin %5D += 1%0A%0A print %22%22%0A print %22sizes%22%0A max_bin = max( size_bins.keys() )%0A%0A bin_fmt = r%221e%250%22 + str( int(math.log(max_bin, 10)) + 1 ) + %22s%22%0A%0A for size in xrange( 0, max_bin + 1 ):%0A binsize = 0%0A if size_bins.has_key( size ):%0A binsize = size_bins%5Bsize%5D%0A%0A bin_str = bin_fmt %25 size%0A print %22%25s %25s%22 %25 (bin_str, binsize)%0A%0A%0A %0A
28b9fa5b1be386b0ee0641086c11897141177a36
Update analysis_fun.py
examples/funloc/analysis_fun.py
examples/funloc/analysis_fun.py
# -*- coding: utf-8 -*-
# Copyright (c) 2014, LABS^N
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
----------------------------------
Example experiment analysis script
----------------------------------

This sample script shows how to preprocess a simple MEG experiment
from start to finish.

The experiment was a simple audio/visual oddball detection task. One
potential purpose would be e.g. functional localization of auditory and
visual cortices.

Note that you will need to change the "acq_ssh" and "sss_ssh" parameters
to reflect your username/password on the relevant machines. You will also
need to set up public key authentication between your machine and the
two remote machines (acquisition/minea and SSS/kasga). Tutorial here:

    * https://help.ubuntu.com/community/SSH/OpenSSH/Keys

The deidentified structural directories for the one subject is needed to
do the forward and inverse solutions, extract this into your SUBJECTS_DIR
directory:

    * http://lester.ilabs.uw.edu/files/AKCLEE_110_slim.tar.gz

"""

import mnefun
from score import score
import numpy as np

try:
    # Use niprov as handler for events, or if it's not installed, ignore
    from niprov.mnefunsupport import handler
except ImportError:
    handler = None

params = mnefun.Params(tmin=-0.2, tmax=0.5, t_adjust=-4e-3,
                       n_jobs=6, n_jobs_mkl=1,
                       n_jobs_fir='cuda', n_jobs_resample='cuda',
                       decim=5, proj_sfreq=200, filter_length='5s')

params.subjects = ['subj_01', 'subj_02']
params.structurals = [None, 'AKCLEE_110_slim']  # None means use sphere
params.dates = [(2014, 2, 14), (2014, 2, 10)]
params.score = score  # scoring function to use
params.subject_indices = np.arange(2)  # which subjects to run
params.plot_drop_logs = False  # turn off for demo or plots will block
params.acq_ssh = 'minea'  # can also be e.g., "[email protected]"
params.acq_dir = '/sinuhe/data01/eric_non_space'
params.sws_ssh = 'kasga'
params.sws_dir = '/data06/larsoner'

# set the niprov handler to deal with events:
params.on_process = handler

params.run_names = ['%s_funloc']
params.get_projs_from = np.arange(1)
params.inv_names = ['%s']
params.inv_runs = [np.arange(1)]
params.runs_empty = ['%s_erm']
params.proj_nums = [[1, 1, 0],  # ECG: grad/mag/eeg
                    [1, 1, 2],  # EOG
                    [0, 0, 0]]  # Continuous (from ERM)
params.cov_method = 'shrunk'  # cleaner noise covariance regularization

# The scoring function needs to produce an event file with these values
params.in_names = ['Aud', 'Vis', 'AudDev', 'VisDev']
params.in_numbers = [10, 11, 20, 21]

# These lines define how to translate the above event types into evoked files
params.analyses = [
    'All',
    'AV',
]
params.out_names = [
    ['All'],
    params.in_names,
]
params.out_numbers = [
    [1, 1, 1, 1],       # Combine all trials
    params.in_numbers,  # Leave events split the same way they were scored
]
params.must_match = [
    [],
    [0, 1],  # only make the standard event counts match
]

# Set what will run
mnefun.do_processing(
    params,
    fetch_raw=True,     # Fetch raw recording files from acq machine
    do_score=True,      # do scoring
    # Make SUBJ/raw_fif/SUBJ_prebad.txt file with space-separated
    # list of bad MEG channel numbers, needed for running SSS.
    push_raw=True,      # Push raw files and SSS script to SSS workstation
    do_sss=True,        # Run SSS remotely
    fetch_sss=True,     # Fetch SSSed files
    do_ch_fix=True,     # Fix channel ordering
    # Examine SSS'ed files and make SUBJ/bads/bad_ch_SUBJ_post-sss.txt,
    # usually only contains EEG channels, needed for preprocessing.
    gen_ssp=True,       # Generate SSP vectors
    apply_ssp=True,     # Apply SSP vectors and filtering
    write_epochs=True,  # Write epochs to disk
    gen_covs=True,      # Generate covariances
    # Make SUBJ/trans/SUBJ-trans.fif file in mne_analyze, needed for fwd calc.
    gen_fwd=True,       # Generate forward solutions (and source space if needed)
    gen_inv=True,       # Generate inverses
    gen_report=True,    # Write mne report html of results to disk
    print_status=True,  # Print completeness status update
)
Python
0.000001
@@ -1959,17 +1959,17 @@ he/data0 -1 +2 /eric_no
16165a9387807d54b91873e95a677bfe5d251aba
Add healthcheck module to contrib
kitnirc/contrib/healthcheck.py
kitnirc/contrib/healthcheck.py
Python
0
@@ -0,0 +1,2458 @@ +import logging%0Aimport sys%0Aimport threading%0Aimport time%0A%0Afrom kitnirc.modular import Module%0A%0A%0A_log = logging.getLogger(__name__)%0A%0A%0Aclass HealthcheckModule(Module):%0A %22%22%22A KitnIRC module which checks connection health.%0A%0A By default, this module will request a PONG response from the server%0A if it hasn't seen any traffic in the past minute, and will assume the%0A connection has dropped and exit the process if it doesn't see any traffic%0A for 90 seconds.%0A%0A These delays can be changed by setting %22delay%22 and %22timeout%22 under the%0A %5Bhealthcheck%5D configuration section.%0A %22%22%22%0A%0A def __init__(self):%0A config = self.controller.config%0A%0A if config.has_option(%22healthcheck%22, %22delay%22):%0A self.delay = config.getint(%22healthcheck%22, %22delay%22)%0A else:%0A self.delay = 60%0A%0A if config.has_option(%22healthcheck%22, %22timeout%22):%0A self.timeout = config.getint(%22healthcheck%22, %22timeout%22)%0A else:%0A self.timeout = 90%0A%0A assert self.timeout %3E self.delay%0A%0A self.last_activity = time.clock()%0A self._stop = False%0A self.thread = threading.Thread(target=self.loop, name='healthcheck')%0A self.thread.daemon = True%0A%0A def start(self, *args, **kwargs):%0A super(HealthcheckModule, self).start(*args, **kwargs)%0A self._stop = False%0A self.thread.start()%0A%0A def stop(self, *args, **kwargs):%0A super(HealthcheckModule, self).stop(*args, **kwargs)%0A self._stop = True%0A # In any normal circumstances, the healthcheck thread should finish%0A # in about a second or less. We'll give it a little extra buffer.%0A self.thread.join(2.0)%0A if self.thread.is_alive():%0A _log.warning(%22Healthcheck thread alive 2s after shutdown request.%22)%0A%0A def loop(self):%0A _log.info(%22Healthcheck running: delay=%25d timeout=%25d%22,%0A self.delay, self.timeout)%0A while not self._stop:%0A elapsed = time.clock() - self.last_activity%0A%0A if elapsed %3E self.timeout:%0A _log.fatal(%22No incoming in last %25d seconds - exiting.%22, elapsed)%0A logging.shutdown()%0A sys.exit(1)%0A elif elapsed %3E self.delay:%0A self.controller.client.send(%22PING%22)%0A%0A time.sleep(1)%0A%0A @Module.handle(%22LINE%22)%0A def activity(self, line):%0A self.last_activity = time.clock()%0A%0A%0Amodule = HealthcheckModule%0A%0A# vim: set ts=4 sts=4 sw=4 et:%0A
2762c66cb7336e255b37f913326eb46ff218ca05
make sure we dont create bogus package.
dodo.py
dodo.py
"""dodo file. test + management stuff""" import glob import os import pytest from doit.tools import create_folder DOIT_CONFIG = {'default_tasks': ['checker', 'ut']} CODE_FILES = glob.glob("doit/*.py") TEST_FILES = glob.glob("tests/test_*.py") TESTING_FILES = glob.glob("tests/*.py") PY_FILES = CODE_FILES + TESTING_FILES def task_checker(): """run pyflakes on all project files""" for module in PY_FILES: yield {'actions': ["pyflakes %(dependencies)s"], 'name':module, 'file_dep':(module,), 'title': (lambda task: task.name)} def run_test(test): return not bool(pytest.main(test)) def task_ut(): """run unit-tests""" for test in TEST_FILES: yield {'name': test, 'actions': [(run_test, (test,))], 'file_dep': PY_FILES, 'verbosity': 0} ################## coverage tasks def task_coverage(): """show coverage for all modules including tests""" return {'actions': ["coverage run --parallel-mode `which py.test` ", "coverage combine", ("coverage report --show-missing %s" % " ".join(CODE_FILES + TEST_FILES)) ], 'verbosity': 2} def task_coverage_code(): """show coverage for all modules (exclude tests)""" return {'actions': ["coverage run --parallel-mode `which py.test` ", "coverage combine", "coverage report --show-missing %s" % " ".join(CODE_FILES)], 'verbosity': 2} def task_coverage_module(): """show coverage for individual modules""" to_strip = len('tests/test_') for test in TEST_FILES: source = "doit/" + test[to_strip:] yield {'name': test, 'actions': ["coverage run --parallel-mode `which py.test` -v %s" % test, "coverage combine", "coverage report --show-missing %s %s" % (source, test)], 'verbosity': 2} ############# python3 # distribute => setup.py test together with use_2to3 doesnt work hence this def task_test3(): """run unitests on python3""" this_folder = os.path.dirname(os.path.abspath(__file__)) test_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../doit3test") return {'actions': [ "rm -rf %s" % test_folder, "cp -r %s %s" % (this_folder, test_folder), "2to3 --write --nobackups %s" % test_folder, "py.test-3.2 %s" % test_folder, ], 'verbosity': 2, } ############################ website DOC_ROOT = 'doc/' DOC_BUILD_PATH = DOC_ROOT + '_build/html/' def task_epydoc(): """# generate API docs""" target_path = DOC_BUILD_PATH + 'api/' return {'actions':[(create_folder, [target_path]), ("epydoc --config %sepydoc.config " % DOC_ROOT + "-o %(targets)s")], 'file_dep': CODE_FILES, 'targets': [target_path]} def task_sphinx(): """generate website docs""" action = "sphinx-build -b html -d %s_build/doctrees %s %s" return {'actions': [action % (DOC_ROOT, DOC_ROOT, DOC_BUILD_PATH)]} def task_website(): """dodo file create website html files""" return {'actions': None, 'task_dep': ['epydoc', 'sphinx'], } def task_website_update(): """update website on sourceforge""" return {'actions': ["rsync -avP -e ssh %s* schettino72,[email protected]:htdocs/" % DOC_BUILD_PATH]} ################### dist def task_revision(): """create file with repo rev number""" return {'actions': ["hg tip --template '{rev}:{node}' > revision.txt"]} def task_manifest(): """create manifest file for distutils """ cmd = "hg manifest > MANIFEST;echo 'revision.txt' >> MANIFEST" return {'actions': [cmd]} def task_sdist(): """create source dist package""" return {'actions': ["python setup.py sdist"], 'task_dep': ['revision', 'manifest'], } def task_pypi(): """upload package to pypi""" return {'actions': ["python setup.py sdist upload"], 'task_dep': ['revision', 'manifest'], } # sfood -i doit/ | sfood-graph | dot -Tpng -o doit-dep.png
Python
0
@@ -3858,24 +3858,218 @@ stutils %22%22%22%0A +%0A def check_version():%0A # using a MANIFEST file directly is broken on python2.7%0A # http://bugs.python.org/issue11104%0A import sys%0A assert sys.version_info %3C (2,7)%0A%0A cmd = %22h @@ -4147,16 +4147,31 @@ ions': %5B +check_version, cmd%5D%7D%0A%0Ad
cad7db237c68139d3f4f7dd691205b207edb0b79
Refactor plugin api code into its own module
confluent/pluginapi.py
confluent/pluginapi.py
Python
0
@@ -0,0 +1,3338 @@ +# concept here that mapping from the resource tree and arguments go to%0A# specific python class signatures. The intent is to require%0A# plugin authors to come here if they *really* think they need new 'commands'%0A# and hopefully curtail deviation by each plugin author%0A%0A# have to specify a standard place for cfg selection of *which* plugin%0A# as well a standard to map api requests to python funcitons%0A# e.g. %3Cnodeelement%3E/power/state maps to some plugin HardwareManager.get_power/set_power%0A# selected by hardwaremanagement.method%0A# plugins can advertise a set of names if there is a desire for readable things%0A# exceptions to handle os images%0A# endpoints point to a class... usually, the class should have:%0A# -create%0A# -retrieve%0A# -update%0A# -delete%0A# functions. Console is special and just get's passed through%0A# see API.txt%0A%0Aimport os%0Aimport sys%0A%0Apluginmap = %7B%7D%0A%0A%0Adef load_plugins():%0A # To know our plugins directory, we get the parent path of 'bin'%0A path=os.path.dirname(os.path.realpath(__file__))%0A plugindir = os.path.realpath(os.path.join(path,'..','plugins'))%0A sys.path.append(plugindir)%0A plugins = set()%0A #two passes, to avoid adding both py and pyc files%0A for plugin in os.listdir(plugindir):%0A plugin = os.path.splitext(plugin)%5B0%5D%0A plugins.add(plugin)%0A for plugin in plugins:%0A if plugin.startswith('.'):%0A continue%0A tmpmod = __import__(plugin)%0A if 'plugin_names' in tmpmod.__dict__:%0A for name in tmpmod.plugin_names:%0A pluginmap%5Bname%5D = tmpmod%0A else:%0A pluginmap%5Bplugin%5D = tmpmod%0A%0Anodetree = %7B%0A '/': %5B'power/', 'boot/', 'console/', 'attributes/'%5D,%0A '/power/': %5B'state'%5D,%0A '/boot/': %5B'device'%5D,%0A '/console/': %5B'session', 'logging'%5D,%0A%7D%0A%0A# _ elements are for internal use (e.g. special console scheme)%0Anodeelements = %7B%0A '_console/session': %7B%0A 'pluginattrs': %5B'console.method' ,'hardwaremanagement.method'%5D,%0A %7D,%0A 'console/session': %7B%0A 'pluginattrs': %5B'console.method' ,'hardwaremanagement.method'%5D,%0A %7D,%0A 'power/state': %7B%0A 'pluginattrs': %5B'hardwaremanagement.method'%5D,%0A %7D,%0A 'boot/device': %7B%0A 'pluginattrs': %5B'hardwaremanagement.method'%5D,%0A %7D%0A%7D%0A%0Adef handle_path(path, operation, configmanager):%0A '''Given a full path request, return an object.%0A%0A The plugins should generally return some sort of iterator.%0A An exception is made for console/session, which should return%0A a class with read(), write(bytes), and close()%0A '''%0A if (path.startswith(%22/node/%22) or path.startswith(%22/system/%22) or%0A # single node requests%0A path.startswith(%22/vm/%22)):%0A nodeidx = path.find(%22/%22,1) + 1%0A node = path%5Bnodeidx:%5D%0A node, _, element = path.partition(%22/%22)%0A if element not in nodeelements:%0A raise Exception(%22Invalid element requested%22)%0A plugroute = nodeelements%5Belement%5D%0A if 'pluginattrs' in plugroute:%0A nodeattr = configmanager.get_node_attributes(%0A %5Bnode%5D, plugroute%5B'pluginattrs'%5D)%0A for attrname in plugroute%5B'pluginattrs'%5D:%0A if attrname in nodeattr:%0A return pluginmap%5Bnodeattr%5Battrname%5D%5D.__dict__%5Boperation%5D(%0A node=(node), operation=operation,%0A configmanager=configmanager)%0A%0A%0A%0A
bdcd69ed9cc0f87202f8d79e26fb58f42a4b95bb
Fix iteration, thanks @chrisseto
scrapi/base/__init__.py
scrapi/base/__init__.py
# Classes for scrAPI Harvesters
from __future__ import unicode_literals

import abc
import logging
from datetime import date, timedelta

from lxml import etree

from scrapi import util
from scrapi import requests
from scrapi.linter import lint
from scrapi.base.schemas import OAISCHEMA
from scrapi.base.helpers import update_schema
from scrapi.base.transformer import XMLTransformer
from scrapi.linter.document import RawDocument, NormalizedDocument

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class BaseHarvester(object):
    """ This is a base class that all harvesters should inheret from

    Defines the copy to unicde method, which is useful for getting standard
    unicode out of xml results.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def harvest(self, days_back=1):
        raise NotImplementedError

    @abc.abstractmethod
    def normalize(self, raw_doc):
        raise NotImplementedError

    def lint(self):
        return lint(self.harvest, self.normalize)


class XMLHarvester(BaseHarvester, XMLTransformer):

    def normalize(self, raw_doc):
        transformed = self.transform(etree.XML(raw_doc['doc']))
        transformed['source'] = self.name
        return NormalizedDocument(transformed)


class OAIHarvester(XMLHarvester):
    """ Create a harvester with a oai_dc namespace, that will harvest
    documents within a certain date range

    Contains functions for harvesting from an OAI provider, normalizing,
    and outputting in a way that scrapi can understand, in the most
    generic terms possible.

    For more information, see the OAI PMH specification:
    http://www.openarchives.org/OAI/openarchivesprotocol.html
    """
    NAMESPACES = {'dc': 'http://purl.org/dc/elements/1.1/',
                  'oai_dc': 'http://www.openarchives.org/OAI/2.0/',
                  'ns0': 'http://www.openarchives.org/OAI/2.0/'}
    RECORDS_URL = '?verb=ListRecords'
    META_PREFIX_DATE = '&metadataPrefix=oai_dc&from={}'
    RESUMPTION = '&resumptionToken='

    DEFAULT_ENCODING = 'UTF-8'
    record_encoding = None

    def __init__(self, name, base_url, timezone_granularity=False,
                 timeout=0.5, property_list=None, approved_sets=None):
        self.NAME = name
        self.base_url = base_url
        self.property_list = property_list or ['date', 'language', 'type']
        self.approved_sets = approved_sets
        self.timeout = timeout
        self.timezone_granularity = timezone_granularity

    @property
    def name(self):
        return self.NAME

    @property
    def namespaces(self):
        return self.NAMESPACES

    @property
    def schema(self):
        properties = {
            'properties': {
                item: (
                    '//dc:{}/node()'.format(item),
                    '//ns0:{}/node()'.format(item),
                    self.resolve_property
                ) for item in self.property_list
            }
        }
        return update_schema(OAISCHEMA, properties)

    def resolve_property(self, dc, ns0):
        if isinstance(dc, list) and isinstance(ns0, list):
            ret = dc.extend(ns0)
            return [val for val in ret if ret]
        elif not dc:
            return ns0
        elif not ns0:
            return dc
        else:
            return [dc, ns0]

    def harvest(self, days_back=1):
        start_date = str(date.today() - timedelta(int(days_back)))
        records_url = self.base_url + self.RECORDS_URL
        request_url = records_url + self.META_PREFIX_DATE.format(start_date)

        if self.timezone_granularity:
            request_url += 'T00:00:00Z'

        records = self.get_records(request_url, start_date)

        rawdoc_list = []
        for record in records:
            doc_id = record.xpath(
                'ns0:header/ns0:identifier',
                namespaces=self.NAMESPACES)[0].text
            record = etree.tostring(record, encoding=self.record_encoding)
            rawdoc_list.append(RawDocument({
                'doc': record,
                'source': util.copy_to_unicode(self.name),
                'docID': util.copy_to_unicode(doc_id),
                'filetype': 'xml'
            }))

        return rawdoc_list

    def get_records(self, url, start_date, resump_token=''):
        data = requests.get(url, throttle=self.timeout)
        doc = etree.XML(data.content)

        records = doc.xpath(
            '//ns0:record',
            namespaces=self.NAMESPACES
        )
        token = doc.xpath(
            '//ns0:resumptionToken/node()',
            namespaces=self.NAMESPACES
        )
        if len(token) == 1:
            base_url = url.replace(
                self.META_PREFIX_DATE.format(start_date), '')
            base_url = base_url.replace(self.RESUMPTION + resump_token, '')
            url = base_url + self.RESUMPTION + token[0]
            records += self.get_records(url, start_date,
                                        resump_token=token[0])

        return records

    def normalize(self, raw_doc):
        str_result = raw_doc.get('doc')
        result = etree.XML(str_result)

        if self.approved_sets:
            set_spec = result.xpath(
                'ns0:header/ns0:setSpec/node()',
                namespaces=self.NAMESPACES
            )
            # check if there's an intersection between the approved sets and the
            # setSpec list provided in the record. If there isn't, don't normalize.
            if not {x.replace('publication:', '') for x in set_spec}.intersection(self.approved_sets):
                logger.info('Series {} not in approved list'.format(set_spec))
                return None

        status = result.xpath('ns0:header/@status',
                              namespaces=self.NAMESPACES)
        if status and status[0] == 'deleted':
            logger.info('Deleted record, not normalizing {}'.format(raw_doc['docID']))
            return None

        return super(OAIHarvester, self).normalize(raw_doc)
Python
0
@@ -3181,19 +3181,19 @@ ret if -ret +val %5D%0A
a9ca7f2f22551256213ecd32047022048c72db5c
Add Python 3 Script for Converting Image Types
scripts/convert_svgs.py
scripts/convert_svgs.py
Python
0
@@ -0,0 +1,538 @@ +import cairosvg%0Aimport os%0A%0A# MUST RUN IN PYTHON 3 and pip install cairosvg%0A%0Afile_dir = '../data/hough_test/Test_Set_1/'%0A%0Asvgs = os.listdir(os.path.join(file_dir, 'SVGs'))%0A%0Afor svg in svgs:%0A name = svg.split('.svg')%5B0%5D%0A cairosvg.svg2png(url=os.path.join(file_dir, 'SVGs', svg),%0A write_to=os.path.join(file_dir, 'PNGs', '%7B0%7D.png'.format(name)), dpi=600)%0A # cairosvg.svg2pdf(url=os.path.join(file_dir, 'SVGs', svg),%0A # write_to=os.path.join(file_dir, 'PDFs', '%7B0%7D.pdf'.format(name)), dpi=600)
c3e5c8f9691b55785a25d86fd647d6aeabbaaf8b
Fix pep8
test/configuration/__init__.py
test/configuration/__init__.py
import unittest
from os.path import dirname, join

from mycroft.configuration import ConfigurationLoader, ConfigurationManager, \
    DEFAULT_CONFIG, SYSTEM_CONFIG, USER_CONFIG, RemoteConfiguration

__author__ = 'jdorleans'


class AbstractConfigurationTest(unittest.TestCase):
    def setUp(self):
        self.config_path = join(dirname(__file__), 'mycroft.conf')

    @staticmethod
    def create_config(lang='en-us', module='mimic', voice="ap"):
        config = {
            'lang': lang,
            'tts': {
                'module': module,
                module: {'voice': voice}
            }
        }
        return config

    def assert_config(self, config, lang='en-us', module='mimic', voice="ap"):
        self.assertIsNotNone(config)
        lan = config.get('lang')
        self.assertIsNotNone(lan)
        self.assertEquals(lan, lang)
        tts = config.get('tts')
        self.assertIsNotNone(tts)
        mod = tts.get('module')
        self.assertEquals(mod, module)
        voi = tts.get(mod, {}).get("voice")
        self.assertEquals(voi, voice)


class ConfigurationLoaderTest(AbstractConfigurationTest):
    def test_init_config_with_defaults(self):
        self.assertEquals(ConfigurationLoader.init_config(), {})

    def test_init_config_with_new_config(self):
        config = {'a': 'b'}
        self.assertEquals(ConfigurationLoader.init_config(config), config)

    def test_init_locations_with_defaults(self):
        locations = [DEFAULT_CONFIG, SYSTEM_CONFIG, USER_CONFIG]
        self.assertEquals(ConfigurationLoader.init_locations(), locations)

    def test_init_locations_with_new_location(self):
        locations = [self.config_path]
        self.assertEquals(ConfigurationLoader.init_locations(locations),
                          locations)

    def test_validate_data(self):
        try:
            ConfigurationLoader.validate({}, [])
        except TypeError:
            self.fail()

    def test_validate_data_with_invalid_data(self):
        self.assertRaises(TypeError, ConfigurationLoader.validate)

    def test_load(self):
        self.assert_config(ConfigurationLoader.load())

    def test_load_with_override_custom(self):
        config = self.create_config('pt-br', 'espeak', 'f1')
        config = ConfigurationLoader.load(config)
        self.assert_config(config)

    def test_load_with_override_default(self):
        config = self.create_config()
        config = ConfigurationLoader.load(config, [self.config_path])
        self.assert_config(config, 'pt-br', 'espeak', 'f1')

    def test_load_with_extra_custom(self):
        my_config = {'key': 'value'}
        config = ConfigurationLoader.load(my_config)
        self.assert_config(config)
        value = config.get('key', None)
        self.assertIsNotNone(value)
        self.assertEquals(value, my_config.get('key'))

    def test_load_with_invalid_config_type(self):
        self.assertRaises(TypeError, ConfigurationLoader.load, 'invalid_type')

    def test_load_with_invalid_locations_type(self):
        self.assertRaises(TypeError, ConfigurationLoader.load, None,
                          self.config_path)

    def test_load_with_invalid_locations_path(self):
        locations = ['./invalid/mycroft.conf', './invalid_mycroft.conf']
        config = ConfigurationLoader.load(None, locations, False)
        self.assertEquals(config, {})

@unittest.skip('Disabled while unittests are brought upto date')
class RemoteConfigurationTest(AbstractConfigurationTest):
    def test_validate_config(self):
        try:
            RemoteConfiguration.validate(self.create_config())
        except TypeError:
            self.fail()

    def test_validate_config_with_invalid_config(self):
        self.assertRaises(TypeError, RemoteConfiguration.validate)

    def test_load_without_remote_config(self):
        config = self.create_config()
        self.assertEquals(RemoteConfiguration.load(config), config)


class ConfigurationManagerTest(AbstractConfigurationTest):
    def test_load_defaults(self):
        ConfigurationManager.load_defaults()
        self.assert_config(ConfigurationManager.load_defaults())

    def test_load_local(self):
        ConfigurationManager.load_defaults()
        self.assert_config(ConfigurationManager.load_local())

    def test_load_local_with_locations(self):
        ConfigurationManager.load_defaults()
        config = ConfigurationManager.load_local([self.config_path])
        self.assert_config(config, 'pt-br', 'espeak', 'f1')

    def test_load_remote(self):
        ConfigurationManager.load_defaults()
        self.assert_config(ConfigurationManager.load_remote())

    def test_get(self):
        ConfigurationManager.load_defaults()
        self.assert_config(ConfigurationManager.get())

    def test_load_get_with_locations(self):
        ConfigurationManager.load_defaults()
        config = ConfigurationManager.get([self.config_path])
        self.assert_config(config, 'pt-br', 'espeak', 'f1')
Python
0.000001
@@ -3374,16 +3374,17 @@ g, %7B%7D)%0A%0A +%0A @unittes
392be3310efc812686c0d43f7ca884d9c730a879
Add stop-by-time script to end simulation after a specified amount of simulated time
scripts/stop-by-time.py
scripts/stop-by-time.py
Python
0
@@ -0,0 +1,655 @@ +# End ROI after x nanoseconds%0A# Usage: -s stop-by-time:1000000 # End after 1 ms of simulated time%0A%0Aimport sim%0A%0Aclass StopByTime:%0A%0A def setup(self, args):%0A args = dict(enumerate((args or '').split(':')))%0A self.time = long(args.get(0, 1e6))%0A self.done = False%0A sim.util.Every(self.time * sim.util.Time.NS, self.periodic, roi_only = True)%0A%0A def periodic(self, time, time_delta):%0A if self.done:%0A return%0A elif time %3E= self.time:%0A print '%5BSTOPBYTIME%5D Ending ROI after %25.0f nanoseconds' %25 (time / 1e6)%0A sim.control.set_roi(False)%0A self.done = True%0A sim.control.abort()%0A%0Asim.util.register(StopByTime())%0A
d195d67fe3e9c3e12bb978bfaa98276e8f9f7140
allow loading ctx from expression
script_runner/ctx_server.py
script_runner/ctx_server.py
#########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    * See the License for the specific language governing permissions and
#    * limitations under the License.

import argparse
import os
import sys
import importlib

from ctx_proxy import UnixCtxProxy


class CtxProxyServer(object):

    def __init__(self, ctx, socket_path=None):
        self.ctx = ctx
        self.proxy = UnixCtxProxy(ctx, socket_path)
        self.stopped = False

    def close(self):
        self.proxy.close()

    def stop(self):
        self.stopped = True

    def serve(self):
        while not self.stopped:
            try:
                self.proxy.poll_and_process(timeout=0.1)
            except RuntimeError, e:
                print 'ignoring: {}'.format(e)


def parse_args(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--socket-path', default=None)
    parser.add_argument('module_path')
    return parser.parse_args(args)


def load_ctx(module_path, **kwargs):
    module_dir = os.path.dirname(module_path)
    if module_dir not in sys.path:
        sys.path.append(module_dir)
    ctx_module = importlib.import_module(
        os.path.basename(os.path.splitext(module_path)[0]))
    ctx_module = reload(ctx_module)
    ctx = getattr(ctx_module, 'ctx')
    if callable(ctx):
        ctx = ctx(**kwargs)
    return ctx


def admin_function(ctx_server, module_path):
    def admin(action, **kwargs):
        if action == 'load':
            ctx = load_ctx(module_path, **kwargs)
            ctx._admin_ = admin_function(ctx_server, module_path)
            ctx_server.proxy.ctx = ctx
        elif action == 'stop':
            ctx_server.stop()
        else:
            raise RuntimeError('unknown action: {}'.format(action))
    return admin


def main():
    args = parse_args()
    ctx = load_ctx(args.module_path)
    server = CtxProxyServer(ctx, args.socket_path)
    ctx._admin_ = admin_function(server, args.module_path)
    print server.proxy.socket_url
    server.serve()


if __name__ == '__main__':
    main()
Python
0
@@ -686,16 +686,25 @@ portlib%0A +import re %0A%0Afrom c @@ -1383,20 +1383,102 @@ nt(' +-e', '--expression', default=None)%0A parser.add_argument('-p', '-- module -_ +- path' +, default=None )%0A @@ -1516,32 +1516,49 @@ )%0A%0A%0Adef load_ctx +_from_module_path (module_path, ** @@ -1553,26 +1553,16 @@ ule_path -, **kwargs ):%0A m @@ -1819,39 +1819,647 @@ -ctx = getattr(ctx_module, 'ctx' +return getattr(ctx_module, 'ctx')%0A%0A%0A# impl taken from 'pythonpy' package%0Adef load_ctx_from_expression(expression, prefix=''):%0A regex = r%22(%7B%7D%5Ba-zA-Z_%5D%5Ba-zA-Z0-9_%5D*)%5C.?%22.format(prefix)%0A matches = set(re.findall(regex, expression))%0A for module_name in matches:%0A try:%0A module = importlib.import_module(module_name)%0A globals()%5Bmodule_name%5D = module%0A load_ctx_from_expression(expression, prefix='%7B%7D.'.format(module_name))%0A except ImportError as e:%0A pass%0A if not prefix:%0A return eval(expression)%0A%0A%0Adef load_ctx(load_ctx_function, **kwargs):%0A ctx = load_ctx_function( )%0A @@ -2554,27 +2554,33 @@ server, -module_path +load_ctx_function ):%0A d @@ -2663,27 +2663,33 @@ oad_ctx( -module_path +load_ctx_function , **kwar @@ -2745,27 +2745,33 @@ server, -module_path +load_ctx_function )%0A @@ -3009,39 +3009,448 @@ -ctx = load_ctx(args.module_path +if (args.module_path and args.expression) or not %5C%0A (args.module_path or args.expression):%0A sys.exit('ctx-server: error: use either --module-path or --expression')%0A if args.module_path:%0A def load_ctx_function():%0A return load_ctx_from_module_path(args.module_path)%0A else:%0A def load_ctx_function():%0A return load_ctx_from_expression(args.expression)%0A%0A ctx = load_ctx(load_ctx_function )%0A @@ -3600,32 +3600,33 @@ -args.module_path +load_ctx_function )%0A pr
30a0b17d028f279a9877150ac4eb60b1ce135fa2
Add check script for MultiplyHueAndSaturation
checks/check_multiply_hue_and_saturation.py
checks/check_multiply_hue_and_saturation.py
Python
0
@@ -0,0 +1,1078 @@ +from __future__ import print_function, division%0A%0Aimport numpy as np%0A%0Aimport imgaug as ia%0Afrom imgaug import augmenters as iaa%0A%0A%0Adef main():%0A image = ia.quokka_square((128, 128))%0A images_aug = %5B%5D%0A%0A for mul in np.linspace(0.0, 2.0, 10):%0A aug = iaa.MultiplyHueAndSaturation(mul)%0A image_aug = aug.augment_image(image)%0A images_aug.append(image_aug)%0A%0A for mul_hue in np.linspace(0.0, 5.0, 10):%0A aug = iaa.MultiplyHueAndSaturation(mul_hue=mul_hue)%0A image_aug = aug.augment_image(image)%0A images_aug.append(image_aug)%0A%0A for mul_saturation in np.linspace(0.0, 5.0, 10):%0A aug = iaa.MultiplyHueAndSaturation(mul_saturation=mul_saturation)%0A image_aug = aug.augment_image(image)%0A images_aug.append(image_aug)%0A%0A ia.imshow(ia.draw_grid(images_aug, rows=3))%0A%0A images_aug = %5B%5D%0A images_aug.extend(iaa.MultiplyHue().augment_images(%5Bimage%5D * 10))%0A images_aug.extend(iaa.MultiplySaturation().augment_images(%5Bimage%5D * 10))%0A ia.imshow(ia.draw_grid(images_aug, rows=2))%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
769036ffd7a21477a9133c58b352711d85c7a7a0
add regression test for monkey patching of Queue
test/test_queue_monkeypatch.py
test/test_queue_monkeypatch.py
Python
0
@@ -0,0 +1,611 @@ +from __future__ import absolute_import%0A%0Aimport unittest%0A%0Aimport urllib3%0Afrom urllib3.exceptions import EmptyPoolError%0Aimport Queue%0A%0Aclass BadError(Exception):%0A %22%22%22%0A This should not be raised.%0A %22%22%22%0A pass%0A%0AQueue.Empty = BadError%0A%0A%0Aclass TestConnectionPool(unittest.TestCase):%0A %22%22%22%0A %22%22%22%0A def test_queue_monkeypatching(self):%0A http = urllib3.HTTPConnectionPool(host=%22localhost%22, block=True)%0A first_conn = http._get_conn(timeout=1)%0A with self.assertRaises(EmptyPoolError):%0A second_conn = http._get_conn(timeout=1)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
338559737e34ca395cec895ac8e822fc3147c7aa
Add basic code
tule.py
tule.py
Python
0
@@ -0,0 +1,2148 @@ +def calcLength(x,y,z=0):%0A return (x**2 + y**2 + z**2)**0.5%0A%0A#lengths are in feet%0AL = 92; W=68; H=22; MidXY=(W/2,L/2)%0A%0Adef feetToYards(inFeet):%0A return inFeet/3.0%0A%0Adef yardsToFeet(inYards):%0A return inYards * 3.0%0A%0A#widthOfStrand is how wide the tule piece (in feet)%0Adef findTotal(widthOfStrand,z=0,printTotal=False):%0A '''%0A Find total in yards.%0A Input:%0A widthOfStrand (number of feet, width of tule)%0A z=0 (how many feet it will %22drape%22 down linearly)%0A printTotal=False (Friendly print)%0A%0A Output:%0A tuple -%3E %0A The length needed (in yards),%0A list of strand lengths (in yards)%0A '''%0A #Length of each break points%0A strandLengths = %5B%5D%0A #Total length%0A total = 0%0A i=0%0A #find along width%0A alongWidth = 0%0A while(alongWidth %3C= W):%0A newX,newY = (MidXY%5B0%5D - alongWidth,MidXY%5B1%5D-L)%0A total += calcLength(newX,newY,z)%0A alongWidth += widthOfStrand%0A # print %22width on %25d: %25f %25f%22 %25(i,alongWidth,total); i+=1%0A%0A #find along length, around gym%0A alongLength = 0; i=0%0A while(alongLength %3C= L):%0A newX,newY = (MidXY%5B0%5D - W,MidXY%5B1%5D- alongLength)%0A # Length of strand needed (in yards)%0A strandLength = calcLength(newX,newY,z)%0A # Add Break point length%0A strandLengths.append(strandLength)%0A # Total length%0A total += strandLength%0A alongLength += widthOfStrand%0A # print %22length on %25d: %25f %25f%22 %25(i,alongLength,total); i+=1%0A #convert to yards%0A total = feetToYards(total)%0A strandLengths = map(feetToYards,strandLengths)%0A #all the strand lengths%0A strandLengths *=2 %0A if printTotal:%0A print '%5CnTotal Length For Room: %25.2f yards' %25(2*total)%0A # Return total length in yards and a list of strand lengths needed%0A return (2*total , strandLengths)%0A%0Adef totalCost(costPerYard,widthOfStrandInFeet,drapingInFeet,printTotal=False):%0A total = findTotal(widthOfStrandInFeet,drapingInFeet,printTotal)%0A cost = total * costPerYard%0A print %22Total length %25.2f yards for $%25.2f (@ $%25.2f per yard)%22 %25(total,cost,costPerYard)%0A return cost%0A%0Aprint %22Imported 'tule.py'%22%0A
242479ace03928b20dc86806f7592ec1148b615b
Add integration test for DraftService.
service/test/integration/test_draft_service.py
service/test/integration/test_draft_service.py
Python
0
@@ -0,0 +1,1203 @@ +#%0A# Copyright (c) 2014 ThoughtWorks, Inc.%0A#%0A# Pixelated is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# Pixelated is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with Pixelated. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0Afrom twisted.internet import defer%0A%0Afrom test.support.integration import SoledadTestBase, MailBuilder%0A%0A%0Aclass DraftServiceTest(SoledadTestBase):%0A%0A @defer.inlineCallbacks%0A def test_store_and_load_draft(self):%0A input_mail = MailBuilder().with_body('some test text').build_input_mail()%0A%0A stored_draft = yield self.draft_service.create_draft(input_mail)%0A%0A draft = yield self.mail_store.get_mail(stored_draft.ident, include_body=True)%0A%0A self.assertEqual('some test text', draft.body)%0A
c58be8c77fdad5ec8b6c9da9ba6cfc45ae0f6d07
fix typo in fuzz_addresses.py
fuzz-addresses.py
fuzz-addresses.py
Python
0.998582
@@ -0,0 +1,1105 @@ +import sys%0Aimport csv%0Afrom dateutil.parser import parse%0Afrom datetime import datetime%0A%0A#Take a csv with datetime stamps and addresses split across remaing cells and output%0A#a fuzzed csv that contains two columns. A datetime stamp of the first day of the year%0A#in which the location occured, and the country.%0A%0A#call it with python fuzz_addresses.py inputfile.csv outputfile.csv%0A%0A#open the CSV output file%0Ainputfile = open((sys.argv%5B1%5D), %22r%22)%0Areader = csv.reader(inputfile)%0A%0A#open the CSV output file%0Aoutputfile = open((sys.argv%5B2%5D), %22wb%22)%0Awriter = csv.writer(outputfile)%0A%0Afor row in reader:%0A #for the date, which is the first column, strip it down to the year, %0A #and make everything January 1 if you don't care about fuzzing the date, %0A #change the line below to when=row%5B0%5D%0A when=parse(str( parse(row%5B0%5D).year ) +'-1-1')%0A%0A #for the address, get rid of everything but the country which is the last column that has an entry%0A country=''%0A while country=='':%0A country = row.pop(-1)%0A where=country%0A%0A #write the new row%0A writer.writerow(%5Bwhen, where%5D)%0A%0A#close the files%0Ainputfile.close()%0Aoutputfile.close()%0A
0ff705b6bbe2d2844d6b947ca2aa8fc9cc9ead66
Create PedidoDeletar.py
backend/Models/Grau/PedidoDeletar.py
backend/Models/Grau/PedidoDeletar.py
Python
0
@@ -0,0 +1,328 @@ +from Framework.Pedido import Pedido%0Afrom Framework.ErroNoHTTP import ErroNoHTTP%0A%0Aclass PedidoDeletar(Pedido):%0A%0A%09def __init__(self,variaveis_do_ambiente):%0A%09%09super(PedidoDeletar, self).__init__(variaveis_do_ambiente)%0A%09%09try:%0A%09%09%09self.id = self.corpo%5B'id'%5D%09%09%09%0A%09%09except:%0A%09%09%09raise ErroNoHTTP(400)%0A%09%09%0A%09def getId(self):%0A%09%09return self.id%0A
d85442d5961602ae91c385a65e9503c409316b3f
Scrub stale data from redis
bin/scrub_stale_lists.py
bin/scrub_stale_lists.py
Python
0.000001
@@ -0,0 +1,2278 @@ +#!/usr/bin/env python%0A%0Aimport sys%0Aimport os%0Aimport time%0Aimport redis%0Aimport requests%0Aimport logging%0Afrom urlparse import urlparse%0Afrom datetime import timedelta%0A%0A%0Adef main(rds):%0A pf = %22coalesce.v1.%22%0A%0A tasks_removed = 0%0A lists_removed = 0%0A%0A list_keys = rds.smembers(pf + %22list_keys%22)%0A for key in list_keys:%0A logging.debug(%22Inspecting list: %22 + pf + key)%0A coalesce_list = rds.lrange(pf + %22lists.%22 + key, start=0, end=-1)%0A for taskId in coalesce_list:%0A logging.debug(%22 - inspecting task: %22 + taskId)%0A if not is_pending(taskId):%0A logging.debug(%22Removing stale task: %22 + taskId)%0A rds.lrem(pf + 'lists.' + key, taskId, num=0)%0A tasks_removed += 1%0A if not rds.llen(pf + %22lists.%22 + key):%0A logging.debug(%22Removing stale list key: %22 + key)%0A rds.srem(pf + %22list_keys%22, key)%0A lists_removed += 1%0A%0A return tasks_removed, lists_removed%0A%0Adef is_pending(taskId):%0A url = 'https://queue.taskcluster.net/v1/task/%25s/status' %25 (taskId)%0A try:%0A r = requests.get(url, timeout=3)%0A if r.status_code == 404:%0A logging.debug(%22Queue service returned 404 for task: %22 + taskId)%0A return False%0A if not r.json()%5B'status'%5D%5B'state'%5D == 'pending':%0A return False%0A except:%0A logging.debug(%22Failed to get status%22)%0A return True%0A%0Aif __name__ == '__main__':%0A logging.basicConfig(format='%25(asctime)s - %25(levelname)s - %25(message)s',%0A level=logging.DEBUG)%0A%0A try:%0A redis_url = urlparse(os.environ%5B'REDIS_URL'%5D)%0A except KeyError:%0A logging.exception(%22Missing REDIS_URL env variable%22)%0A sys.exit(1)%0A%0A rds = redis.Redis(host=redis_url.hostname,%0A port=redis_url.port,%0A password=redis_url.password)%0A%0A try:%0A start = time.time()%0A logging.info(%22Starting scrub task%22)%0A%0A tasks_removed, lists_removed = main(rds)%0A elapsed = time.time() - start%0A logging.info(%22Completed scrub task in %25s%22 %25 (str(timedelta(seconds=elapsed))))%0A logging.info(%22Removed %25s lists and %25s tasks%22 %25 (tasks_removed, lists_removed))%0A except Exception:%0A logging.exception(%22Fatal error in main loop%22)%0A
9870fdd4b0996254216ff85a4dc0f9706843ca50
Add test for nested while with exc and break.
tests/basics/while_nest_exc.py
tests/basics/while_nest_exc.py
Python
0
@@ -0,0 +1,198 @@ +# test nested whiles within a try-except%0A%0Awhile 1:%0A print(1)%0A try:%0A print(2)%0A while 1:%0A print(3)%0A break%0A except:%0A print(4)%0A print(5)%0A break%0A
6a541b8d5b7c2c742420bbbe758866daef804e90
Add a unit test to verify controller objects do not persist across classes. (#483)
tests/mobly/test_suite_test.py
tests/mobly/test_suite_test.py
Python
0
@@ -0,0 +1,3174 @@ +# Copyright 2018 Google Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport os%0Aimport mock%0Aimport shutil%0Aimport tempfile%0A%0Afrom future.tests.base import unittest%0A%0Afrom mobly import base_test%0Afrom mobly import config_parser%0Afrom mobly import records%0Afrom mobly import test_runner%0A%0Afrom tests.lib import mock_controller%0Afrom tests.lib import utils%0A%0A%0Aclass TestSuiteTest(unittest.TestCase):%0A %22%22%22Tests for use cases of creating Mobly test suites.%0A%0A Tests here target a combination of test_runner and base_test code.%0A %22%22%22%0A%0A def setUp(self):%0A self.tmp_dir = tempfile.mkdtemp()%0A self.mock_test_cls_configs = config_parser.TestRunConfig()%0A self.summary_file = os.path.join(self.tmp_dir, 'summary.yaml')%0A self.mock_test_cls_configs.summary_writer = records.TestSummaryWriter(%0A self.summary_file)%0A self.mock_test_cls_configs.log_path = self.tmp_dir%0A self.mock_test_cls_configs.user_params = %7B%22some_param%22: %22hahaha%22%7D%0A self.mock_test_cls_configs.reporter = mock.MagicMock()%0A self.base_mock_test_config = config_parser.TestRunConfig()%0A self.base_mock_test_config.test_bed_name = 'SampleTestBed'%0A self.base_mock_test_config.controller_configs = %7B%7D%0A self.base_mock_test_config.user_params = %7B%0A 'icecream': 42,%0A 'extra_param': 'haha'%0A %7D%0A self.base_mock_test_config.log_path = self.tmp_dir%0A%0A def tearDown(self):%0A shutil.rmtree(self.tmp_dir)%0A%0A def test_controller_object_not_persistent_across_classes_in_the_same_run(%0A self):%0A self.foo_test_controller_obj_id = None%0A self.bar_test_controller_obj_id = None%0A test_run_config = self.base_mock_test_config.copy()%0A test_run_config.controller_configs = %7B'MagicDevice': %5B%7B'serial': 1%7D%5D%7D%0A%0A class FooTest(base_test.BaseTestClass):%0A def setup_class(cls):%0A cls.controller = cls.register_controller(mock_controller)%5B0%5D%0A self.foo_test_controller_obj_id = id(cls.controller)%0A%0A class BarTest(base_test.BaseTestClass):%0A def setup_class(cls):%0A cls.controller = cls.register_controller(mock_controller)%5B0%5D%0A self.bar_test_controller_obj_id = id(cls.controller)%0A%0A tr = test_runner.TestRunner(self.tmp_dir,%0A test_run_config.test_bed_name)%0A tr.add_test_class(test_run_config, FooTest)%0A tr.add_test_class(test_run_config, BarTest)%0A tr.run()%0A self.assertNotEqual(self.foo_test_controller_obj_id,%0A self.bar_test_controller_obj_id)%0A%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
bf28aa8fbe9aa735d017f935aefb89c5ed48f836
Add bubble sort implementation
aids/sorting_and_searching/bubble_sort.py
aids/sorting_and_searching/bubble_sort.py
Python
0
@@ -0,0 +1,375 @@ +'''%0AIn this module, we implement bubble sort%0A%0ATime complexity: O(n %5E 2)%0A%0A'''%0A%0A%0Adef bubble_sort(arr):%0A '''%0A Sort array using bubble sort%0A%0A '''%0A for index_x in xrange(len(arr)):%0A for index_y in xrange(len(arr) - 1, index_x, -1):%0A if arr%5Bindex_y%5D %3C arr%5Bindex_y - 1%5D:%0A arr%5Bindex_y%5D, arr%5Bindex_y - 1%5D = arr%5Bindex_y - 1%5D, arr%5Bindex_y%5D%09%0A
73cab4c1e0a591504176011b53b9774c8782238e
test kalman
tests/tsdb/test_tsdb_kalman.py
tests/tsdb/test_tsdb_kalman.py
Python
0.000006
@@ -0,0 +1,1960 @@ +from tsdb import TSDBClient, TSDB_REST_Client%0Aimport timeseries as ts%0Aimport numpy as np%0Aimport subprocess%0Aimport unittest%0Aimport asyncio%0Aimport asynctest%0Aimport time%0A%0A %0Aclass Test_TSDB_Kalman(asynctest.TestCase):%0A%0A def setUp(self):%0A #############%0A ### SETUP ###%0A #############%0A # We'll use a subprocess to run our server script, according to:%0A # http://stackoverflow.com/questions/3781851/run-a-python-script-from-another-python-script-passing-in-args%0A # We need this log file for some reason, it throws exceptions without it%0A self.server_log_file = open('.tsdb_server.log.test','w')%0A self.server_proc = subprocess.Popen(%5B'python', 'go_server.py'%5D%0A ,stdout=self.server_log_file,stderr=subprocess.STDOUT)%0A time.sleep(1)%0A %0A # This needs to be separate in case the test %0A # fails and then the server will never be shut down%0A def tearDown(self):%0A ################%0A ### SHUTDOWN ###%0A ################%0A # Shuts down the server%0A self.server_proc.terminate()%0A self.server_log_file.close()%0A time.sleep(1)%0A %0A async def test_simple_run(self):%0A%0A %09client = TSDBClient()%0A%0A%09 await client.add_trigger('KalmanFilter', 'insert_ts', %5B'sig_epsilon_estimate', 'sig_eta_estimate'%5D, None)#%5B'mean', 'std'%5D, None)#%0A%0A%09%09sigeta_para = 1%0A%09%09sigeps_para = 10 %0A%09 sigeta = np.random.normal(0,sigeta_para,2000)%0A%09 sigeps = np.random.normal(0,sigeps_para,2000)%0A%0A%09 mus = np.cumsum(sigeta)+20%0A%09 y = mus + sigeps%0A%0A%09 ats = ts.TimeSeries(y,np.arange(2000))%0A%0A%09 await client.insert_ts(1,ats)%0A%09 await client.upsert_meta(1, %7B'order': 1%7D)%0A%0A%09 status, payload = await client.select(%7B'order':%7B'==':1%7D%7D, %5B'sig_epsilon_estimate', 'sig_eta_estimate'%5D, None)%0A%0A%0A%09 assert(np.isclose(payload%5B'1'%5D%5B'sig_epsilon_estimate'%5D, sigeps_para, rtol=0.1))%0A assert(np.isclose(payload%5B'1'%5D%5B'sig_eta_estimate'%5D, sigeta_para, rtol=0.1))%0A%0A
faef1804e1781365fc027ecf08d61fbab56a4679
Add migrations I forgot to commit out of saltiness
magic/migrations/0003_auto_20170929_0229.py
magic/migrations/0003_auto_20170929_0229.py
Python
0
@@ -0,0 +1,696 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.5 on 2017-09-29 05:29%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('magic', '0002_auto_20170929_0159'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='card',%0A name='power',%0A field=models.CharField(blank=True, max_length=255, null=True, verbose_name='power'),%0A ),%0A migrations.AlterField(%0A model_name='card',%0A name='toughness',%0A field=models.CharField(blank=True, max_length=255, null=True, verbose_name='toughness'),%0A ),%0A %5D%0A
e61dcb055fb4767e6e662648c89cbdfda4422c97
Update expected error message in this test
docs/source/examples/test_no_depends_fails.py
docs/source/examples/test_no_depends_fails.py
from pych.extern import Chapel @Chapel(sfile="users.onlyonce.chpl") def useTwoModules(x=int, y=int): return int if __name__ == "__main__": print(useTwoModules(2, 4)) import testcase # contains the general testing method, which allows us to gather output import os.path def test_using_multiple_modules(): out = testcase.runpy(os.path.realpath(__file__)) # Ensure that when a used module is nowhere near the exported function, we # get an error message to that effect. assert "error: Cannot find module \'M1\'" in out
Python
0
@@ -522,16 +522,24 @@ module +or enum %5C'M1%5C'%22
2e7048d8feae5ed2c244e617077235b5b771f326
Add deleted selenium runner
test/selenium/src/run_selenium.py
test/selenium/src/run_selenium.py
Python
0
@@ -0,0 +1,1823 @@ +#!/usr/bin/env python2.7%0A# Copyright (C) 2015 Google Inc., authors, and contributors %3Csee AUTHORS file%3E%0A# Licensed under http://www.apache.org/licenses/LICENSE-2.0 %3Csee LICENSE file%3E%0A# Created By: [email protected]%0A# Maintained By: [email protected]%0A%0A%22%22%22 Basic selenium test runner%0A%0AThis script is used for running all selenium tests against the server defined%0Ain the configuration yaml file. The script will wait a defined time for the%0Aserver to start before running the test. If the server fails to start before%0Aits grace time is up, the script will return with an error code of 3. Error%0Acodes 1 and 2 are reserved by pytest and status 0 is returned only if all the%0Atests pass.%0A%22%22%22%0A%0Aimport logging%0Aimport os%0Aimport sys%0Aimport time%0Aimport urllib%0A%0Aimport pytest%0A%0Afrom lib import constants%0Afrom lib import file_ops%0Afrom lib import log%0Afrom lib import environment%0A%0APROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + %22/../%22%0A%0Alogger = logging.getLogger(%22selenium.webdriver.remote.remote_connection%22)%0A%0A%0Adef wait_for_server():%0A  %22%22%22 Wait for the server to return a 200 response%0A  %22%22%22%0A  sys.stdout.write(%22Waiting on server: %22)%0A  for _ in xrange(environment.SERVER_WAIT_TIME):%0A    try:%0A      if urllib.urlopen(environment.APP_URL).getcode() == 200:%0A        print %22%5BDone%5D%22%0A        return True%0A    except IOError:%0A      sys.stdout.write(%22.%22)%0A      sys.stdout.flush()%0A      time.sleep(1)%0A  print %22%5BFailed%5D%22%0A  return False%0A%0A%0Aif __name__ == %22__main__%22:%0A  if not wait_for_server():%0A    sys.exit(3)%0A%0A  file_ops.create_directory(environment.LOG_PATH)%0A  file_ops.delete_directory_contents(environment.LOG_PATH)%0A  log.set_default_file_handler(%0A      logger,%0A      PROJECT_ROOT_PATH + constants.path.LOGS_DIR + constants.path.TEST_RUNNER%0A  )%0A  logger.setLevel(environment.LOGGING_LEVEL)%0A%0A  sys.exit(pytest.main())%0A
3bb65466d40c1b59faebe0db40eced260ff60010
Create SampleFunction.py
src/Python/ImplicitFunctions/SampleFunction.py
src/Python/ImplicitFunctions/SampleFunction.py
Python
0.000001
@@ -0,0 +1,1853 @@ +#!/usr/bin/env python%0A%0Aimport vtk%0A%0Adef main():%0A%09value = 2.0%0A%09colors = vtk.vtkNamedColors()%0A%09%0A%09implicitFunction = vtk.vtkSuperquadric()%0A%09implicitFunction.SetPhiRoundness(2.5)%0A%09implicitFunction.SetThetaRoundness(.5)%0A%09%0A%09# Sample the function.%0A%09sample = vtk.vtkSampleFunction()%0A%09sample.SetSampleDimensions(50,50,50)%0A%09sample.SetImplicitFunction(implicitFunction)%0A%09%0A%09xmin, xmax, ymin, ymax, zmin, zmax = -value, value, -value, value, -value, value%0A%09sample.SetModelBounds(xmin, xmax, ymin, ymax, zmin, zmax)%0A%09%0A%09# Create the 0 isosurface.%0A%09contours = vtk.vtkContourFilter()%0A%09contours.SetInputConnection(sample.GetOutputPort())%0A%09contours.GenerateValues(1, 2.0, 2.0)%0A%09%0A%09# Map the contours to graphical primitives.%0A%09contourMapper = vtk.vtkPolyDataMapper()%0A%09contourMapper.SetInputConnection(contours.GetOutputPort())%0A%09contourMapper.SetScalarRange(0.0, 1.2)%0A%09%0A%09# Create an actor for the contours.%0A%09contourActor = vtk.vtkActor()%0A%09contourActor.SetMapper(contourMapper)%0A%09%0A%09# Create a box around the function to indicate the sampling volume. %0A%09%0A%09#Create outline.%0A%09outline = vtk.vtkOutlineFilter()%0A%09outline.SetInputConnection(sample.GetOutputPort())%0A%09%0A%09# Map it to graphics primitives.%0A%09outlineMapper = vtk.vtkPolyDataMapper()%0A%09outlineMapper.SetInputConnection(outline.GetOutputPort())%0A%09%0A%09# Create an actor.%0A%09outlineActor = vtk.vtkActor()%0A%09outlineActor.SetMapper(outlineMapper)%0A%09outlineActor.GetProperty().SetColor(0,0,0)%0A%09%0A%09# Visualize.%0A%09renderer = vtk.vtkRenderer()%0A%09renderWindow = vtk.vtkRenderWindow()%0A%09renderWindow.AddRenderer(renderer)%0A%09interactor = vtk.vtkRenderWindowInteractor()%0A%09interactor.SetRenderWindow(renderWindow)%0A%09%0A%09renderer.AddActor(contourActor)%0A%09renderer.AddActor(outlineActor)%0A%09renderer.SetBackground(colors.GetColor3d(%22Tan%22))%0A%09%09%0A%09# Enable user interface interactor%0A%09renderWindow.Render()%0A%09interactor.Start()%0A%09%09%0Aif __name__ == '__main__':%0A main()%0A
524f47a4d4e0db5b76dfb7ebf9447b6199e48b6d
Add data utils tests.
tests/test_data_utils_filetree.py
tests/test_data_utils_filetree.py
Python
0
@@ -0,0 +1,473 @@ +from uuid import uuid1%0Aimport json%0A%0Aimport pytest%0A%0Afrom flask_jsondash.data_utils import filetree%0A%0A%0Adef test_path_hierarchy(tmpdir):%0A uid = uuid1()%0A tmpfile = tmpdir.mkdir('%7B%7D'.format(uid))%0A data = filetree.path_hierarchy(tmpfile.strpath)%0A assert json.dumps(data)%0A for key in %5B'type', 'name', 'path'%5D:%0A assert key in data%0A%0A%0Adef test_path_hierarchy_invalid_path(tmpdir):%0A with pytest.raises(OSError):%0A filetree.path_hierarchy('invalid-path')%0A
37d0843c76b558d6d7a1892963a30e9a56d73f24
Document typical Stylesheet attributes
praw/models/stylesheet.py
praw/models/stylesheet.py
"""Provide the Stylesheet class.""" from .base import PRAWBase class Stylesheet(PRAWBase): """Represent a stylesheet."""
Python
0
@@ -117,12 +117,783 @@ lesheet. +%0A%0A **Typical Attributes**%0A%0A This table describes attributes that typically belong to objects of this%0A class. Since attributes are dynamically provided (see%0A :ref:%60determine-available-attributes-of-an-object%60), there is not a%0A guarantee that these attributes will always be present, nor is this list%0A necessarily comprehensive.%0A%0A ======================= ===================================================%0A Attribute Description%0A ======================= ===================================================%0A %60%60images%60%60 A %60%60list%60%60 of images used by the stylesheet.%0A %60%60stylesheet%60%60 The contents of the stylesheet, as CSS.%0A ======================= ===================================================%0A %22%22%22%0A
4cde1f2fcc21ff83daabdb5221c462f44991c73f
Create remove-boxes.py
Python/remove-boxes.py
Python/remove-boxes.py
Python
0.000002
@@ -0,0 +1,1542 @@ +# Time: O(n%5E3) ~ O(n%5E4)%0A# Space: O(n%5E3)%0A%0A# Given several boxes with different colors represented by different positive numbers. %0A# You may experience several rounds to remove boxes until there is no box left.%0A# Each time you can choose some continuous boxes with the same color (composed of k boxes, k %3E= 1),%0A# remove them and get k*k points.%0A# Find the maximum points you can get.%0A#%0A# Example 1:%0A# Input:%0A#%0A# %5B1, 3, 2, 2, 2, 3, 4, 3, 1%5D%0A# Output:%0A# 23%0A# Explanation:%0A# %5B1, 3, 2, 2, 2, 3, 4, 3, 1%5D %0A# ----%3E %5B1, 3, 3, 4, 3, 1%5D (3*3=9 points) %0A# ----%3E %5B1, 3, 3, 3, 1%5D (1*1=1 points) %0A# ----%3E %5B1, 1%5D (3*3=9 points) %0A# ----%3E %5B%5D (2*2=4 points)%0A# Note: The number of boxes n would not exceed 100.%0A%0Aclass Solution(object):%0A def removeBoxes(self, boxes):%0A %22%22%22%0A :type boxes: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A def dfs(boxes, l, r, k, lookup):%0A if l %3E r: return 0%0A if lookup%5Bl%5D%5Br%5D%5Bk%5D: return lookup%5Bl%5D%5Br%5D%5Bk%5D%0A%0A ll, kk = l, k%0A while l %3C r and boxes%5Bl+1%5D == boxes%5Bl%5D:%0A l += 1%0A k += 1%0A result = dfs(boxes, l+1, r, 0, lookup) + (k+1) ** 2%0A for i in xrange(l+1, r+1):%0A if boxes%5Bi%5D == boxes%5Bl%5D:%0A result = max(result, dfs(boxes, l+1, i-1, 0, lookup) + dfs(boxes, i, r, k+1, lookup))%0A lookup%5Bll%5D%5Br%5D%5Bkk%5D = result%0A return result%0A%0A lookup = %5B%5B%5B0%5D*len(boxes) for _ in xrange(len(boxes)) %5D for _ in xrange(len(boxes)) %5D%0A return dfs(boxes, 0, len(boxes)-1, 0, lookup)%0A
06fb2c0371b9cfb5980351d45665d41fdcfae3b5
Add MemoryMetric to Memory measurement
tools/perf/measurements/memory.py
tools/perf/measurements/memory.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from metrics import histogram from telemetry.page import page_measurement MEMORY_HISTOGRAMS = [ {'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'}, {'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'}, {'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'}, {'name': 'Memory.RendererUsed', 'units': 'kb'}] BROWSER_MEMORY_HISTOGRAMS = [ {'name': 'Memory.BrowserUsed', 'units': 'kb'}] class Memory(page_measurement.PageMeasurement): def __init__(self): super(Memory, self).__init__('stress_memory') self.histograms = ( [histogram.HistogramMetric( h, histogram.RENDERER_HISTOGRAM) for h in MEMORY_HISTOGRAMS] + [histogram.HistogramMetric( h, histogram.BROWSER_HISTOGRAM) for h in BROWSER_MEMORY_HISTOGRAMS]) def DidNavigateToPage(self, page, tab): for h in self.histograms: h.Start(page, tab) def CustomizeBrowserOptions(self, options): options.AppendExtraBrowserArg('--enable-stats-collection-bindings') options.AppendExtraBrowserArg('--enable-memory-benchmarking') # For a hard-coded set of Google pages (such as GMail), we produce custom # memory histograms (V8.Something_gmail) instead of the generic histograms # (V8.Something), if we detect that a renderer is only rendering this page # and no other pages. For this test, we need to disable histogram # customizing, so that we get the same generic histograms produced for all # pages. options.AppendExtraBrowserArg('--disable-histogram-customizer') options.AppendExtraBrowserArg('--memory-metrics') # Old commandline flags used for reference builds. options.AppendExtraBrowserArg('--dom-automation') options.AppendExtraBrowserArg( '--reduce-security-for-dom-automation-tests') def CanRunForPage(self, page): return hasattr(page, 'stress_memory') def MeasurePage(self, page, tab, results): for h in self.histograms: h.GetValue(page, tab, results) if tab.browser.is_profiler_active('tcmalloc-heap'): # The tcmalloc_heap_profiler dumps files at regular # intervals (~20 secs). # This is a minor optimization to ensure it'll dump the last file when # the test completes. tab.ExecuteJavaScript(""" if (chrome && chrome.memoryBenchmarking) { chrome.memoryBenchmarking.heapProfilerDump('final', 'renderer'); chrome.memoryBenchmarking.heapProfilerDump('final', 'browser'); } """)
Python
0.000001
@@ -189,16 +189,43 @@ stogram%0A +from metrics import memory%0A from tel @@ -1013,16 +1013,174 @@ RAMS%5D)%0A%0A + self._memory_metric = None%0A%0A def DidStartBrowser(self, browser):%0A self._memory_metric = memory.MemoryMetric(browser)%0A self._memory_metric.Start()%0A%0A def Di @@ -2866,8 +2866,128 @@ %22%22%22)%0A +%0A def DidRunTest(self, tab, results):%0A self._memory_metric.Stop()%0A self._memory_metric.AddResults(tab, results)%0A%0A
5d3cc76309d8cc3151410b1dfecdf4407f98a5f8
Add missing docstrings
nikola/plugins/compile/markdown/__init__.py
nikola/plugins/compile/markdown/__init__.py
# -*- coding: utf-8 -*- # Copyright © 2012-2017 Roberto Alsina and others. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Page compiler plugin for Markdown.""" from __future__ import unicode_literals import io import os import threading try: from markdown import Markdown except ImportError: Markdown = None # NOQA nikola_extension = None gist_extension = None podcast_extension = None from nikola.plugin_categories import PageCompiler from nikola.utils import makedirs, req_missing, write_metadata class ThreadLocalMarkdown(threading.local): """Convert to markdown using per-thread Markdown objects.""" def __init__(self, extensions): self.markdown = Markdown(extensions=extensions, output_format="html5") def convert(self, data): """Convert data to HTML and reset internal state.""" result = self.markdown.convert(data) self.markdown.reset() return result class CompileMarkdown(PageCompiler): """Compile Markdown into HTML.""" name = "markdown" friendly_name = "Markdown" demote_headers = True site = None def set_site(self, site): """Set Nikola site.""" super(CompileMarkdown, self).set_site(site) self.config_dependencies = [] extensions = [] for plugin_info in self.get_compiler_extensions(): self.config_dependencies.append(plugin_info.name) extensions.append(plugin_info.plugin_object) plugin_info.plugin_object.short_help = plugin_info.description site_extensions = self.site.config.get("MARKDOWN_EXTENSIONS") self.config_dependencies.append(str(sorted(site_extensions))) extensions.extend(site_extensions) if Markdown is not None: self.converter = ThreadLocalMarkdown(extensions) def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None): """Compile Markdown into HTML strings.""" if Markdown is None: req_missing(['markdown'], 'build this site (compile Markdown)') if not is_two_file: _, data = self.split_metadata(data) output = self.converter.convert(data) output, shortcode_deps = self.site.apply_shortcodes(output, filename=source_path, with_dependencies=True, extra_context={'post': post}) return output, shortcode_deps def compile(self, source, dest, is_two_file=True, post=None, lang=None): """Compile the source file into HTML and save as dest.""" if Markdown is None: req_missing(['markdown'], 'build this site (compile Markdown)') makedirs(os.path.dirname(dest)) with io.open(dest, "w+", encoding="utf8") as out_file: with io.open(source, "r", encoding="utf8") as in_file: data = in_file.read() output, shortcode_deps = self.compile_string(data, source, is_two_file, post) 
out_file.write(output) if post is None: if shortcode_deps: self.logger.error( "Cannot save dependencies for post {0} (post unknown)", source) else: post._depfile[dest] += shortcode_deps def create_post(self, path, **kw): """Create a new post.""" content = kw.pop('content', None) onefile = kw.pop('onefile', False) # is_page is not used by create_post as of now. kw.pop('is_page', False) metadata = {} metadata.update(self.default_metadata) metadata.update(kw) makedirs(os.path.dirname(path)) if not content.endswith('\n'): content += '\n' with io.open(path, "w+", encoding="utf8") as fd: if onefile: fd.write('<!-- \n') fd.write(write_metadata(metadata)) fd.write('-->\n\n') fd.write(content)
Python
0.021382
@@ -1604,20 +1604,17 @@ ert -to m +M arkdown usin @@ -1609,16 +1609,24 @@ arkdown +to HTML using pe @@ -1651,16 +1651,46 @@ objects. +%0A%0A See discussion in #2661. %22%22%22%0A%0A @@ -1722,16 +1722,58 @@ sions):%0A + %22%22%22Create a Markdown instance.%22%22%22%0A
9a5bfb7f5bf114bb4bcf2dd4c88ddd8924a97ed9
add menu function to menu.py
menu.py
menu.py
Python
0.000003
@@ -0,0 +1,408 @@ +#!/usr/bin/env python %0A%0A# Text-based menu for use in pyWype.py %0A%0Adef menu(): %0A    %22%22%22 Menu prompt for user to select program option %22%22%22%0A    while True: %0A        print 'I'%0A        print 'II'%0A        print 'III' %0A        print 'IV'%0A        print 'V'%0A%0A        choice = raw_input('Select an option (I, II, III, IV, V): ') %0A%0A        if choice in ('I', 'II', 'III', 'IV', 'V'): %0A            return choice %0A%0Amenu()%0A
fb61398b6a0cdd4f40d16729ab2ff0ca47730526
Add the main file
relay_api/__main__.py
relay_api/__main__.py
Python
0.000004
@@ -0,0 +1,359 @@ +from relay_api.api.server import server%0Afrom relay_api.conf.config import relays%0Aimport relay_api.api.server as api%0A%0A%[email protected](%22/relay-api/relays%22, methods=%5B%22GET%22%5D)%0Adef get_relays():%0A return api.get_relays(relays)%0A%0A%[email protected](%22/relay-api/relays/%3Cint:relay_id%3E%22, methods=%5B%22GET%22%5D)%0Adef get_relay(relay_id):%0A return api.get_relay(relays, relay_id)%0A
72a0d635e497f0f4c6c58d84f7001ec04063ea90
Add mfnd.py that prints today's date
mfnd.py
mfnd.py
Python
0
@@ -0,0 +1,163 @@ +#!/usr/bin/env python3%0A%22%22%22%0AMFND - A simple to-do list application%0A%22%22%22%0A%0Aimport datetime%0A%0Atoday = datetime.date.today()%0A%0Aprint( today.strftime('MFND - %25B %25d, %25Y') )%0A
d3469fd3ab39eeee381457588931636bf0987ea9
Create impossible_bet.py
impossible_bet.py
impossible_bet.py
Python
0.000821
@@ -0,0 +1,1270 @@ +import random%0A%0A%0Adef play_bet(participants=100, times=1000, checks=50):%0A %22%22%22Simulate the bet x times with x participants.%22%22%22%0A wins = 0%0A losses = 0%0A for time in range(times):%0A boxes = list(range(1, participants + 1))%0A random.shuffle(boxes)%0A for participant in range(1, participants + 1):%0A found = False%0A count = 0%0A to_open = participant%0A while found == False and count %3C checks:%0A if boxes%5Bto_open - 1%5D == participant:%0A found = True%0A else:%0A to_open = boxes%5Bto_open - 1%5D%0A%0A count += 1%0A if found == False:%0A losses += 1%0A break%0A elif found == True and participant == participants:%0A wins += 1%0A return (wins, losses)%0A%0Adef results(wins, losses):%0A total = wins + losses%0A win_percentage = (wins / total) * 100%0A lose_percentage = (losses / total) * 100%0A return win_percentage, lose_percentage%0A%0A%0A%0Aif __name__ == '__main__':%0A participants = int(input(print('participants')))%0A times = int(input(print('times')))%0A checks = int(input(print('checks')))%0A%0A print(results(*play_bet(participants=participants, times=times, checks=checks)))%0A
aeabe6bb89a359e644c5adcb4c6456fd3428f6de
Stop using intersphinx
doc/source/conf.py
doc/source/conf.py
# -*- coding: utf-8 -*- # # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'oslosphinx', ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'python-ironicclient' copyright = u'OpenStack Foundation' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['ironicclient.'] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme_path = ["."] #html_theme = '_theme' #html_static_path = ['_static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ( 'index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack LLC', 'manual' ), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None}
Python
0.000001
@@ -287,48 +287,8 @@ c',%0A - 'sphinx.ext.intersphinx',%0A @@ -1964,140 +1964,4 @@ ,%0A%5D%0A -%0A# Example configuration for intersphinx: refer to the Python standard library.%0Aintersphinx_mapping = %7B'http://docs.python.org/': None%7D%0A
e8c2406cbcff96196d2404e9df167cc96f468779
add sources api
mcp/interface/sources.py
mcp/interface/sources.py
Python
0
@@ -0,0 +1,779 @@ +import json%0A%0Afrom mcp import sources%0Afrom mcp.interface import common%0A%0Aclass SourcesHandler(common.AuthorizedHandler):%0A%09def forbidden(self):%0A%09%09return True%0A%0A%09def do_get(self):%0A%09%09return 200, json.dumps(list(iter(sources.source_db)))%0A%0Aclass SourceHandler(common.AuthorizedHandler):%0A%09def __init__(self, request, response, groups):%0A%09%09common.AuthorizedHandler.__init__(self, request, response, groups)%0A%09%09self.source = sources.get(self.groups%5B0%5D)%0A%0Aclass SourceInfoHandler(SourceHandler):%0A%09def do_get(self):%0A%09%09return 200, json.dumps(%7B'name': self.source.source, 'url': self.source.url, 'revision': self.source.revision%7D)%0A%0Asources_base = '/sources/'%0Asource_base = sources_base + '(' + sources.sources_allowed + ')'%0A%0Aroutes = %7Bsources_base: SourcesHandler, source_base: SourceInfoHandler%7D%0A
b75356d5325ce5b915f7bdb72c46fda53f190865
Validate test mode schema as well
homeassistant/components/locative/__init__.py
homeassistant/components/locative/__init__.py
""" Support for Locative. For more details about this component, please refer to the documentation at https://home-assistant.io/components/locative/ """ import logging import voluptuous as vol import homeassistant.helpers.config_validation as cv from homeassistant.components.device_tracker import \ DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY, ATTR_LATITUDE, \ ATTR_LONGITUDE, STATE_NOT_HOME, CONF_WEBHOOK_ID, ATTR_ID from homeassistant.helpers import config_entry_flow from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import async_dispatcher_send _LOGGER = logging.getLogger(__name__) DOMAIN = 'locative' DEPENDENCIES = ['webhook'] TRACKER_UPDATE = '{}_tracker_update'.format(DOMAIN) ATTR_DEVICE_ID = 'device' ATTR_TRIGGER = 'trigger' WEBHOOK_SCHEMA = vol.Schema({ vol.Required(ATTR_LATITUDE): cv.latitude, vol.Required(ATTR_LONGITUDE): cv.longitude, vol.Required(ATTR_DEVICE_ID): cv.string, vol.Required(ATTR_TRIGGER): cv.string, vol.Optional(ATTR_ID): cv.string, }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, hass_config): """Set up the Locative component.""" hass.async_create_task( async_load_platform(hass, 'device_tracker', DOMAIN, {}, hass_config) ) return True async def handle_webhook(hass, webhook_id, request): """Handle incoming webhook from Locative.""" try: data = WEBHOOK_SCHEMA(dict(await request.post())) except vol.MultipleInvalid as e: return e.error_message, HTTP_UNPROCESSABLE_ENTITY if ATTR_ID not in data and data[ATTR_TRIGGER] != 'test': _LOGGER.error('Location id not specified.') return ('Location id not specified.', HTTP_UNPROCESSABLE_ENTITY) device = data[ATTR_DEVICE_ID].replace('-', '') location_name = data.get(ATTR_ID, data[ATTR_TRIGGER]).lower() direction = data[ATTR_TRIGGER] gps_location = (data[ATTR_LATITUDE], data[ATTR_LONGITUDE]) if direction == 'enter': async_dispatcher_send( hass, TRACKER_UPDATE, device, gps_location, location_name ) return 'Setting location to {}'.format(location_name) if direction == 'exit': current_state = hass.states.get( '{}.{}'.format(DEVICE_TRACKER_DOMAIN, device)) if current_state is None or current_state.state == location_name: location_name = STATE_NOT_HOME async_dispatcher_send( hass, TRACKER_UPDATE, device, gps_location, location_name ) return 'Setting location to not home' # Ignore the message if it is telling us to exit a zone that we # aren't currently in. This occurs when a zone is entered # before the previous zone was exited. The enter message will # be sent first, then the exit message will be sent second. return 'Ignoring exit from {} (already in {})'.format( location_name, current_state) if direction == 'test': # In the app, a test message can be sent. Just return something to # the user to let them know that it works. return 'Received test message.' _LOGGER.error('Received unidentified message from Locative: %s', direction) return ('Received unidentified message: {}'.format(direction), HTTP_UNPROCESSABLE_ENTITY) async def async_setup_entry(hass, entry): """Configure based on config entry.""" hass.components.webhook.async_register( DOMAIN, 'Locative', entry.data[CONF_WEBHOOK_ID], handle_webhook) return True async def async_unload_entry(hass, entry): """Unload a config entry.""" hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID]) return True config_entry_flow.register_webhook_flow( DOMAIN, 'Locative Webhook', { 'docs_url': 'https://www.home-assistant.io/components/locative/' } )
Python
0
@@ -187,16 +187,40 @@ s as vol +%0Afrom typing import Dict %0A%0Aimport @@ -874,24 +874,385 @@ r'%0A%0A -WEBHOOK_SCHEMA = +%0Adef _id(value: str) -%3E str:%0A %22%22%22Coerce id by removing '-'.%22%22%22%0A return value.replace('-', '')%0A%0A%0Adef _validate_test_mode(obj: Dict) -%3E Dict:%0A %22%22%22Validate that id is provided outside of test mode.%22%22%22%0A if ATTR_ID not in obj and obj%5BATTR_TRIGGER%5D != 'test':%0A raise vol.Invalid('Location id not specified')%0A return obj%0A%0A%0AWEBHOOK_SCHEMA = vol.All(%0A dict,%0A vol @@ -1261,16 +1261,20 @@ chema(%7B%0A + vol. @@ -1307,32 +1307,36 @@ v.latitude,%0A + + vol.Required(ATT @@ -1363,24 +1363,28 @@ gitude,%0A + + vol.Required @@ -1412,24 +1412,28 @@ string,%0A + + vol.Required @@ -1459,24 +1459,28 @@ string,%0A + + vol.Optional @@ -1486,24 +1486,32 @@ l(ATTR_ID): +vol.All( cv.string,%0A%7D @@ -1508,17 +1508,26 @@ .string, -%0A + _id)%0A %7D, extra @@ -1543,16 +1543,43 @@ W_EXTRA) +,%0A _validate_test_mode%0A) %0A%0A%0Aasync @@ -2054,211 +2054,8 @@ TY%0A%0A - if ATTR_ID not in data and data%5BATTR_TRIGGER%5D != 'test':%0A _LOGGER.error('Location id not specified.')%0A return ('Location id not specified.',%0A HTTP_UNPROCESSABLE_ENTITY)%0A%0A @@ -2083,33 +2083,16 @@ VICE_ID%5D -.replace('-', '') %0A loc
6d0f65f70757ceca0da220e6c54b7ae164752547
Use raise_from for less crummy python 3 tracebacks
invoke/context.py
invoke/context.py
import getpass import re from .config import Config, DataProxy from .exceptions import Failure, AuthFailure, ResponseFailure from .runners import Local from .watchers import FailingResponder class Context(DataProxy): """ Context-aware API wrapper & state-passing object. `.Context` objects are created during command-line parsing (or, if desired, by hand) and used to share parser and configuration state with executed tasks (see :doc:`/concepts/context`). Specifically, the class offers wrappers for core API calls (such as `.run`) which take into account CLI parser flags, configuration files, and/or changes made at runtime. It also acts as a proxy for its `~.Context.config` attribute - see that attribute's documentation for details. Instances of `.Context` may be shared between tasks when executing sub-tasks - either the same context the caller was given, or an altered copy thereof (or, theoretically, a brand new one). """ def __init__(self, config=None): """ :param config: `.Config` object to use as the base configuration. Defaults to an anonymous/default `.Config` instance. """ #: The fully merged `.Config` object appropriate for this context. #: #: `.Config` settings (see their documentation for details) may be #: accessed like dictionary keys (``ctx.config['foo']``) or object #: attributes (``ctx.config.foo``). #: #: As a convenience shorthand, the `.Context` object proxies to its #: ``config`` attribute in the same way - e.g. ``ctx['foo']`` or #: ``ctx.foo`` returns the same value as ``ctx.config['foo']``. self.config = config if config is not None else Config() def run(self, command, **kwargs): """ Execute a local shell command, honoring config options. Specifically, this method instantiates a `.Runner` subclass (according to the ``runner`` config option; default is `.Local`) and calls its ``.run`` method with ``command`` and ``kwargs``. See `.Runner.run` for details on ``command`` and the available keyword arguments. """ runner_class = self.config.get('runner', Local) return runner_class(context=self).run(command, **kwargs) def sudo(self, command, **kwargs): """ Execute a shell command, via ``sudo``. In general, this method is identical to `run`, but adds a handful of convenient behaviors around invoking the ``sudo`` program. It doesn't do anything users could not do themselves by wrapping `run`, but the use case is too common to make users reinvent these wheels themselves. Specifically, `sudo`: * Places a `.FailingResponder` into the ``watchers`` kwarg (see :doc:`/concepts/watchers`) which: * searches for the configured ``sudo`` password prompt; * responds with the configured sudo password (``sudo.password`` from the :doc:`configuration </concepts/configuration>`, or a runtime `getpass <getpass.getpass>` input); * can tell when that response causes an authentication failure, and raises an exception if so. * Builds a ``sudo`` command string using the supplied ``command`` argument prefixed by the ``sudo.prefix`` configuration setting; * Executes that command via a call to `run`, returning the result. As with `run`, these additional behaviors may be configured both via the ``run`` tree of configuration settings (like ``run.echo``) or via keyword arguments, which will override the configuration system. :param str password: Runtime override for ``sudo.password``. :param str prefix: Runtime override for ``sudo.prefix``. 
""" prompt = self.config.sudo.prompt password = kwargs.pop('password', self.config.sudo.password) if password is None: msg = "No stored sudo password found, please enter it now: " # TODO: use something generic/overrideable that uses getpass by # default. May mean we pop this out as its own class-as-a-method or # something? password = getpass.getpass(msg) # TODO: want to print a "cleaner" echo with just 'sudo <command>'; but # hard to do as-is, obtaining config data from outside a Runner one # holds is currently messy (could fix that), if instead we manually # inspect the config ourselves that duplicates logic. NOTE: once we # figure this out there is an existing, would-fail-if-not-skipped test # for this behavior in test/context.py. # TODO: how to handle "full debug" output exactly (display of actual, # real full sudo command w/ -S and -p), in terms of API/config? Impl is # easy, just go back to passing echo through to 'run'... cmd_str = "sudo -S -p '{0}' {1}".format(prompt, command) watcher = FailingResponder( pattern=re.escape(prompt), response="{0}\n".format(password), failure_sentinel="Sorry, try again.\n", ) # TODO: we always want our auto-added watcher merged - how to square # that with how kwarg always wins currently? # * If we add to self.config, and user gives kwarg, ours is lost # * If we add to kwarg, any user config is lost try: return self.run(cmd_str, watchers=[watcher], **kwargs) except Failure as failure: # Transmute failures driven by our FailingResponder, into auth # failures - the command never even ran. # TODO: wants to be a hook here for users that desire "override a # bad config value for sudo.password" manual input # NOTE: as noted in #294 comments, we MAY in future want to update # this so run() is given ability to raise AuthFailure on its own. # For now that has been judged unnecessary complexity. if isinstance(failure.reason, ResponseFailure): # NOTE: not bothering with 'reason' here, it's pointless. raise AuthFailure(result=failure.result, prompt=prompt) # Reraise for any other error so it bubbles up normally. else: raise
Python
0
@@ -19,16 +19,58 @@ ort re%0A%0A +from invoke.vendor.six import raise_from%0A%0A from .co @@ -6296,21 +6296,171 @@ -raise +# NOTE: using raise_from(..., None) to suppress Python 3's%0A # %22helpful%22 multi-exception output. It's confusing here.%0A error = AuthFai @@ -6502,16 +6502,56 @@ prompt)%0A + raise_from(error, None)%0A
5dabba3941f870f3f365e186fdf852e834649595
Move config to docs
homeassistant/components/sensor/eliqonline.py
homeassistant/components/sensor/eliqonline.py
""" homeassistant.components.sensor.eliqonline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ monitors home energy use for the eliq online service api documentation: https://my.eliq.se/knowledge/sv-SE/49-eliq-online/299-eliq-online-api access to api access token: https://my.eliq.se/user/settings/api current energy use: https://my.eliq.se/api/datanow?accesstoken=<token> history: https://my.eliq.se/api/data?startdate=2015-12-14&intervaltype=6min&accesstoken=<token> """ import logging from homeassistant.helpers.entity import Entity from homeassistant.const import (STATE_UNKNOWN, CONF_ACCESS_TOKEN, CONF_NAME) _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['eliqonline==1.0.11'] DEFAULT_NAME = "ELIQ Energy Usage" def setup_platform(hass, config, add_devices, discovery_info=None): """ Set up the sensors """ import eliqonline access_token = config.get(CONF_ACCESS_TOKEN) name = config.get(CONF_NAME, DEFAULT_NAME) channel_id = config.get("channel_id") if access_token is None: _LOGGER.error( "Configuration Error: " "Please make sure you have configured your access token " "that can be aquired from https://my.eliq.se/user/settings/api") return False api = eliqonline.API(access_token) add_devices([EliqSensor(api, channel_id, name)]) class EliqSensor(Entity): """ Implements a Eliq sensor. """ def __init__(self, api, channel_id, name): self._name = name self._unit_of_measurement = "W" self._state = STATE_UNKNOWN self.api = api self.channel_id = channel_id self.update() @property def name(self): """ Returns the name. """ return self._name @property def unit_of_measurement(self): """ Unit of measurement of this entity, if any. """ return self._unit_of_measurement @property def state(self): """ Returns the state of the device. """ return self._state def update(self): """ Gets the latest data """ response = self.api.get_data_now(channelid=self.channel_id) self._state = int(response.power)
Python
0.000001
@@ -83,17 +83,17 @@ ~~~~~~~%0A -m +M onitors @@ -139,348 +139,146 @@ vice +. %0A%0A -api documentation:%0A https://my.eliq.se/knowledge/sv-SE/49-eliq-online/299-eliq-online-api%0A%0Aaccess to api access token:%0A https://my.eliq.se/user/settings/api%0A%0Acurrent energy use:%0A https://my.eliq.se/api/datanow?accesstoken=%3Ctoken%3E%0A%0Ahistory:%0A https://my.eliq.se/api/data?startdate=2015-12-14&intervaltype=6min&accesstoken=%3Ctoken%3E%0A +For more details about this platform, please refer to the documentation at%0Ahttps://home-assistant.io/components/sensor.eliqonline/ %0A%22%22%22%0A -%0A impo @@ -620,15 +620,20 @@ the +Eliq sensor -s +. %22%22%22 @@ -1857,16 +1857,17 @@ est data +. %22%22%22%0A
b2aa91648fe3ae915381e68cac95e5c3f6e5a182
add zhihu_login.py
spider/login/zhihu_login.py
spider/login/zhihu_login.py
Python
0
@@ -0,0 +1,23 @@ +# coding:utf-8%0A# %E6%A8%A1%E6%8B%9F%E8%B4%A6%E6%88%B7%E7%99%BB%E5%BD%95
b46a6189d9617396573903310a8b0a3d09b22fb0
Proper abstract method def
python/thunder/imgprocessing/register.py
python/thunder/imgprocessing/register.py
from numpy import arange, ndarray, argmax, unravel_index from thunder.rdds.images import Images from thunder.utils.common import checkparams class Register(object): def __new__(cls, method="crosscorr"): checkparams(method, ["crosscorr"]) if method == "crosscorr": return super(Register, cls).__new__(CrossCorr) @staticmethod def get_transform(im, ref): pass @staticmethod def apply_transform(im, transform): pass @staticmethod def reference(images, method='mean', startidx=None, stopidx=None, inclusive=True): """ Compute a reference image for use in registration. Parameters ---------- method : str, optional, default = 'mean' How to compute the reference startidx : int, optional, default = None Starting index if computing a mean over a specified range stopidx : int, optional, default = None Stopping index if computing a mean over a specified range inclusive : boolean, optional, default = True When specifying a range, whether boundaries should include or not include the specified end points. """ # TODO easy option for using the mean of the middle n images checkparams(method, ['mean']) if method == 'mean': if startidx is not None and stopidx is not None: if inclusive: range = lambda x: startidx <= x <= stopidx n = stopidx - startidx + 1 else: range = lambda x: startidx < x < stopidx n = stopidx - startidx - 1 ref = images.filterOnKeys(range) else: ref = images n = images.nimages refval = ref.sum() / (1.0 * n) return refval.astype(images.dtype) @staticmethod def _check_reference(images, reference): """ Check the dimensions and type of a reference (relative to an Images object) """ if isinstance(reference, ndarray): if reference.shape != images.dims.count: raise Exception('Dimensions of reference %s do not match dimensions of data %s' % (reference.shape, images.dims.count)) else: raise Exception('Reference must be an array') def estimate(self, images, reference): """ Estimate registration parameters on a collection of images / volumes. Will return a list of Parameters ---------- images : Images An Images object containing the images / volumes to estimate registration for reference : ndarray The reference image / volume to estimate registration against Returns ------- params : list Registration parameters, one per image. Will be returned as a list of key-value pairs, where the key is the same key used to identify each image / volume in the Images object, and the value is a list of registration parameters (in whatever format provided by the registration function; e.g. for CrossCorr will return a list of deltas in x and y) """ if not (isinstance(images, Images)): raise Exception('Input data must be Images or a subclass') self._check_reference(images, reference) # broadcast the reference (a potentially very large array) reference_bc = images.rdd.context.broadcast(reference) # estimate the transform parameters on an image / volume def params(im, ref): if im.ndim == 2: return self.get_transform(im, ref.value) else: t = [] for z in arange(0, im.shape[2]): t.append(self.get_transform(im[:, :, z], ref.value[:, :, z])) return t # TODO don't collect, maybe return as a Series? params = images.rdd.mapValues(lambda x: params(x, reference_bc)).collect() return params def transform(self, images, reference): """ Apply registration to a collection of images / volumes. 
Parameters ---------- images : Images An Images object containing the images / volumes to apply registration to reference : ndarray The reference image / volume to register against """ if not (isinstance(images, Images)): raise Exception('Input data must be Images or a subclass') self._check_reference(images, reference) # compute and apply transformation on an image / volume def register(im, ref): if im.ndim == 2: t = self.get_transform(im, ref.value) return self.apply_transform(im, t) else: im.setflags(write=True) for z in arange(0, im.shape[2]): t = self.get_transform(im[:, :, z], ref.value[:, :, z]) im[:, :, z] = self.apply_transform(im[:, :, z], t) return im # broadcast the reference (a potentially very large array) reference_bc = images.rdd.context.broadcast(reference) # return the transformed volumes newrdd = images.rdd.mapValues(lambda x: register(x, reference_bc)) return Images(newrdd).__finalize__(images) class CrossCorr(Register): """ Perform affine (translation) registration using cross-correlation """ @staticmethod def get_transform(im, ref): from numpy.fft import fft2, ifft2 fref = fft2(ref) fim = fft2(im) c = abs(ifft2((fim * fref.conjugate()))) d0, d1 = unravel_index(argmax(c), c.shape) if d0 > im.shape[0] // 2: d0 -= im.shape[0] if d1 > im.shape[1] // 2: d1 -= im.shape[1] return [d0, d1] @staticmethod def apply_transform(im, transform): from scipy.ndimage.interpolation import shift return shift(im, map(lambda x: -x, transform), mode='nearest')
Python
0.999986
@@ -341,34 +341,16 @@ sCorr)%0A%0A - @staticmethod%0A def @@ -355,32 +355,38 @@ f get_transform( +self, im, ref):%0A @@ -387,39 +387,42 @@ -pass%0A%0A @staticmethod +raise NotImplementedError%0A %0A def @@ -430,32 +430,38 @@ apply_transform( +self, im, transform):%0A @@ -472,12 +472,33 @@ -pass +raise NotImplementedError %0A%0A
c505927ae756fe1740e8603aadf23dae0ad12ff5
Create 01.CenturiesToMinutes.py
TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Lab/01.CenturiesToMinutes.py
TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Lab/01.CenturiesToMinutes.py
Python
0
@@ -0,0 +1,232 @@ +centuries = int(input())%0Ayears = centuries * 100%0Adays = int(years * 365.2422)%0Ahours = days * 24%0Aminutes = hours * 60%0A%0Aprint(%22%25d centuries = %25d years = %25d days = %25d hours = %25d minutes%22 %25%0A      (centuries, years, days, hours, minutes))%0A
ab9800183b3ab229782016aa3f88e6825467d01b
Add forgot username tests
api/radar_api/tests/test_forgot_username.py
api/radar_api/tests/test_forgot_username.py
Python
0
@@ -0,0 +1,558 @@ +def test_forgot_username(app):%0A client = app.test_client()%0A%0A response = client.post('/forgot-username', data=%7B%0A 'email': '[email protected]'%0A %7D)%0A%0A assert response.status_code == 200%0A%0A%0Adef test_email_missing(app):%0A client = app.test_client()%0A%0A response = client.post('/forgot-username', data=%7B%7D)%0A%0A assert response.status_code == 422%0A%0A%0Adef test_user_not_found(app):%0A client = app.test_client()%0A%0A response = client.post('/forgot-username', data=%7B%0A 'email': '[email protected]'%0A %7D)%0A%0A assert response.status_code == 422%0A
9b9fb4df30a8183c4de9f157200c5ff225d11d67
Add the plot script
src/scripts/prepare_gnuplot.py
src/scripts/prepare_gnuplot.py
Python
0.999689
@@ -0,0 +1,1760 @@ +#!/usr/bin/python%0A%0Aimport sys%0Aimport argparse%0Aimport csv%0Afrom string import Template%0A%0Aparser = argparse.ArgumentParser(description='Prepare gnuplot script from the supplied data files.')%0Aparser.add_argument('files', nargs='+', help='The data files.')%0A%0AMIN_Y_RANGE = 0.000001%0A%0AGNUPLOT_SCRIPT_TEMPLATE = Template(%22%22%22%0A%0Areset%0A%0Aset terminal lua tikz latex%0Aset output %22plot.tex%22%0A%0A#set title %22tau_m%22%0Aset style data lines%0Aset key left top%0Aset logscale y%0A#set tics axis%0A#shrink = 0.1%0Aset xrange%5B0:100%5D%0Aset yrange%5B$%7Blower_y_range%7D:$%7Bupper_y_range%7D%5D%0A#set xtics shrink/2%0A#set ytics shrink/2%0A#set size square%0Aset xlabel %22%5C%5C%5C%5C%25 of queries%22%0Aset ylabel %22time in seconds%22%0A%0Aplot $%7Bplot_cmd%7D%0A$%7Bdata%7D%0Apause -1%0A%0A%22%22%22)%0A%0Aif __name__ == %22__main__%22:%0A%09%0A%09args = parser.parse_args()%0A%09%0A%09plot_cmd = %22%22%0A%09data_string = %22%22%0A%09min_data = sys.float_info.max%0A%09max_data = sys.float_info.min%0A%09for data_file in args.files:%0A%09%09%0A%09%09plot_cmd += %22%22%22'-' title %22%25s%22, %22%22%22 %25 data_file%0A%09%09%0A%09%09with open(data_file) as fd:%0A%09%09%09reader = csv.reader(fd, delimiter=',', quotechar='%22')%0A%09%09%09header = reader.next()%0A#%09%09%09print header%0A#%09%09%09time_index = header.index('time')%0A%09%09%09time_index = 0%0A%09%09%09for h in header:%0A%09%09%09%09if h.find('time') %3E= 0:%0A%09%09%09%09%09break%0A%09%09%09%09time_index += 1%0A%09%09%09data = %5B%5D%0A%09%09%09for line in reader:%0A%09%09%09%09data.append(float(line%5Btime_index%5D)/1000)%0A%09%09%09data.sort()%0A%09%09%09%0A%09%09%09if data%5B0%5D %3C min_data:%0A%09%09%09%09min_data = data%5B0%5D%0A%09%09%09if data%5Blen(data) - 1%5D %3E max_data:%0A%09%09%09%09max_data = data%5Blen(data) - 1%5D%0A%09%09%09%0A%09%09%09step = 100.0/len(data)%0A%09%09%09%0A%09%09%09x = step%0A%09%09%09for d in data:%0A%09%09%09%09data_string += %22%25f%5Ct%25f%5Cn%22 %25 (x, d)%0A%09%09%09%09x += step%0A%09%09%09data_string += %22e%5Cn%22%0A%09%09%0A%09%09pass%0A%09%0A%09min_data = max(min_data, MIN_Y_RANGE)%0A%09%0A%09print GNUPLOT_SCRIPT_TEMPLATE.substitute(plot_cmd=plot_cmd, data=data_string, lower_y_range=min_data, upper_y_range=max_data)%0A%09%0A%09pass%0A%0A%0A%0A%0A%0A%0A%0A
d08f9cd114329a3ea66f84421b5abbfcf73c1f69
Add timeout test
odo/backends/tests/test_url.py
odo/backends/tests/test_url.py
from __future__ import print_function import pytest from functools import partial import codecs import os from odo import odo, resource, URL, discover, CSV, TextFile, convert from odo.backends.url import sample from odo.temp import _Temp, Temp from odo.utils import tmpfile, raises import datashape try: from urllib2 import urlopen from urllib2 import HTTPError, URLError except ImportError: from urllib.request import urlopen from urllib.error import HTTPError, URLError pytestmark = pytest.mark.skipif(raises(URLError, partial(urlopen, "http://google.com")), reason='unable to connect to google.com') iris_url = ('https://raw.githubusercontent.com/' 'blaze/blaze/master/blaze/examples/data/iris.csv') ftp_url = "ftp://athena-dist.mit.edu/pub/XNeXT/README.txt" def test_url_resource(): csv = resource(iris_url) assert isinstance(csv, URL(CSV)) def test_sample_different_line_counts(): with sample(resource(iris_url), lines=10) as fn: with open(fn, 'r') as f: assert len(list(f)) == 10 with sample(resource(iris_url), lines=5) as fn: with open(fn, 'r') as f: assert len(list(f)) == 5 def test_sample_different_encoding(): encoding = 'latin-1' lines = 10 with sample(resource(iris_url), lines=lines, encoding=encoding) as fn: with codecs.open(fn, 'r', encoding=encoding) as f: assert len(list(f)) == lines @pytest.mark.xfail(raises=HTTPError) def test_failed_url(): failed_url = "http://foo.com/myfile.csv" with tmpfile('.csv') as fn: odo(failed_url, fn) def test_url_discover(): csv = resource(iris_url) assert isinstance(discover(csv), datashape.DataShape) def test_url_to_local_csv(): with tmpfile('.csv') as fn: csv = odo(iris_url, fn) path = os.path.abspath(csv.path) assert os.path.exists(path) def test_url_txt_resource(): txt = resource(ftp_url) assert isinstance(txt, URL(TextFile)) @pytest.mark.xfail( raises=URLError, reason='MIT Athena FTP is down as of October 23, 2015' ) def test_ftp_to_local_txt(): with tmpfile('.txt') as fn: txt = odo(ftp_url, fn) path = os.path.abspath(txt.path) assert os.path.exists(path) def test_convert(): url_csv = resource(iris_url) t_csv = convert(Temp(CSV), url_csv) assert discover(url_csv) == discover(t_csv) assert isinstance(t_csv, _Temp) @pytest.mark.skipif(os.environ.get('HDFS_TEST_HOST') is None, reason='No HDFS_TEST_HOST envar defined') def test_url_to_hdfs(): from .test_hdfs import tmpfile_hdfs, hdfs, HDFS with tmpfile_hdfs() as target: # build temp csv for assertion check url_csv = resource(iris_url) csv = convert(Temp(CSV), url_csv) # test against url scsv = HDFS(CSV)(target, hdfs=hdfs) odo(iris_url, scsv) assert discover(scsv) == discover(csv)
Python
0.000004
@@ -2239,32 +2239,43 @@ odo(ftp_url, fn +, timeout=5 )%0A path =
27a3ca8d746890c7404845d18b8031763ec6b6a7
add netcat-nonblock.py
python/netcat-nonblock.py
python/netcat-nonblock.py
Python
0.000001
@@ -0,0 +1,3267 @@ +#!/usr/bin/python%0A%0Aimport errno%0Aimport fcntl%0Aimport os%0Aimport select%0Aimport socket%0Aimport sys%0A%0Adef setNonBlocking(fd):%0A flags = fcntl.fcntl(fd, fcntl.F_GETFL)%0A fcntl.fcntl(fd, fcntl.F_SETFL, flags %7C os.O_NONBLOCK)%0A%0A%0Adef nonBlockingWrite(fd, data):%0A try:%0A nw = os.write(fd, data)%0A return nw%0A except OSError as e:%0A if e.errno == errno.EWOULDBLOCK:%0A return -1%0A%0A%0Adef relay(sock):%0A socketEvents = select.POLLIN%0A poll = select.poll()%0A poll.register(sock, socketEvents)%0A poll.register(sys.stdin, select.POLLIN)%0A%0A setNonBlocking(sock)%0A # setNonBlocking(sys.stdin)%0A # setNonBlocking(sys.stdout)%0A%0A done = False%0A stdoutOutputBuffer = ''%0A socketOutputBuffer = ''%0A while not done:%0A events = poll.poll(10000) # 10 seconds%0A for fileno, event in events:%0A if event & select.POLLIN:%0A if fileno == sock.fileno():%0A data = sock.recv(8192)%0A if data:%0A nw = sys.stdout.write(data) # stdout does support non-blocking write, though%0A else:%0A done = True%0A else:%0A assert fileno == sys.stdin.fileno()%0A data = os.read(fileno, 8192)%0A if data:%0A assert len(socketOutputBuffer) == 0%0A nw = nonBlockingWrite(sock.fileno(), data)%0A if nw %3C len(data):%0A if nw %3C 0:%0A nw = 0%0A socketOutputBuffer = data%5Bnw:%5D%0A socketEvents %7C= select.POLLOUT%0A poll.register(sock, socketEvents)%0A poll.unregister(sys.stdin)%0A else:%0A sock.shutdown(socket.SHUT_WR)%0A poll.unregister(sys.stdin)%0A if event & select.POLLOUT:%0A if fileno == sock.fileno():%0A assert len(socketOutputBuffer) %3E 0%0A nw = nonBlockingWrite(sock.fileno(), data)%0A if nw %3C len(data):%0A assert nw %3E 0%0A socketOutputBuffer = socketOutputBuffer%5Bnw:%5D%0A else:%0A socketOutputBuffer = ''%0A socketEvents &= ~select.POLLOUT%0A poll.register(sock, socketEvents)%0A poll.register(sys.stdin, select.POLLIN)%0A%0A%0A%0Adef main(argv):%0A if len(argv) %3C 3:%0A binary = argv%5B0%5D%0A print %22Usage:%5Cn %25s -l port%5Cn %25s host port%22 %25 (argv%5B0%5D, argv%5B0%5D)%0A print (sys.stdout.write)%0A return%0A port = int(argv%5B2%5D)%0A if argv%5B1%5D == %22-l%22:%0A # server%0A server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)%0A server_socket.bind(('', port))%0A server_socket.listen(5)%0A (client_socket, client_address) = server_socket.accept()%0A server_socket.close()%0A relay(client_socket)%0A else:%0A # client%0A sock = socket.create_connection((argv%5B1%5D, port))%0A relay(sock)%0A%0A%0Aif __name__ == %22__main__%22:%0A main(sys.argv)%0A
34fd215d73d87c017cdae299aebd6484e6541991
Revert 176254 > Android: upgrade sandbox_linux_unitests to a stable test > > > BUG=166704 > NOTRY=true > > Review URL: https://chromiumcodereview.appspot.com/11783106
build/android/pylib/gtest/gtest_config.py
build/android/pylib/gtest/gtest_config.py
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Configuration file for android gtest suites.""" # Add new suites here before upgrading them to the stable list below. EXPERIMENTAL_TEST_SUITES = [ ] # Do not modify this list without approval of an android owner. # This list determines which suites are run by default, both for local # testing and on android trybots running on commit-queue. STABLE_TEST_SUITES = [ 'base_unittests', 'cc_unittests', 'content_unittests', 'gpu_unittests', 'ipc_tests', 'media_unittests', 'net_unittests', 'sandbox_linux_unittests', 'sql_unittests', 'sync_unit_tests', 'ui_unittests', 'unit_tests', 'webkit_compositor_bindings_unittests', 'android_webview_unittests', ]
Python
0
@@ -311,16 +311,47 @@ TES = %5B%0A + 'sandbox_linux_unittests',%0A %5D%0A%0A# Do @@ -714,39 +714,8 @@ s',%0A - 'sandbox_linux_unittests',%0A
732fd24d06f49570c24016b7adfb3ad511e2e6af
Add test for ValidationResultIdentifier.to_tuple()
tests/data_context/test_data_context_resource_identifiers.py
tests/data_context/test_data_context_resource_identifiers.py
Python
0.000004
@@ -0,0 +1,2190 @@ +from great_expectations.data_context.types.resource_identifiers import (%0A ValidationResultIdentifier%0A)%0A%0A%0Adef test_ValidationResultIdentifier_to_tuple(expectation_suite_identifier):%0A validation_result_identifier = ValidationResultIdentifier(%0A expectation_suite_identifier,%0A %22my_run_id%22,%0A %22my_batch_identifier%22%0A )%0A assert validation_result_identifier.to_tuple() == (%0A %22my%22, %22expectation%22, %22suite%22, %22name%22,%0A %22my_run_id%22,%0A %22my_batch_identifier%22%0A )%0A assert validation_result_identifier.to_fixed_length_tuple() == (%0A %22my.expectation.suite.name%22,%0A %22my_run_id%22,%0A %22my_batch_identifier%22%0A )%0A%0A validation_result_identifier_no_run_id = ValidationResultIdentifier(%0A expectation_suite_identifier,%0A None,%0A %22my_batch_identifier%22%0A )%0A assert validation_result_identifier_no_run_id.to_tuple() == (%0A %22my%22, %22expectation%22, %22suite%22, %22name%22,%0A %22__none__%22,%0A %22my_batch_identifier%22%0A )%0A assert validation_result_identifier_no_run_id.to_fixed_length_tuple() == (%0A %22my.expectation.suite.name%22,%0A %22__none__%22,%0A %22my_batch_identifier%22%0A )%0A%0A validation_result_identifier_no_batch_identifier = ValidationResultIdentifier(%0A expectation_suite_identifier,%0A %22my_run_id%22,%0A None%0A )%0A assert validation_result_identifier_no_batch_identifier.to_tuple() == (%0A %22my%22, %22expectation%22, %22suite%22, %22name%22,%0A %22my_run_id%22,%0A %22__none__%22%0A )%0A assert validation_result_identifier_no_batch_identifier.to_fixed_length_tuple() == (%0A %22my.expectation.suite.name%22,%0A %22my_run_id%22,%0A %22__none__%22%0A )%0A%0A validation_result_identifier_no_run_id_no_batch_identifier = ValidationResultIdentifier(%0A expectation_suite_identifier,%0A None,%0A None%0A )%0A assert validation_result_identifier_no_run_id_no_batch_identifier.to_tuple() == (%0A %22my%22, %22expectation%22, %22suite%22, %22name%22,%0A %22__none__%22,%0A %22__none__%22%0A )%0A assert validation_result_identifier_no_run_id_no_batch_identifier.to_fixed_length_tuple() == (%0A %22my.expectation.suite.name%22,%0A %22__none__%22,%0A %22__none__%22%0A )
b3ef51e93b090451718ed4c1240b63b8e99cd085
rename example
miepython/02_glass.py
miepython/02_glass.py
Python
0.000515
@@ -0,0 +1,787 @@ +#!/usr/bin/env python3%0A%0A%22%22%22%0APlot the scattering efficiency as a function of wavelength for 4micron glass spheres%0A%22%22%22%0A%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Aimport miepython%0A%0Anum = 100%0Aradius = 2 # in microns%0Alam = np.linspace(0.2,1.2,num) # also in microns%0Ax = 2*np.pi*radius/lam%0A%0A# from https://refractiveindex.info/?shelf=glass&book=BK7&page=SCHOTT%0Am=np.sqrt(1+1.03961212/(1-0.00600069867/lam**2)+0.231792344/(1-0.0200179144/lam**2)+1.01046945/(1-103.560653/lam**2))%0A%0Aqqsca = np.zeros(num)%0A%0Afor i in range(num) :%0A qext, qsca, qabs, qback, g = miepython.mie(m%5Bi%5D,x%5Bi%5D)%0A qqsca%5Bi%5D=qsca%0A %0Aplt.plot(lam*1000,qqsca)%0A%0Aplt.title(%22BK7 glass spheres 4 micron diameter%22)%0Aplt.xlabel(%22Wavelength (nm)%22)%0Aplt.ylabel(%22Scattering Efficiency (-)%22)%0Aplt.show()%0A
d1fe5a06f5e082fd8196f510e2eba7daa3468ef8
Add duplicate_nodes.py file
duplicate_nodes.py
duplicate_nodes.py
Python
0.000003
@@ -0,0 +1,656 @@ +from shutil import copytree, ignore_patterns%0Aimport glob%0Aimport os%0Aimport sys%0A%0A%0Aif __name__ == '__main__':%0A data_dir = './parsedData/'%0A use_symlink = True%0A%0A orig_nodes = os.listdir(data_dir)%0A orig_nodes = %5Bos.path.basename(i) for i in glob.glob(os.path.join(data_dir, '1*'))%5D%0A%0A for dup_cnt in range(100):%0A for orig_node in orig_nodes:%0A src = os.path.join(data_dir, orig_node)%0A dst = os.path.join(data_dir, 'd%25s_%2504d' %25 (orig_node, dup_cnt))%0A if use_symlink:%0A src = os.path.relpath(src, data_dir)%0A os.symlink(src, dst)%0A else:%0A copytree(src, dst)%0A
77f812f76966b90c27131fd65968f548afcdcace
Add loader for basic csv layers without geoms
svir/dialogs/load_basic_csv_as_layer_dialog.py
svir/dialogs/load_basic_csv_as_layer_dialog.py
Python
0
@@ -0,0 +1,2826 @@ +# -*- coding: utf-8 -*-%0A# /***************************************************************************%0A# Irmt%0A# A QGIS plugin%0A# OpenQuake Integrated Risk Modelling Toolkit%0A# -------------------%0A# begin : 2013-10-24%0A# copyright : (C) 2018 by GEM Foundation%0A# email : [email protected]%0A# ***************************************************************************/%0A#%0A# OpenQuake is free software: you can redistribute it and/or modify it%0A# under the terms of the GNU Affero General Public License as published%0A# by the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# OpenQuake is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with OpenQuake. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0Aimport os%0Aimport tempfile%0Afrom svir.utilities.utils import import_layer_from_csv%0Afrom svir.utilities.shared import OQ_BASIC_CSV_TO_LAYER_TYPES%0Afrom svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog%0A%0A%0Aclass LoadBasicCsvAsLayerDialog(LoadOutputAsLayerDialog):%0A %22%22%22%0A Modal dialog to load as layer a basic csv with no geometries, to be%0A browsed through its attribute table%0A %22%22%22%0A%0A def __init__(self, iface, viewer_dock, session, hostname, calc_id,%0A output_type, path=None, mode=None):%0A assert output_type in OQ_BASIC_CSV_TO_LAYER_TYPES, output_type%0A LoadOutputAsLayerDialog.__init__(%0A self, iface, viewer_dock, session, hostname, calc_id,%0A output_type, path, mode)%0A self.create_file_size_indicator()%0A self.setWindowTitle('Load %25s from CSV, as layer' %25 output_type)%0A self.populate_out_dep_widgets()%0A self.adjustSize()%0A self.set_ok_button()%0A%0A def set_ok_button(self):%0A self.ok_button.setEnabled(bool(self.path))%0A%0A def populate_out_dep_widgets(self):%0A self.show_file_size()%0A%0A def load_from_csv(self):%0A if self.mode == 'testing':%0A dest_shp = tempfile.mkstemp(suffix='.shp')%5B1%5D%0A else:%0A dest_shp = None # the destination file will be selected via GUI%0A csv_path = self.path_le.text()%0A # extract the name of the csv file and remove the extension%0A layer_name = os.path.splitext(os.path.basename(csv_path))%5B0%5D%0A self.layer = import_layer_from_csv(%0A self, csv_path, layer_name, self.iface,%0A save_as_shp=False, dest_shp=dest_shp,%0A zoom_to_layer=False, has_geom=False)%0A
f2a359664bf69a6c8e883d460a49c986b511b80e
add file
eptools/gspread.py
eptools/gspread.py
Python
0
@@ -0,0 +1,1445 @@ +%22%22%22%0AFunctions to access the data in google drive spreadsheets%0A%22%22%22%0A%0Aimport pandas as pd%0A%0Afrom docstamp.gdrive import (get_spreadsheet,%0A worksheet_to_dict)%0A%0A%0Adef get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None, start_row=1):%0A %22%22%22 Return the content of the spreadsheet in the ws_tab_idx tab of%0A the spreadsheet with doc_key as a pandas DataFrame.%0A%0A Parameters%0A ----------%0A api_key_file: str%0A Path to the Google API key json file.%0A%0A doc_key: str%0A%0A ws_tab_idx: int%0A Index of the worksheet within the spreadsheet.%0A%0A header: List%5Bstr%5D%0A List of values to assign to the header of the result.%0A%0A start_row: int%0A Row index from where to start collecting the data.%0A%0A Returns%0A -------%0A content: pandas.DataFrame%0A %22%22%22%0A spread = get_spreadsheet(api_key_file, doc_key)%0A ws = spread.get_worksheet(ws_tab_idx)%0A%0A ws_dict = worksheet_to_dict(ws, header=header, start_row=start_row)%0A return pd.DataFrame(ws_dict)%0A%0A%0Adef find_one_row(substr, df, col_name):%0A %22%22%22 Return one row from %60df%60. The returned row has in %60col_name%60 column%0A a value with a sub-string as %60substr.%0A%0A Raise KeyError if no row is found.%0A %22%22%22%0A for name in df%5Bcol_name%5D:%0A if substr.lower() in name.lower():%0A return df%5Bdf%5Bcol_name%5D == name%5D%0A%0A raise KeyError('Could not find %7B%7D in the '%0A 'pandas dataframe.'.format(substr))%0A
04876b4bea96f983c722cb9bf7845c7cc3b0ecef
add oauth2 example
examples/oauth2.py
examples/oauth2.py
Python
0
@@ -0,0 +1,233 @@ +from imap_tools import MailBox%0A%0A# Authenticate to account using OAuth 2.0 mechanism%0Awith MailBox('imap.my.ru').xoauth2('user', 'token123', 'INBOX') as mailbox:%0A for msg in mailbox.fetch():%0A print(msg.date_str, msg.subject)%0A
c9a0fb540a9ee8005c1ee2d70613c39455891bee
Add analyze_bound_horizontal tests module
tests/plantcv/test_analyze_bound_horizontal.py
tests/plantcv/test_analyze_bound_horizontal.py
Python
0.000001
@@ -0,0 +1,1599 @@ +import pytest%0Aimport cv2%0Afrom plantcv.plantcv import analyze_bound_horizontal, outputs%0A%0A%[email protected]('pos,exp', %5B%5B200, 58%5D, %5B-1, 0%5D, %5B100, 0%5D, %5B150, 11%5D%5D)%0Adef test_analyze_bound_horizontal(pos, exp, test_data):%0A # Clear previous outputs%0A outputs.clear()%0A # Read in test data%0A img = cv2.imread(test_data.small_rgb_img)%0A # img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))%0A mask = cv2.imread(test_data.small_bin_img, -1)%0A object_contours = test_data.load_composed_contours(test_data.small_composed_contours_file)%0A # _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300)%0A # _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)%0A # _ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)%0A # _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)%0A _ = analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=pos)%0A assert outputs.observations%5B%22default%22%5D%5B%22height_above_reference%22%5D%5B%22value%22%5D == exp%0A%0A%0Adef test_analyze_bound_horizontal_grayscale_image(test_data):%0A # Read in test data%0A img = cv2.imread(test_data.small_gray_img, -1)%0A mask = cv2.imread(test_data.small_bin_img, -1)%0A object_contours = test_data.load_composed_contours(test_data.small_composed_contours_file)%0A boundary_img = analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=200)%0A assert len(boundary_img.shape) == 3%0A
198cf78895db88a8986926038e817ebb2bf75eb2
add migration for notification tables
portal/migrations/versions/458dd2fc1172_.py
portal/migrations/versions/458dd2fc1172_.py
Python
0
@@ -0,0 +1,1399 @@ +from alembic import op%0Aimport sqlalchemy as sa%0A%0A%0A%22%22%22empty message%0A%0ARevision ID: 458dd2fc1172%0ARevises: 8ecdd6381235%0ACreate Date: 2017-12-21 16:38:49.659073%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '458dd2fc1172'%0Adown_revision = '8ecdd6381235'%0A%0A%0Adef upgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.create_table('notifications',%0A sa.Column('id', sa.Integer(), nullable=False),%0A sa.Column('name', sa.Text(), nullable=False),%0A sa.Column('content', sa.Text(), nullable=False),%0A sa.Column('created_at', sa.DateTime(), nullable=False),%0A sa.PrimaryKeyConstraint('id'),%0A sa.UniqueConstraint('name')%0A )%0A op.create_table('user_notifications',%0A sa.Column('id', sa.Integer(), nullable=False),%0A sa.Column('user_id', sa.Integer(), nullable=False),%0A sa.Column('notification_id', sa.Integer(), nullable=False),%0A sa.ForeignKeyConstraint(%5B'notification_id'%5D, %5B'notifications.id'%5D, ondelete='CASCADE'),%0A sa.ForeignKeyConstraint(%5B'user_id'%5D, %5B'users.id'%5D, ondelete='CASCADE'),%0A sa.PrimaryKeyConstraint('id'),%0A sa.UniqueConstraint('user_id', 'notification_id', name='_user_notification')%0A )%0A # ### end Alembic commands ###%0A%0A%0Adef downgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.drop_table('user_notifications')%0A op.drop_table('notifications')%0A # ### end Alembic commands ###%0A
8c7fa4e16805dc9e8adbd5615c610be8ba92c444
Add argparse tests for gatherkeys
ceph_deploy/tests/parser/test_gatherkeys.py
ceph_deploy/tests/parser/test_gatherkeys.py
Python
0
@@ -0,0 +1,1058 @@ +import pytest%0A%0Afrom ceph_deploy.cli import get_parser%0A%0A%0Aclass TestParserGatherKeys(object):%0A%0A def setup(self):%0A self.parser = get_parser()%0A%0A def test_gather_help(self, capsys):%0A with pytest.raises(SystemExit):%0A self.parser.parse_args('gatherkeys --help'.split())%0A out, err = capsys.readouterr()%0A assert 'usage: ceph-deploy gatherkeys' in out%0A assert 'positional arguments:' in out%0A assert 'optional arguments:' in out%0A%0A def test_gatherkeys_host_required(self, capsys):%0A with pytest.raises(SystemExit):%0A self.parser.parse_args('gatherkeys'.split())%0A out, err = capsys.readouterr()%0A assert %22error: too few arguments%22 in err%0A%0A def test_gatherkeys_one_host(self):%0A args = self.parser.parse_args('gatherkeys host1'.split())%0A assert args.mon == %5B'host1'%5D%0A%0A def test_gatherkeys_multiple_hosts(self):%0A hostnames = %5B'host1', 'host2', 'host3'%5D%0A args = self.parser.parse_args(%5B'gatherkeys'%5D + hostnames)%0A assert args.mon == hostnames%0A
2803b237af18c6d5cd0613eaf4eccf2b61e65100
Create afImgPanel.py
scripts/afImgPanel.py
scripts/afImgPanel.py
Python
0.000001
@@ -0,0 +1,885 @@ +import pymel.core as pm%0Aimport pymel.all as pa%0A%0AimgOp = 0.3%0AimgDep = 10%0A%0A#get current camera%0AcurCam = pm.modelPanel(pm.getPanel(wf=True),q=True,cam=True)%0A#select image and creat imagePlane and setup%0AfileNm = pm.fileDialog2(ds=0,fm=1,cap='open',okc='Select Image')%0AImgPln = pm.imagePlane(fn=fileNm%5B0%5D,lookThrough=curCam,maintainRatio=1)%0Apm.setAttr(ImgPln%5B1%5D+'.displayOnlyIfCurrent',True)%0Apm.setAttr(ImgPln%5B0%5D+'.translateZ',-pm.getAttr(curCam+'.translateZ')/3+-imgDep)%0Apm.setAttr(ImgPln%5B1%5D+'.alphaGain',imgOp)%0Apm.setAttr(ImgPln%5B1%5D+'.textureFilter',1)%0A%0A#aligh to the camera%0A#create locator to be the parent and then create parent constraint%0ApLoc = pm.spaceLocator()%0Apm.parent(ImgPln%5B0%5D,pLoc)%0Apm.parentConstraint(curCam,pLoc)%0A%0A#Toggle image plane visibility%0Aif(pm.getAttr(ImgPln%5B1%5D+'.visibility')):%0A pm.setAttr(ImgPln%5B1%5D+'.visibility',0)%0Aelse:%0A pm.setAttr(ImgPln%5B1%5D+'.visibility',1)%0A
f24fe32329625ec037a9afc8d3bdeed5f41e69a0
Add a script for easy diffing of two Incars.
scripts/diff_incar.py
scripts/diff_incar.py
Python
0.999906
@@ -0,0 +1,1158 @@ +#!/usr/bin/env python%0A%0A'''%0ACreated on Nov 12, 2011%0A'''%0A%0A__author__=%22Shyue Ping Ong%22%0A__copyright__ = %22Copyright 2011, The Materials Project%22%0A__version__ = %220.1%22%0A__maintainer__ = %22Shyue Ping Ong%22%0A__email__ = %[email protected]%22%0A__date__ = %22Nov 12, 2011%22%0A%0Aimport sys%0Aimport itertools%0Afrom pymatgen.io.vaspio import Incar%0Afrom pymatgen.util.string_utils import str_aligned%0A%0Afilepath1 = sys.argv%5B1%5D%0Afilepath2 = sys.argv%5B2%5D%0Aincar1 = Incar.from_file(filepath1)%0Aincar2 = Incar.from_file(filepath2) %0A%0Adef format_lists(v):%0A if isinstance(v, (tuple, list)):%0A return %22 %22.join(%5Bstr(i) + %22*%22 + str(len(tuple(group))) for (i,group) in itertools.groupby(v)%5D)%0A return v%0A%0Ad = incar1.diff(incar2)%0Aoutput = %5B%5B'SAME PARAMS','', ''%5D%5D%0Aoutput.append(%5B'---------------','', ''%5D)%0Aoutput.extend(%5B(k,format_lists(v),format_lists(v)) for k,v in d%5B'Same'%5D.items() if k != %22SYSTEM%22%5D)%0Aoutput.append(%5B'','', ''%5D)%0Aoutput.append(%5B'DIFFERENT PARAM','', ''%5D)%0Aoutput.append(%5B'---------------','', ''%5D)%0Aoutput.extend(%5B(k,format_lists(v%5B'INCAR1'%5D),format_lists(v%5B'INCAR2'%5D)) for k, v in d%5B'Different'%5D.items() if k != %22SYSTEM%22%5D)%0Aprint str_aligned(output, %5B'', filepath1, filepath2%5D)
918ab0bdd0a828c87233129069302199a886f805
Fix not looping issue in uix/video.py
kivy/uix/video.py
kivy/uix/video.py
''' Video ===== The :class:`Video` widget is used to display video files and streams. Depending on your Video core provider, platform, and plugins, you will be able to play different formats. For example, the pygame video provider only supports MPEG1 on Linux and OSX. GStreamer is more versatile, and can read many video containers and codecs such as MKV, OGV, AVI, MOV, FLV (if the correct gstreamer plugins are installed). Our :class:`~kivy.core.video.VideoBase` implementation is used under the hood. Video loading is asynchronous - many properties are not available until the video is loaded (when the texture is created):: def on_position_change(instance, value): print('The position in the video is', value) def on_duration_change(instance, value): print('The duration of the video is', video) video = Video(source='PandaSneezes.avi') video.bind(position=on_position_change, duration=on_duration_change) ''' __all__ = ('Video', ) from kivy.clock import Clock from kivy.uix.image import Image from kivy.core.video import Video as CoreVideo from kivy.resources import resource_find from kivy.properties import (BooleanProperty, NumericProperty, ObjectProperty, OptionProperty) class Video(Image): '''Video class. See module documentation for more information. ''' state = OptionProperty('stop', options=('play', 'pause', 'stop')) '''String, indicates whether to play, pause, or stop the video:: # start playing the video at creation video = Video(source='movie.mkv', state='play') # create the video, and start later video = Video(source='movie.mkv') # and later video.state = 'play' :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults to 'play'. ''' play = BooleanProperty(False) ''' .. deprecated:: 1.4.0 Use :attr:`state` instead. Boolean, indicates whether the video is playing or not. You can start/stop the video by setting this property:: # start playing the video at creation video = Video(source='movie.mkv', play=True) # create the video, and start later video = Video(source='movie.mkv') # and later video.play = True :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. .. deprecated:: 1.4.0 Use :attr:`state` instead. ''' eos = BooleanProperty(False) '''Boolean, indicates whether the video has finished playing or not (reached the end of the stream). :attr:`eos` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' loaded = BooleanProperty(False) '''Boolean, indicates whether the video is loaded and ready for playback or not. .. versionadded:: 1.6.0 :attr:`loaded` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' position = NumericProperty(-1) '''Position of the video between 0 and :attr:`duration`. The position defaults to -1 and is set to a real position when the video is loaded. :attr:`position` is a :class:`~kivy.properties.NumericProperty` and defaults to -1. ''' duration = NumericProperty(-1) '''Duration of the video. The duration defaults to -1, and is set to a real duration when the video is loaded. :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and defaults to -1. ''' volume = NumericProperty(1.) '''Volume of the video, in the range 0-1. 1 means full volume, 0 means mute. :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults to 1. ''' options = ObjectProperty({}) '''Options to pass at Video core object creation. .. versionadded:: 1.0.4 :attr:`options` is an :class:`kivy.properties.ObjectProperty` and defaults to {}. ''' def __init__(self, **kwargs): self._video = None super(Image, self).__init__(**kwargs) self.bind(source=self._trigger_video_load) if self.source: self._trigger_video_load() def seek(self, percent): '''Change the position to a percentage of duration. Percentage must be a value between 0-1. .. warning:: Calling seek() before the video is loaded has no impact. .. versionadded:: 1.2.0 ''' if self._video is None: raise Exception('Video not loaded.') self._video.seek(percent) def _trigger_video_load(self, *largs): Clock.unschedule(self._do_video_load) Clock.schedule_once(self._do_video_load, -1) def _do_video_load(self, *largs): if CoreVideo is None: return if self._video: self._video.stop() if not self.source: self._video = None self.texture = None else: filename = self.source # Check if filename is not url if not '://' in filename: filename = resource_find(filename) self._video = CoreVideo(filename=filename, **self.options) self._video.volume = self.volume self._video.bind(on_load=self._on_load, on_frame=self._on_video_frame, on_eos=self._on_eos) if self.state == 'play' or self.play: self._video.play() self.duration = 1. self.position = 0. def on_play(self, instance, value): value = 'play' if value else 'stop' return self.on_state(instance, value) def on_state(self, instance, value): if not self._video: return if value == 'play': if self.eos: self._video.stop() self._video.position = 0. self._video.eos = False self.eos = False self._video.play() elif value == 'pause': self._video.pause() else: self._video.stop() self._video.position = 0 self._video.eos = False def _on_video_frame(self, *largs): self.duration = self._video.duration self.position = self._video.position self.texture = self._video.texture self.canvas.ask_update() def _on_eos(self, *largs): if self._video.eos != 'loop': self.state = 'stop' self.eos = True def _on_load(self, *largs): self.loaded = True self._on_video_frame(largs) def on_volume(self, instance, value): if self._video: self._video.volume = value def unload(self): '''Unload the video. The playback will be stopped. .. versionadded:: 1.8.0 ''' if self._video: self._video.stop() self._video.unload() self._video = None if __name__ == '__main__': from kivy.app import App import sys if len(sys.argv) != 2: print("usage: %s file" % sys.argv[0]) sys.exit(1) class VideoApp(App): def build(self): self.v = Video(source=sys.argv[1], state='play') self.v.bind(state=self.replay) return self.v def replay(self, *args): if self.v.state == 'stop': self.v.state = 'play' VideoApp().run()
Python
0.000001
@@ -4089,16 +4089,92 @@ _load)%0A%0A + if %22eos%22 in kwargs:%0A self.options%5B%22eos%22%5D = kwargs%5B%22eos%22%5D%0A
c36ae47bee44ff8aa8eaf17f8ded88192d7a6573
implement query term search
queryAnswer.py
queryAnswer.py
Python
0.999171
@@ -0,0 +1,521 @@ +import pickle%0A# Loads the posting Index%0Aindex = open(%22posIndex.dat%22, %22rb%22);%0AposIndex = pickle.load(index);%0Aprint posIndex%5B'made'%5D;%0A%0Aquery = %22Juan made of youtube%22%0A# query = raw_input('Please enter your query: ');%0A%0AqueryTerms = ' '.join(query.split());%0AqueryTerms = queryTerms.split(' ');%0Ak = len(queryTerms);%0Aprint (queryTerms);%0A%0Ai = 0;%0Afor term in queryTerms:%0A%09queryTerms%5Bi%5D = term.lower();%0A%09if term in posIndex.keys():%0A%09%09print %22%25s --%3E%5Ct %25s%5Cn%22 %25 (term, posIndex%5Bterm%5D);%0A%09else:%0A%09%09print %22%25s --%3E%5Cn%22 %25 (term);%0A%09i = i +1;%0A%09%0A