commit: stringlengths 40-40
subject: stringlengths 1-3.25k
old_file: stringlengths 4-311
new_file: stringlengths 4-311
old_contents: stringlengths 0-26.3k
lang: stringclasses (3 values)
proba: float64, 0-1
diff: stringlengths 0-7.82k
5314f764dcfc62b3ec3fd29fdd86ae08dddfe08d
fix typo
main.py
main.py
import sys
from time import ctime
from tweepy import API
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from credentials import *
from tweepy.utils import import_simplejson
import markovify
import random
import argparse

json = import_simplejson()


class Listener(StreamListener):
    def __init__(self, api, followed_user_id, followed_user_handle, mock_mode):
        super().__init__(api)
        self.tweet_data = []
        self.followed_user_id = followed_user_id
        self.followed_user_handle = followed_user_handle
        self.mock_mode = mock_mode
        self.reply_list = []
        self.next_reply = ''
        self.load_next_reply(mock_mode)

    def on_error(self, error):
        print("Returned error code %s" % error)
        return False

    def on_status(self, status):
        if status.user.id == self.followed_user_id:
            tweet_text = '@%s %s' % (self.followed_user_handle, self.next_reply)
            self.api.update_status(tweet_text)
            print(''%s: Tweeted:'' % (ctime(), tweet_text))
            if self.mock_mode:
                self.update_mock_text(status.text)
            self.load_next_reply(self.mock_mode)

    def load_next_reply(self, mock=False):
        if not mock:
            with open('reply_list.txt', 'r') as reply_list_file:
                self.reply_list = reply_list_file.readlines()
            self.next_reply = random.choice(self.reply_list)
        else:
            with open('user_tweet_history.txt') as user_tweet_history_file:
                text = user_tweet_history_file.read()
            text_model = markovify.Text(text)
            self.next_reply = text_model.make_short_sentence(140)

    @staticmethod
    def update_mock_text(text):
        with open('user_tweet_history.txt', 'wa') as user_tweet_history_fd:
            user_tweet_history_fd.write(text)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--handle', required=True, type=str, dest='followed_handle',
                        action='store', help='Twitter handle (without @)')
    parser.add_argument('--mock', dest='mock_mode', default=False, action='store_true',
                        help='enable mock mode')
    args = parser.parse_args()

    auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    api = API(auth)

    found_users = api.lookup_users(screen_names=[str(args.followed_handle)])
    print found_users
    if len(found_users) != 1:
        print('Lookup for twitter handle %s failed' % args.followed_handle)
        sys.exit()
    followed_user_id = found_users[0].id
    print followed_user_id

    twitterStream = Stream(auth, Listener(api, followed_user_id, args.followed_handle, args.mock_mode))
    twitterStream.filter(follow=[str(followed_user_id)], async=True)
Python
0.999991
@@ -1044,17 +1044,16 @@ print(' -' %25s: Twee @@ -1057,17 +1057,16 @@ weeted:' -' %25 (ctim
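Decoded (each `%25` in the stored diff stands for a literal `%`), the two hunks drop the doubled quotes that made the print line a syntax error; a sketch of the change:

```python
# before (as in old_contents above -- this does not even parse):
print(''%s: Tweeted:'' % (ctime(), tweet_text))
# after this commit:
print('%s: Tweeted:' % (ctime(), tweet_text))
```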
c99bee3628e55873e5bb9b6e98fd0455b6b45c64
add examples for recipe 1.14
code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/14-sorting_objects_without_native_comparison_support/main.py
code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/14-sorting_objects_without_native_comparison_support/main.py
Python
0
@@ -0,0 +1,551 @@ +def example_1():%0A class User:%0A def __init__(self, user_id):%0A self.user_id = user_id%0A def __repr__(self):%0A return 'User(%7B%7D)'.format(self.user_id)%0A%0A users = %5BUser(23), User(3), User(99)%5D%0A print(users)%0A print(sorted(users, key = lambda u: u.user_id))%0A%0A from operator import attrgetter%0A print(sorted(users, key = attrgetter('user_id')))%0A%0A print(min(users, key = attrgetter('user_id')))%0A print(max(users, key = attrgetter('user_id')))%0A%0A %0Aif __name__ == '__main__':%0A example_1()%0A
836d4ed6a3ddda4d381345a34358714db74af757
Add a helper push program
share/examples/push.py
share/examples/push.py
Python
0.000001
@@ -0,0 +1,526 @@ +import sys%0Aimport zmq%0Afrom zmq.utils.strtypes import b%0A%0Adef main():%0A # Get the arguments%0A if len(sys.argv) != 4:%0A print(%22Usage: push.py url topic num_messages%22)%0A sys.exit(1)%0A%0A url = sys.argv%5B1%5D%0A topic = sys.argv%5B2%5D%0A num_messages = int(sys.argv%5B3%5D)%0A%0A # Create the socket%0A context = zmq.Context()%0A sock = context.socket(zmq.PUSH)%0A sock.connect(url)%0A%0A for i in range(0, num_messages):%0A sock.send_multipart(%5Bb(topic), b(%22id%22), b(str(i))%5D)%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
a4924a6928facdda942844b1bac8f0a53eb9ff4b
add 1 OOP file: slots
use_slots.py
use_slots.py
Python
0
@@ -0,0 +1,324 @@ +#!/user/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0Aclass Student(object):%0A _slots_ = ('name', 'age')%0A%0A%0Aclass GraduateStudent(Student):%0A pass%0A%0A%0As = Student()%0As.name = 'Michael'%0As.age = 15%0Atry:%0A s.score = 88%0Aexcept AttributeError as e:%0A print('AttributeError:', e)%0A%0Ag = GraduateStudent()%0Ag.score = 99%0Aprint(g.score)%0A
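Note the added file spells the attribute `_slots_`; the special attribute is `__slots__` (double underscores), so the demo's AttributeError branch never fires. A minimal corrected sketch of the mechanism the file means to show:

```python
class Student(object):
    __slots__ = ('name', 'age')  # restricts instances to these attributes

s = Student()
s.name = 'Michael'
try:
    s.score = 88                 # now raises AttributeError as intended
except AttributeError as e:
    print('AttributeError:', e)
```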
b6b2f268693764deb70553b00904af4aa6def15f
add lamp_genie.py - searches Aladin offline stores and reports the ISBNs of books matching a keyword
lamp_genie.py
lamp_genie.py
Python
0
@@ -0,0 +1,1363 @@ +#-*- coding: utf-8 -*-%0Aimport requests%0Aimport BeautifulSoup%0Aimport sys%0Areload(sys)%0Asys.setdefaultencoding('utf-8')%0A%0Amobile_site_url = %22http://www.aladin.co.kr%22%0Asearch_url = %22http://off.aladin.co.kr/usedstore/wsearchresult.aspx?SearchWord=%25s&x=0&y=0%22%0Abook_url = %22http://off.aladin.co.kr/usedstore/wproduct.aspx?ISBN=%25d%22%0A%0Aresponse = requests.get(mobile_site_url + '/m/off/gate.aspx?')%0Acontent = response.content%0A%0Asearch_text = requests.utils.quote(raw_input(%22%EA%B2%80%EC%83%89%ED%95%A0 %EC%B1%85 %EC%A0%9C%EB%AA%A9%EC%9D%B4%EB%82%98 %EA%B8%80%EC%93%B4%EC%9D%B4 : %22).encode('cp949'))%0Ashop_list = BeautifulSoup.BeautifulSoup(content).findAll('td')%0A%0As = requests.Session()%0Afor x in shop_list:%0A print %22=%22 * 50%0A try:%0A shop_location = x.text%0A url = x.find('a')%0A response = s.get(mobile_site_url + url%5B'href'%5D)%0A url = search_url %25 search_text%0A print url%0A response = s.get(url)%0A content = response.content%0A result = BeautifulSoup.BeautifulSoup(content).find('div', %7B'id':'Search3_Result'%7D)%0A try:%0A result_list = set()%0A for x in result.findAll('a'):%0A search_code = str(x).split('ISBN=')%0A if search_code.__len__() %3E 1:%0A isbn = search_code%5B1%5D.split('%22')%5B0%5D%0A result_list.add(isbn)%0A print shop_location, result_list%0A except:%0A print set()%0A except Exception as e:%0A pass%0A
de4e54e1de5905600d539df781994612f03e0672
Add files via upload
matrix.py
matrix.py
Python
0
@@ -0,0 +1,1120 @@ +import numpy as np%0A%0Adef parse_to_matrix(input_file_path, div = '%5Ct', data_type = int):%0A%09input_file = open(input_file_path, 'r')%0A%09matrix = %5B map(data_type,line.strip().split('%25s' %25 div)) for line in input_file if line.strip() != %22%22 %5D%0A%09input_file.close()%0A%09return np.array(matrix)%0A%0Adef parse_to_vectors(input_file_path, div = '%5Ct', data_type = int):%0A%09input_file = open(input_file_path, 'r')%0A%09matrix = %5B map(data_type,line.strip().split('%25s' %25 div)) for line in input_file if line.strip() != %22%22 %5D%0A%09input_file.close()%0A%09return np.array(matrix)%0A%0A%0Adef write_matrix_into_file(matrix, output_file_path):%0A%09output = open(output_file_path, 'w')%0A%09size = len(matrix)%0A%09for row_i in range(size):%0A%09%09vec = matrix%5Brow_i%5D%0A%09%09output.write('%25s' %25 ' '.join(str(i) for i in vec))%0A%09%09output.write('%5Cn')%0A%09output.close()%0A%0Adef write_matrix_into_file(matrix, heads, output_file_path):%0A%09output = open(output_file_path, 'w')%0A%09size = len(matrix)%0A%09for row_i in range(size):%0A%09%09vec = matrix%5Brow_i%5D%0A%09%09head = heads%5Brow_i%5D%0A%09%09output.write('%25s' %25 head)%0A%09%09output.write(' ')%0A%09%09output.write('%25s' %25 ' '.join(str(i) for i in vec))%0A%09%09output.write('%5Cn')%0A%09output.close()%0A
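Note the added file defines `write_matrix_into_file` twice with different signatures; Python has no overloading, so the second `def` silently replaces the first and the two-argument variant becomes unreachable. A sketch of a rename that keeps both callable (the new name is hypothetical):

```python
def write_matrix_into_file(matrix, output_file_path):
    ...  # two-argument variant, unchanged

def write_headed_matrix_into_file(matrix, heads, output_file_path):
    ...  # the three-argument variant, under its own name
```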
42e485b7367e1a707a73b834f39fc6d3f356b61d
remove valid config check
verbs/gdb.py
verbs/gdb.py
"""implement 'gdb' verb (debugs a single target with gdb) gdb gdb [target] gdb [target] [config] """ import subprocess from mod import log, util, config, project, settings #------------------------------------------------------------------------------- def gdb(fips_dir, proj_dir, cfg_name, target=None) : """debug a single target with gdb""" # prepare proj_name = util.get_project_name_from_dir(proj_dir) util.ensure_valid_project_dir(proj_dir) # load the config(s) configs = config.load(fips_dir, proj_dir, cfg_name) if configs : for cfg in configs : # check if config is valid config_valid, _ = config.check_config_valid(fips_dir, cfg, print_errors = True) if config_valid : deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg) log.colored(log.YELLOW, "=== gdb: {}".format(cfg['name'])) cmdLine = ['gdb', target] subprocess.call(args = cmdLine, cwd = deploy_dir) else : log.error("Config '{}' not valid in this environment".format(cfg['name'])) else : log.error("No valid configs found for '{}'".format(cfg_name)) if num_valid_configs != len(configs) : log.error('{} out of {} configs failed!'.format(len(configs) - num_valid_configs, len(configs))) return False else : log.colored(log.GREEN, '{} configs built'.format(num_valid_configs)) return True #------------------------------------------------------------------------------- def run(fips_dir, proj_dir, args) : """debug a single target with gdb""" if not util.is_valid_project_dir(proj_dir) : log.error('must be run in a project directory') tgt_name = None cfg_name = None if len(args) > 0 : tgt_name = args[0] if len(args) > 1: cfg_name = args[1] if not cfg_name : cfg_name = settings.get(proj_dir, 'config') if not tgt_name : tgt_name = settings.get(proj_dir, 'target') if not tgt_name : log.error('no target specified') gdb(fips_dir, proj_dir, cfg_name, tgt_name) #------------------------------------------------------------------------------- def help() : """print 'gdb' help""" log.info(log.YELLOW + "fips gdb\n" "fips gdb [target]\n" "fips gdb [target] [config]\n" + log.DEF + " debug a single target in current or named config")
Python
0.000001
@@ -1203,269 +1203,8 @@ ))%0A%0A - if num_valid_configs != len(configs) :%0A log.error('%7B%7D out of %7B%7D configs failed!'.format(len(configs) - num_valid_configs, len(configs)))%0A return False%0A else :%0A log.colored(log.GREEN, '%7B%7D configs built'.format(num_valid_configs))%0A
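Decoded (`%0A` is a newline, `%7B%7D` is `{}`), the hunk deletes the summary block visible at the end of `gdb()` above, leaving the trailing `return True` behind; `num_valid_configs` is never assigned in this verb, so the block could only raise a NameError:

```python
# removed by this commit:
    if num_valid_configs != len(configs) :
        log.error('{} out of {} configs failed!'.format(len(configs) - num_valid_configs, len(configs)))
        return False
    else :
        log.colored(log.GREEN, '{} configs built'.format(num_valid_configs))
```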
0118316df964c09198747255f9f3339ed736066d
Create test.py
test/test.py
test/test.py
Python
0.000005
@@ -0,0 +1,189 @@ +# TweetPy%0A# Test%0A%0Aimport unittest%0Aimport tweet%0A%0Aclass SampleTestClass(unittest.TestCase):%0A%0A def sampleTest(self):%0A #do something%0A a = 1%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
ba180a1798296346116a7c11557ddbe4aa40da8b
Solving p20
p020.py
p020.py
Python
0.999485
@@ -0,0 +1,55 @@ +sum(%5Bint(digit) for digit in str(math.factorial(100))%5D)
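As committed, the one-liner references `math` without importing it and discards the result; a runnable sketch:

```python
import math

# digit sum of 100! (Project Euler problem 20)
print(sum(int(digit) for digit in str(math.factorial(100))))  # 648
```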
1e96ec2104e6e30af3bcf7c9dd61bd8f157b7519
Solving p028
p028.py
p028.py
Python
0.999362
@@ -0,0 +1,314 @@ +def spiral_corners(size):%0A yield 1%0A for x in range(3, size+1, 2):%0A base_corner = x**2%0A corner_diff = x-1%0A for corner in (3,2,1,0):%0A yield base_corner-corner_diff*corner%0A%0Adef solve_p026():%0A return sum(spiral_corners(1001))%0A%0Aif __name__ == '__main__':%0A print solve_p026()%0A
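The added file solves problem 28 with a function named `solve_p026` and a Python 2 `print` statement; the same spiral-corner generator as a Python 3 sketch:

```python
def spiral_corners(size):
    yield 1                              # centre of the spiral
    for x in range(3, size + 1, 2):      # odd ring sizes 3, 5, ..., size
        base_corner = x ** 2             # bottom-right corner of each ring is x squared
        corner_diff = x - 1              # corners sit x-1 steps apart along the ring
        for corner in (3, 2, 1, 0):
            yield base_corner - corner_diff * corner

if __name__ == '__main__':
    print(sum(spiral_corners(1001)))
```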
fa4b01102d1226ccc3dcf58119053bbc8839c36e
add ex42
lpthw/ex42.py
lpthw/ex42.py
Python
0.998437
@@ -0,0 +1,1108 @@ +#!/usr/bin/env python%0A%0A# Exercise 42: Is-A, Has-A, Objects, and Classes%0A%0A## Animal is-a object (yes, sort of confusing) look at the extra credit%0Aclass Animal(object):%0A pass%0A%0A## ??%0Aclass Dog(Animal):%0A%0A def __init__(self, name):%0A ## ??%0A self.name = name%0A%0A## ??%0Aclass Cat(Animal):%0A%0A def __init__(self, name):%0A ## ??%0A self.name = name%0A%0A## ??%0Aclass Person(object):%0A%0A def __init__(self, name):%0A ## ??%0A self.name = name%0A%0A ## Person has-a pet of some kind%0A self.pet = None%0A%0A## ??%0Aclass Employee(Person):%0A%0A def __init__(self, name, salary):%0A ## ?? hmm what is this strange magic?%0A super(Employee, self).__init__(name)%0A ## ??%0A self.salary = salary%0A%0A## ??%0Aclass Fish(object):%0A pass%0A%0A## ??%0Aclass Salmon(Fish):%0A pass%0A%0A## ??%0Aclass Halibut(Fish):%0A pass%0A%0A%0A## rover is-a Dog%0Arover = Dog(%22Rover%22)%0A%0A## ??%0Asatan = Cat(%22Satan%22)%0A%0A## ??%0Amary = Person(%22Mary%22)%0A%0A## ??%0Amary.pet = satan%0A%0A## ??%0Afrank = Employee(%22Frank%22, 120000)%0A%0A## ??%0Afrank.pet = rover%0A%0A## ??%0Aflipper = Fish()%0A%0A## ??%0Acrouse = Salmon()%0A%0A## ??%0Aharry = Halibut()%0A
1c230f224a34cf34a9d841fa79a092632c47c404
Fix record_wpr.py against an issue of --interactive.
tools/telemetry/telemetry/page/record_wpr.py
tools/telemetry/telemetry/page/record_wpr.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import sys
import tempfile
import time
import urlparse

from telemetry import test
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.page import page_measurement
from telemetry.page import page_measurement_results
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import test_expectations


class RecordPage(page_test.PageTest):
  def __init__(self, measurements):
    # This class overwrites PageTest.Run, so that the test method name is not
    # really used (except for throwing an exception if it doesn't exist).
    super(RecordPage, self).__init__('Run')
    self._action_names = set(
        [measurement().action_name_to_run
         for measurement in measurements.values()
         if measurement().action_name_to_run])
    self.test = None

  def CanRunForPage(self, page):
    return page.url.startswith('http')

  def CustomizeBrowserOptionsForPage(self, page, options):
    for compound_action in self._CompoundActionsForPage(page):
      for action in compound_action:
        action.CustomizeBrowserOptions(options)

  def WillNavigateToPage(self, _, tab):
    """Override to ensure all resources are fetched from network."""
    tab.ClearCache()

  def Run(self, options, page, tab, results):
    # When recording, sleep to catch any resources that load post-onload.
    tab.WaitForDocumentReadyStateToBeComplete()
    if self.test:
      dummy_results = page_measurement_results.PageMeasurementResults()
      dummy_results.WillMeasurePage(page)
      self.test.MeasurePage(page, tab, dummy_results)
      dummy_results.DidMeasurePage()
    else:
      # TODO(tonyg): This should probably monitor resource timing for activity
      # and sleep until 2s since the last network event with some timeout like
      # 20s. We could wrap this up as WaitForNetworkIdle() and share with the
      # speed index metric.
      time.sleep(3)

    # Run the actions for all measurements. Reload the page between
    # actions.
    should_reload = False
    for compound_action in self._CompoundActionsForPage(page):
      if should_reload:
        self.RunNavigateSteps(page, tab)
      self._RunCompoundAction(page, tab, compound_action)
      should_reload = True

  def _CompoundActionsForPage(self, page):
    actions = []
    for action_name in self._action_names:
      if not hasattr(page, action_name):
        continue
      actions.append(page_test.GetCompoundActionFromPage(
          page, action_name, self.options.interactive))
    return actions


def _CreatePageSetForUrl(url):
  ps_name = urlparse.urlparse(url).hostname + '.json'
  ps_path = os.path.join(util.GetBaseDir(), 'page_sets', ps_name)
  ps = {'archive_data_file': '../data/%s' % ps_name,
        'pages': [
          { 'url': url }
        ]
       }
  with open(ps_path, 'w') as f:
    f.write(json.dumps(ps))
  print 'Created new page set %s' % ps_path
  return page_set.PageSet.FromFile(ps_path)


def Main(base_dir):
  measurements = discover.DiscoverClasses(base_dir, base_dir,
                                          page_measurement.PageMeasurement)
  tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
                                   index_by_class_name=True)
  options = browser_options.BrowserFinderOptions()
  parser = options.CreateParser('%prog <PageSet|Measurement|Test|URL>')
  page_runner.AddCommandLineOptions(parser)

  recorder = RecordPage(measurements)
  recorder.AddCommandLineOptions(parser)

  _, args = parser.parse_args()

  if len(args) != 1:
    parser.print_usage()
    sys.exit(1)

  if args[0].endswith('.json'):
    ps = page_set.PageSet.FromFile(args[0])
  elif args[0] in tests:
    recorder.test = tests[args[0]]().test()
    ps = tests[args[0]]().CreatePageSet(options)
  elif args[0] in measurements:
    recorder.test = measurements[args[0]]()
    ps = recorder.test.CreatePageSet(args, options)
  elif args[0].startswith('http'):
    ps = _CreatePageSetForUrl(args[0])
  else:
    parser.print_usage()
    sys.exit(1)

  expectations = test_expectations.TestExpectations()

  # Set the archive path to something temporary.
  temp_target_wpr_file_path = tempfile.mkstemp()[1]
  ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)

  # Do the actual recording.
  options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
  options.browser_options.no_proxy_server = True
  recorder.CustomizeBrowserOptions(options)
  results = page_runner.Run(recorder, ps, expectations, options)

  if results.errors or results.failures:
    logging.warning('Some pages failed. The recording has not been updated for '
                    'these pages.')
    logging.warning('Failed pages:\n%s',
                    '\n'.join(zip(*results.errors + results.failures)[0]))

  if results.skipped:
    logging.warning('Some pages were skipped. The recording has not been '
                    'updated for these pages.')
    logging.warning('Skipped pages:\n%s', '\n'.join(zip(*results.skipped)[0]))

  if results.successes:
    # Update the metadata for the pages which were recorded.
    ps.wpr_archive_info.AddRecordedPages(results.successes)
  else:
    os.remove(temp_target_wpr_file_path)

  return min(255, len(results.failures))
Python
0.000055
@@ -1349,32 +1349,41 @@ ionsForPage(page +, options ):%0A for act @@ -2438,27 +2438,36 @@ ForPage(page +, options ):%0A - if sho @@ -2641,24 +2641,33 @@ e(self, page +, options ):%0A actio @@ -2775,16 +2775,68 @@ ontinue%0A + interactive = options and options.interactive%0A ac @@ -2885,16 +2885,16 @@ omPage(%0A + @@ -2918,21 +2918,8 @@ me, -self.options. inte
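Decoded, the hunks thread the `options` argument through instead of reaching for `self.options`: both call sites gain `, options`, and the helper computes the interactive flag locally. The patched helper reads roughly:

```python
def _CompoundActionsForPage(self, page, options):
    actions = []
    for action_name in self._action_names:
        if not hasattr(page, action_name):
            continue
        interactive = options and options.interactive
        actions.append(page_test.GetCompoundActionFromPage(
            page, action_name, interactive))
    return actions
```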
94bc0d6596aba987943bf40e2289f34240081713
Add lc0041_first_missing_positive.py
lc0041_first_missing_positive.py
lc0041_first_missing_positive.py
Python
0.998416
@@ -0,0 +1,600 @@ +%22%22%22Leetcode 41. First Missing Positive%0AHard%0A%0AURL: https://leetcode.com/problems/first-missing-positive/%0A%0AGiven an unsorted integer array, find the smallest missing positive integer.%0A%0AExample 1:%0AInput: %5B1,2,0%5D%0AOutput: 3%0A%0AExample 2:%0AInput: %5B3,4,-1,1%5D%0AOutput: 2%0A%0AExample 3:%0AInput: %5B7,8,9,11,12%5D%0AOutput: 1%0A%0ANote:%0AYour algorithm should run in O(n) time and uses constant extra space.%0A%22%22%22%0A%0Aclass Solution(object):%0A def firstMissingPositive(self, nums):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
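The committed solver is a `pass` stub. One standard way to meet the O(n)-time, constant-extra-space constraint (an illustration, not necessarily the author's eventual solution) is cyclic index placement:

```python
class Solution(object):
    def firstMissingPositive(self, nums):
        n = len(nums)
        for i in range(n):
            # Swap nums[i] toward index nums[i] - 1 until the value here
            # is out of range or already in place.
            while 1 <= nums[i] <= n and nums[nums[i] - 1] != nums[i]:
                j = nums[i] - 1
                nums[i], nums[j] = nums[j], nums[i]
        for i in range(n):
            if nums[i] != i + 1:
                return i + 1
        return n + 1


assert Solution().firstMissingPositive([3, 4, -1, 1]) == 2
```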
1aba0e5ba6aa91c2aa608c2c94411c59e4a3eca5
Create stack_plot.py
stack_plot.py
stack_plot.py
Python
0.000081
@@ -0,0 +1,1382 @@ +# -*- coding: utf-8 -*-%0A%22%22%22%0AIncludes a function for visualization of data with a stack plot.%0A%22%22%22%0Afrom matplotlib import pyplot as plt%0Afrom matplotlib import ticker%0Aimport random%0A%0Adef stack(number_of_topics, TopicTitles, X, Y):%0A %22%22%22Creates a stack plot for the number of papers published from 2002 to 2014%0A for each topic%22%22%22%0A # random colors as RGB%0A colors = %5B(random.randint(0,255),random.randint(0,255),random.randint(0,255)) for i in range(number_of_topics)%5D%0A%0A # Scale the RGB values to the %5B0, 1%5D range, which is the format matplotlib accepts. %0A for i in range(len(colors)): %0A r, g, b = colors%5Bi%5D %0A colors%5Bi%5D = (r / 255., g / 255., b / 255.) %0A %0A plt.figure(num=1,figsize=(30,27)) %0A ax1 = plt.subplot()%0A %0A x_formatter = ticker.ScalarFormatter(useOffset=False)%0A y_formatter = ticker.ScalarFormatter(useOffset=False)%0A ax1.yaxis.set_major_formatter(y_formatter)%0A ax1.xaxis.set_major_formatter(x_formatter)%0A ax1.set_ylabel('Number of Papers')%0A ax1.set_xlabel('Year of Publication')%0A polys = ax1.stackplot(X, Y, colors=colors)%0A %0A legendProxies = %5B%5D%0A for poly in polys:%0A legendProxies.append(plt.Rectangle((0, 0), 1, 1, fc=poly.get_facecolor()%5B0%5D))%0A %0A plt.legend(legendProxies,TopicTitles,prop=%7B'size':8%7D)%0A plt.tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None)%0A plt.show() %0A
d4ac57f3a328dd98b76f6c8924ddc9d735c32c04
Add py-sphinxcontrib-qthelp package (#13275)
var/spack/repos/builtin/packages/py-sphinxcontrib-qthelp/package.py
var/spack/repos/builtin/packages/py-sphinxcontrib-qthelp/package.py
Python
0
@@ -0,0 +1,788 @@ +# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PySphinxcontribQthelp(PythonPackage):%0A %22%22%22sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp%0A document.%22%22%22%0A%0A homepage = %22http://sphinx-doc.org/%22%0A url = %22https://pypi.io/packages/source/s/sphinxcontrib-qthelp/sphinxcontrib-qthelp-1.0.2.tar.gz%22%0A%0A version('1.0.2', sha256='79465ce11ae5694ff165becda529a600c754f4bc459778778c7017374d4d406f')%0A%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A%0A def test(self):%0A # Requires sphinx, creating a circular dependency%0A pass%0A
6b5587fc7856b5d03b3605e1a31234ff98df88e2
add L3 quiz - Expressions
lesson3/quizExpressions/unit_tests.py
lesson3/quizExpressions/unit_tests.py
Python
0.000001
@@ -0,0 +1,2472 @@ +import re%0A%0Ais_correct = False%0A%0Abrace_regex = %22%7B%7B.*%7D%7D%22%0Acolor_regex = %22(?:brick.)?color%22%0Asize_regex = %22(?:brick.)?size%22%0Aprice_regex = %22(?:brick.)?price%22%0A%0Aheading = widget_inputs%5B%22text1%22%5D%0Abrick_color = widget_inputs%5B%22text2%22%5D%0Abrick_size = widget_inputs%5B%22text3%22%5D%0Abrick_price = widget_inputs%5B%22text4%22%5D%0Abrick_description = widget_inputs%5B%22text5%22%5D%0A%0Acomments = %5B%5D%0Adef commentizer(new):%0A if new not in comments:%0A comments.append(new)%0A%0Aif heading == '':%0A is_correct = True%0Aelse:%0A commentizer(%22Do you think the heading should change if you use a different brick? Why would a different brick make the heading change?%22)%0A%0A#check the brick's color matches a RegEx%0Aif re.search( color_regex, brick_color ):%0A if not re.search( brace_regex, brick_color ):%0A is_correct = False%0A commentizer(%22What you entered into the color field is correct, but it's still regular text. How do you create an expression in Angular?%22)%0A else:%0A is_correct = is_correct and True%0Aelse:%0A is_correct = False%0A commentizer(%22The color field is not correct.%22)%0A%0A#check the brick's size matches a RegEx%0Aif re.search( size_regex, brick_size ):%0A if not re.search( brace_regex, brick_size ):%0A is_correct = False%0A commentizer(%22What you entered into the size field is correct, but it's still regular text. How do you create an expression in Angular?%22)%0A else:%0A is_correct = is_correct and True%0Aelse:%0A is_correct = False%0A commentizer(%22The size field is not correct.%22)%0A%0A#check the brick's price matches a RegEx%0Aif re.search( price_regex, brick_price ):%0A if not re.search( brace_regex, brick_price ):%0A is_correct = False%0A commentizer(%22What you entered into the price field is correct, but it's still regular text. How do you create an expression in Angular?%22)%0A else:%0A is_correct = is_correct and True%0Aelse:%0A is_correct = False%0A commentizer(%22The price field is not correct.%22)%0A%0A# if they're all unchecked%0Aif not any(%5Bheading, brick_color, brick_size, brick_price, brick_description%5D):%0A is_correct = False%0A comments = %5B%5D%0A comments.append('At least one of these should be converted into an expression.%5Cn%5CnLook at the data in the template and ask yourself, %22Will this change if I use a different brick?%22 If the answer is yes, then enter the expression into the appropriate field.')%0A%0A%0Aif is_correct:%0A commentizer(%22Great job!%22)%0A%0Agrade_result%5B%22comment%22%5D = %22%5Cn%5Cn%22.join(comments)%0Agrade_result%5B%22correct%22%5D = is_correct%0A
4731e99882d035a59555e5352311d00c4e122f09
Print useful information about a GTFS feed
onestop/info.py
onestop/info.py
Python
0
@@ -0,0 +1,731 @@ +%22%22%22Provide useful information about a GTFS file.%22%22%22%0Aimport argparse%0A%0Aimport geohash%0Aimport gtfs%0A%0Aif __name__ == %22__main__%22:%0A parser = argparse.ArgumentParser(description='GTFS Information')%0A parser.add_argument('filename', help='GTFS File')%0A parser.add_argument('--debug', help='Show helpful debugging information', action='store_true')%0A args = parser.parse_args()%0A g = gtfs.GTFSReader(args.filename)%0A stops_centroid = g.stops_centroid()%0A stops_centroid_geohash = g.stops_geohash(debug=args.debug)%0A print %22==== GTFS:%22, g.filename%0A print %22Stops centroid:%22,stops_centroid%0A print %22Stops centroid geohash:%22, geohash.encode(stops_centroid)%0A print %22Stops centroid geohash with all stops in neighbors:%22, stops_centroid_geohash%0A
335881f4644a6bb2b5f2abb5b193f39d304dbc71
Fix user agent for the bnn_ sites
pages_scrape.py
pages_scrape.py
import logging

import requests


def scrape(url, extractor):
    """
    Function to request and parse a given URL. Returns only the
    "relevant" text.

    Parameters
    ----------

    url : String.
            URL to request and parse.

    extractor : Goose class instance.
                An instance of Goose that allows for parsing of content.

    Returns
    -------

    text : String.
            Parsed text from the specified website.

    meta : String.
            Parsed meta description of an article. Usually equivalent to the
            lede.
    """
    logger = logging.getLogger('scraper_log')
    try:
        page = requests.get(url)
        try:
            article = extractor.extract(raw_html=page.content)
            text = article.cleaned_text
            meta = article.meta_description
            return text, meta
        #Generic error catching is bad
        except Exception, e:
            print 'There was an error. Check the log file for more information.'
            logger.warning('Problem scraping URL: {}. {}.'.format(url, e))
    except Exception, e:
        print 'There was an error. Check the log file for more information.'
        logger.warning('Problem requesting url: {}. {}'.format(url, e))
Python
0.999201
@@ -614,32 +614,190 @@ _log')%0A try:%0A + headers = %7B'User-Agent': %22Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36%22%7D%0A%0A page = r @@ -811,16 +811,33 @@ .get(url +, headers=headers )%0A @@ -1418,9 +1418,8 @@ rl, e))%0A -%0A
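Decoded, the hunk sends a desktop Chrome User-Agent with the request (presumably because the bnn_ sites reject the default python-requests agent) and trims a trailing blank line; the patched call reads roughly:

```python
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}

page = requests.get(url, headers=headers)
```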
dddf634f8445fac66aa25265c7f7e859dab4c000
add test file for python
test/test.py
test/test.py
Python
0.000001
@@ -0,0 +1,160 @@ +# Highlighter Demo%0A%0Aclass Person:%0A def __init__(self, x):%0A self.x = x%0A def show(self):%0A print(self.x)%0A%0Aperson = Person(%22Ken%22)%0Aperson.show()%0A
abcbe6443492ba2f011dec0132a0afb3b8cc9b0b
Create __init__.py
hello-world/__init__.py
hello-world/__init__.py
Python
0.000429
@@ -0,0 +1 @@ +%0A
8d36c444fe379b5901692485c2850e86ed714f89
Add sql connection tester
sql_connection_test.py
sql_connection_test.py
Python
0.000007
@@ -0,0 +1,566 @@ +import mysql.connector%0Aimport json%0A%0Awith open(%22config.json%22) as f:%0A config = json.load(f)%0A%0Atry:%0A conn = mysql.connector.connect(%0A user=config%5B%22database_connection%22%5D%5B%22username%22%5D,%0A password=config%5B%22database_connection%22%5D%5B%22password%22%5D,%0A host=config%5B%22database_connection%22%5D%5B%22host%22%5D,%0A database=config%5B%22database_connection%22%5D%5B%22database%22%5D)%0A%0A cursor = conn.cursor()%0A%0A cursor.close()%0A%0A print %22Connection success%22%0A%0Aexcept mysql.connector.errors.ProgrammingError as err:%0A print %22Error connecting to database: %5Cn%7B%7D%22.format(err)%0A
05741f17ffac95d66290d2ec705cbfb66fc74ff9
Add dummpy documentation/stats/plot_sky_locations.py
documentation/stats/plot_sky_locations.py
documentation/stats/plot_sky_locations.py
Python
0
@@ -0,0 +1,265 @@ +from bokeh.plotting import figure, output_file, show%0A%0Aoutput_file(%22example.html%22)%0A%0Ax = %5B1, 2, 3, 4, 5%5D%0Ay = %5B6, 7, 6, 4, 5%5D%0A%0Ap = figure(title=%22example%22, plot_width=300, plot_height=300)%0Ap.line(x, y, line_width=2)%0Ap.circle(x, y, size=10, fill_color=%22white%22)%0A%0Ashow(p)%0A
fe4b226b9b3d6fbc7be7d545c185ed7950f3a5fd
Add Python benchmark
lib/node_modules/@stdlib/math/base/dist/beta/logpdf/benchmark/python/benchmark.scipy.py
lib/node_modules/@stdlib/math/base/dist/beta/logpdf/benchmark/python/benchmark.scipy.py
Python
0.000138
@@ -0,0 +1,1566 @@ +#!/usr/bin/env python%0A%22%22%22Benchmark scipy.stats.beta.logpdf.%22%22%22%0A%0Aimport timeit%0A%0Aname = %22beta:logpdf%22%0Arepeats = 3%0Aiterations = 1000%0A%0A%0Adef print_version():%0A %22%22%22Print the TAP version.%22%22%22%0A%0A print(%22TAP version 13%22)%0A%0A%0Adef print_summary(total, passing):%0A %22%22%22Print the benchmark summary.%0A%0A # Arguments%0A%0A * %60total%60: total number of tests%0A * %60passing%60: number of passing tests%0A%0A %22%22%22%0A%0A print(%22#%22)%0A print(%221..%22 + str(total)) # TAP plan%0A print(%22# total %22 + str(total))%0A print(%22# pass %22 + str(passing))%0A print(%22#%22)%0A print(%22# ok%22)%0A%0A%0Adef print_results(elapsed):%0A %22%22%22Print benchmark results.%0A%0A # Arguments%0A%0A * %60elapsed%60: elapsed time (in seconds)%0A%0A # Examples%0A%0A %60%60%60 python%0A python%3E print_results(0.131009101868)%0A %60%60%60%0A %22%22%22%0A%0A rate = iterations / elapsed%0A%0A print(%22 ---%22)%0A print(%22 iterations: %22 + str(iterations))%0A print(%22 elapsed: %22 + str(elapsed))%0A print(%22 rate: %22 + str(rate))%0A print(%22 ...%22)%0A%0A%0Adef benchmark():%0A %22%22%22Run the benchmark and print benchmark results.%22%22%22%0A%0A setup = %22from scipy.stats import beta; from random import random;%22%0A stmt = %22y = beta.logpdf(random(), 100.56789, 55.54321)%22%0A%0A t = timeit.Timer(stmt, setup=setup)%0A%0A print_version()%0A%0A for i in xrange(repeats):%0A print(%22# python::%22 + name)%0A elapsed = t.timeit(number=iterations)%0A print_results(elapsed)%0A print(%22ok %22 + str(i+1) + %22 benchmark finished%22)%0A%0A print_summary(repeats, repeats)%0A%0A%0Adef main():%0A %22%22%22Run the benchmark.%22%22%22%0A benchmark()%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
c36e390910b62e1ad27066a0be0450c81a6f87c6
Add context manager for logging
d1_common_python/src/d1_common/logging_context.py
d1_common_python/src/d1_common/logging_context.py
Python
0
@@ -0,0 +1,911 @@ +# -*- coding: utf-8 -*-%0A%0A%22%22%22Context manager that enables temporary changes in logging level.%0ANote: Not created by DataONE.%0ASource: https://docs.python.org/2/howto/logging-cookbook.html%0A%22%22%22%0Aimport logging%0Aimport sys%0A%0A%0Aclass LoggingContext(object):%0A def __init__(self, logger, level=None, handler=None, close=True):%0A self.logger = logger%0A self.level = level%0A self.handler = handler%0A self.close = close%0A%0A def __enter__(self):%0A if self.level is not None:%0A self.old_level = self.logger.level%0A self.logger.setLevel(self.level)%0A if self.handler:%0A self.logger.addHandler(self.handler)%0A%0A def __exit__(self, et, ev, tb):%0A if self.level is not None:%0A self.logger.setLevel(self.old_level)%0A if self.handler:%0A self.logger.removeHandler(self.handler)%0A if self.handler and self.close:%0A self.handler.close()%0A # implicit return of None =%3E don't swallow exceptions%0A
6c7a927e2fc0a054470c2a87fa98d07e993657ac
Add tests
test/test.py
test/test.py
Python
0.000001
@@ -0,0 +1,1347 @@ +import os%0Aimport unittest%0A%0Atry:%0A import directio%0Aexcept ImportError:%0A import sys%0A sys.exit(%22%22%22%0A Please install directio:%0A take a look at directio/README%22%22%22)%0A%0A%0Aclass TestDirectio(unittest.TestCase):%0A%0A def setUp(self):%0A super(TestDirectio, self).setUp()%0A flags = os.O_RDWR %7C os.O_DIRECT %7C os.O_SYNC %7C os.O_CREAT %7C os.O_TRUNC%0A self.file = os.open('test.txt', flags, 0o666)%0A self.buffer = bytearray(512)%0A self.msg = b'It just works!'%0A self.buffer%5B:len(self.msg)%5D = self.msg%0A%0A def tearDown(self):%0A super(TestDirectio, self).tearDown()%0A os.close(self.file)%0A%0A def test_read_after_write(self):%0A # can write only immutable buffer, so we buffer wrap in bytes%0A written = directio.write(self.file, bytes(self.buffer))%0A self.assertEqual(written, len(self.buffer))%0A os.lseek(self.file, 0, os.SEEK_SET)%0A got = directio.read(self.file, len(self.buffer))%0A self.assertEqual(got, self.buffer)%0A%0A def test_fails_to_write_not_multiple_of_512(self):%0A self.assertRaises(ValueError, directio.write, self.file, self.msg)%0A%0A def test_fails_to_read_not_multiple_of_512(self):%0A os.lseek(self.file, 0, os.SEEK_SET)%0A self.assertRaises(ValueError, directio.read, self.file, 511)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A%0A
96a3fc178c9da5a8f917378e40454a0702d746e5
Initialize construction module
drydock/construction.py
drydock/construction.py
Python
0.000001
@@ -0,0 +1,69 @@ +%22%22%22DryDock container construction.%22%22%22%0A%0A%0Adef construct(spec):%0A pass
4c33fe7a927cde83aa53374e9fcaedfa18e51e77
Add function to delete collection
utilities.py
utilities.py
Python
0
@@ -0,0 +1,336 @@ +def delete_collection(ee, id):%0A if 'users' not in id:%0A root_path_in_gee = ee.data.getAssetRoots()%5B0%5D%5B'id'%5D%0A id = root_path_in_gee + '/' + id%0A params = %7B'id': id%7D%0A items_in_collection = ee.data.getList(params)%0A for item in items_in_collection:%0A ee.data.deleteAsset(item%5B'id'%5D)%0A ee.data.deleteAsset(id)
4d9d286ec96e834fcb9acf1f1f52876e81668996
Test script
tools/test.py
tools/test.py
Python
0.000001
@@ -0,0 +1,178 @@ +from fakturo.billingstack.client import Client%0A%0A%0Aclient = Client('http://localhost:9090/billingstack', username='ekarlso', password='secret0')%0Amerchants = client.merchant.list()%0A
6bf12f844bb67c0e97adab2b3a17f3c02f04259b
fix test runner compatibility with old pythons and weird tests (PY-1976)
python/helpers/pycharm/tcunittest.py
python/helpers/pycharm/tcunittest.py
import traceback, sys
from unittest import TestResult
import datetime

from pycharm.tcmessages import TeamcityServiceMessages

def strclass(cls):
    return "%s.%s" % (cls.__module__, cls.__name__)

class TeamcityTestResult(TestResult):
    def __init__(self, stream=sys.stdout):
        TestResult.__init__(self)

        self.output = stream

        self.messages = TeamcityServiceMessages(self.output, prepend_linebreak=True)
        self.current_suite = None

    def formatErr(self, err):
        exctype, value, tb = err
        return ''.join(traceback.format_exception(exctype, value, tb))

    def getTestName(self, test):
        return test._testMethodName

    def getTestId(self, test):
        return test.id

    def addSuccess(self, test):
        TestResult.addSuccess(self, test)

    def addError(self, test, err):
        TestResult.addError(self, test, err)

        err = self.formatErr(err)

        self.messages.testFailed(self.getTestName(test),
                                 message='Error', details=err)

    def addFailure(self, test, err):
        TestResult.addFailure(self, test, err)

        err = self.formatErr(err)

        self.messages.testFailed(self.getTestName(test),
                                 message='Failure', details=err)

    def addSkip(self, test, reason):
        self.messages.testIgnored(self.getTestName(test), message=reason)

    def startTest(self, test):
        suite = test.__class__
        if suite != self.current_suite:
            if self.current_suite:
                self.messages.testSuiteFinished(strclass(self.current_suite))
            self.current_suite = suite
            self.messages.testSuiteStarted(strclass(self.current_suite),
                                           location="python_uttestid://" + strclass(self.current_suite))
        setattr(test, "startTime", datetime.datetime.now())
        self.messages.testStarted(self.getTestName(test),
                                  location="python_uttestid://" + str(test.id()))

    def stopTest(self, test):
        start = getattr(test, "startTime", datetime.datetime.now())
        d = datetime.datetime.now() - start
        duration = d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
        self.messages.testFinished(self.getTestName(test), duration=int(duration))

    def endLastSuite(self):
        if self.current_suite:
            self.messages.testSuiteFinished(strclass(self.current_suite))
            self.current_suite = None

class TeamcityTestRunner:
    def __init__(self, stream=sys.stdout):
        self.stream = stream

    def _makeResult(self):
        return TeamcityTestResult(self.stream)

    def run(self, test):
        result = self._makeResult()
        test(result)
        result.endLastSuite()
        return result
Python
0
@@ -645,35 +645,127 @@ -return test._testMethodName +if hasattr(test, '_testMethodName'):%0A return test._testMethodName%0A else:%0A return str(test) %0A%0A
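Decoded, the hunk makes `getTestName` tolerate test objects that lack `_testMethodName` (older Pythons and non-standard tests, per the subject), falling back to `str(test)`:

```python
def getTestName(self, test):
    if hasattr(test, '_testMethodName'):
        return test._testMethodName
    else:
        return str(test)
```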
79a236133ea00fa1d1af99426380392fe51ec0f4
Create iis_shortname.py
middileware/iis/iis_shortname.py
middileware/iis/iis_shortname.py
Python
0.000002
@@ -0,0 +1,1608 @@ +#!/usr/bin/env python%0A# encoding: utf-8%0Afrom t import T%0Aimport re%0Aimport urllib2,requests,urllib2,json,urlparse%0Arequests.packages.urllib3.disable_warnings()%0A%0A%0A%0A%0Aclass P(T):%0A def __init__(self):%0A T.__init__(self)%0A def verify(self,head='',context='',ip='',port='',productname=%7B%7D,keywords='',hackinfo='',verify=False):%0A timeout=5%0A if int(port) == 443:%0A protocal = %22https%22%0A else:%0A protocal = %22http%22%0A target_url = protocal + %22://%22+ip+%22:%22+str(port)%0A%0A%0A result = %7B%7D%0A result%5B'result'%5D=False%0A r=None%0A%0A try:%0A%0A status_1=requests.get(url=target_url+'/*~1****/a.aspx',timeout=timeout,verify=verify,allow_redirects=False).status_code%0A status_2=requests.get(url=target_url+'/l1j1e*~1****/a.aspx',timeout=timeout,verify=verify,allow_redirects=False).status_code%0A #print target_url%0A if status_1 == 404 and status_2 == 400:%0A result%5B'result'%5D=True%0A result%5B'VerifyInfo'%5D = %7B%7D%0A result%5B'VerifyInfo'%5D%5B'type'%5D='iis short name Vulnerability'%0A result%5B'VerifyInfo'%5D%5B'URL'%5D =target_url%0A result%5B'VerifyInfo'%5D%5B'payload'%5D= 'null'%0A result%5B'VerifyInfo'%5D%5B'result'%5D =r.content%0A except Exception,e:%0A #print '%5B-%5Derror',%0A print e.text%0A #pass%0A #print traceback.print_exc()%0A finally:%0A if r is not None:%0A r.close()%0A del r%0A return result%0A%0A%0A%0Aif __name__ == '__main__':%0A print P().verify(ip='cos.99.com',port='80')%0A
b6ee9b5d7ece1f68e5278d62f11258d0ba6491c5
Fix required language argument in cli
subliminal/cli.py
subliminal/cli.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import argparse
import datetime
import logging
import os
import re
import sys
import babelfish
import guessit
import pkg_resources
from subliminal import (__version__, PROVIDERS_ENTRY_POINT, cache_region, Video, Episode, Movie, scan_videos,
    download_best_subtitles)


DEFAULT_CACHE_FILE = os.path.join('~', '.config', 'subliminal.cache.dbm')


def subliminal_parser():
    parser = argparse.ArgumentParser(description='Subtitles, faster than your thoughts')
    parser.add_argument('-l', '--languages', nargs='+', metavar='LANGUAGE',
                        help='wanted languages as alpha2 code (ISO-639-1)')
    parser.add_argument('-p', '--providers', nargs='+', metavar='PROVIDER',
                        help='providers to use from %s (default: all)' % ', '.join(ep.name for ep in pkg_resources.iter_entry_points(PROVIDERS_ENTRY_POINT)))
    parser.add_argument('-m', '--min-score', type=int,
                        help='minimum score for subtitles. 0-%d for episodes, 0-%d for movies' % (Episode.scores['hash'], Movie.scores['hash']))
    parser.add_argument('-s', '--single', action='store_true',
                        help='download without language code in subtitle\'s filename i.e. .srt only')
    parser.add_argument('-f', '--force', action='store_true', help='overwrite existing subtitles')
    parser.add_argument('-c', '--cache-file', default=DEFAULT_CACHE_FILE, help='cache file (default: %(default)s)')
    parser.add_argument('-a', '--age', help='download subtitles for videos newer than AGE e.g. 12h, 1w2d')
    parser.add_argument('--hearing-impaired', action='store_true', help='download hearing impaired subtitles')
    group_verbosity = parser.add_mutually_exclusive_group()
    group_verbosity.add_argument('-q', '--quiet', action='store_true', help='disable output')
    group_verbosity.add_argument('-v', '--verbose', action='store_true', help='verbose output')
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument('paths', nargs='+', metavar='PATH', help='path to video file or folder')
    return parser


def subliminal():
    parser = subliminal_parser()
    args = parser.parse_args()

    # parse paths
    try:
        args.paths = [p.decode('utf-8') for p in args.paths]
    except UnicodeDecodeError:
        parser.error('argument paths: encodings is not utf-8: %r' % args.paths)

    # parse languages
    try:
        args.languages = {babelfish.Language.fromalpha2(l) for l in args.languages}
    except babelfish.Error:
        parser.error('argument -l/--languages: codes are not ISO-639-1: %r' % args.languages)

    # parse age
    if args.age is not None:
        match = re.match(r'^(?:(?P<weeks>\d+?)w)?(?:(?P<days>\d+?)d)?(?:(?P<hours>\d+?)h)?$', args.age)
        if not match:
            parser.error('argument -a/--age: invalid age: %r' % args.age)
        args.age = datetime.timedelta(**{k: int(v) for k, v in match.groupdict(0).items()})

    # setup verbosity
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    elif not args.quiet:
        logging.basicConfig(level=logging.WARN)

    # configure cache
    cache_region.configure('dogpile.cache.dbm', arguments={'filename': os.path.expanduser(args.cache_file)})

    # scan videos
    videos = scan_videos([p for p in args.paths if os.path.exists(p)], subtitles=not args.force, age=args.age)

    # guess videos
    videos.extend([Video.fromguess(os.path.split(p)[1], guessit.guess_file_info(p, 'autodetect'))
                   for p in args.paths if not os.path.exists(p)])

    # download best subtitles
    subtitles = download_best_subtitles(videos, args.languages, providers=args.providers, provider_configs=None,
                                        single=args.single, min_score=args.min_score,
                                        hearing_impaired=args.hearing_impaired)

    # output result
    if not subtitles:
        if not args.quiet:
            sys.stderr.write('No subtitles downloaded\n')
        exit(1)
    if not args.quiet:
        subtitles_count = sum([len(s) for s in subtitles.values()])
        if subtitles_count == 1:
            print('%d subtitle downloaded' % subtitles_count)
        else:
            print('%d subtitles downloaded' % subtitles_count)
Python
0.000164
@@ -582,32 +582,47 @@ ges', nargs='+', + required=True, metavar='LANGUA
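Decoded, the hunk inserts `required=True` into the `-l/--languages` option, so `args.languages` can no longer be `None` when the set comprehension in `subliminal()` iterates it:

```python
parser.add_argument('-l', '--languages', nargs='+', required=True, metavar='LANGUAGE',
                    help='wanted languages as alpha2 code (ISO-639-1)')
```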
50af4f518912f758e7961055342642c9d31832a0
Create 6-pwm2.py
Code/6-pwm2.py
Code/6-pwm2.py
Python
0.000013
@@ -0,0 +1,2668 @@ +# CamJam EduKit 3 - Robotics%0A# Worksheet 6 %E2%80%93 Varying the speed of each motor with PWM%0A%0Aimport RPi.GPIO as GPIO # Import the GPIO Library%0Aimport time # Import the Time library%0A%0A# Set the GPIO modes%0AGPIO.setmode(GPIO.BCM)%0AGPIO.setwarnings(False)%0A%0A# Set variables for the GPIO motor pins%0ApinMotorAForwards = 10%0ApinMotorABackwards = 9%0ApinMotorBForwards = 8%0ApinMotorBBackwards = 7%0A%0A# How many times to turn the pin on and off each second%0AFrequency = 20%0A# How long the pin stays on each cycle, as a percent%0ADutyCycleA = 30%0ADutyCycleB = 30%0A# Settng the duty cycle to 0 means the motors will not turn%0AStop = 0%0A%0A# Set the GPIO Pin mode to be Output%0AGPIO.setup(pinMotorAForwards, GPIO.OUT)%0AGPIO.setup(pinMotorABackwards, GPIO.OUT)%0AGPIO.setup(pinMotorBForwards, GPIO.OUT)%0AGPIO.setup(pinMotorBBackwards, GPIO.OUT)%0A%0A# Set the GPIO to software PWM at 'Frequency' Hertz%0ApwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)%0ApwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)%0ApwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)%0ApwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)%0A%0A# Start the software PWM with a duty cycle of 0 (i.e. not moving)%0ApwmMotorAForwards.start(Stop)%0ApwmMotorABackwards.start(Stop)%0ApwmMotorBForwards.start(Stop)%0ApwmMotorBBackwards.start(Stop)%0A%0A# Turn all motors off%0Adef StopMotors():%0A pwmMotorAForwards.ChangeDutyCycle(Stop)%0A pwmMotorABackwards.ChangeDutyCycle(Stop)%0A pwmMotorBForwards.ChangeDutyCycle(Stop)%0A pwmMotorBBackwards.ChangeDutyCycle(Stop)%0A%0A# Turn both motors forwards%0Adef Forwards():%0A pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)%0A pwmMotorABackwards.ChangeDutyCycle(Stop)%0A pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)%0A pwmMotorBBackwards.ChangeDutyCycle(Stop)%0A%0A# Turn both motors backwards%0Adef Backwards():%0A pwmMotorAForwards.ChangeDutyCycle(Stop)%0A pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)%0A pwmMotorBForwards.ChangeDutyCycle(Stop)%0A pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)%0A%0A# Turn left%0Adef Left():%0A pwmMotorAForwards.ChangeDutyCycle(Stop)%0A pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)%0A pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)%0A pwmMotorBBackwards.ChangeDutyCycle(Stop)%0A%0A# Turn Right%0Adef Right():%0A pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)%0A pwmMotorABackwards.ChangeDutyCycle(Stop)%0A pwmMotorBForwards.ChangeDutyCycle(Stop)%0A pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)%0A%0A# Your code to control the robot goes below this line%0AForwards()%0Atime.sleep(1) # Pause for 1 second%0A%0ALeft()%0Atime.sleep(0.5) # Pause for half a second%0A%0AForwards()%0Atime.sleep(1)%0A%0ARight()%0Atime.sleep(0.5)%0A%0ABackwards()%0Atime.sleep(0.5)%0A%0AStopMotors()%0A%0AGPIO.cleanup()%0A
2d25c2329a9ae4d084671ab99cf53290fe7547ab
add tests for cython script
streams/simulation/tests/test_integrate_lm10.py
streams/simulation/tests/test_integrate_lm10.py
Python
0
@@ -0,0 +1,1471 @@ +# coding: utf-8%0A%22%22%22%0A Test the Cython integrate code%0A%22%22%22%0A%0Afrom __future__ import absolute_import, unicode_literals, division, print_function%0A%0A__author__ = %22adrn %[email protected]%3E%22%0A%0A# Standard library%0Aimport os, sys%0Aimport glob%0Aimport time%0A%0A# Third-party%0Aimport numpy as np%0Aimport pytest%0Aimport astropy.units as u%0Aimport matplotlib.pyplot as plt%0A%0Afrom .._integrate_lm10 import lm10_acceleration, leapfrog_lm10%0Afrom ...potential import LawMajewski2010%0Afrom ...integrate import leapfrog%0A%0Adef test_cython_vs_python1():%0A r = np.random.random((100,3))%0A %0A a = time.time()%0A for ii in range(10000):%0A lm10_acceleration(r, 2, 1.6, 1.6, 1.69, 0.121) %0A cython = (time.time() - a) / 10000.%0A %0A lm10 = LawMajewski2010()%0A %0A a = time.time()%0A for ii in range(10000):%0A lm10.acceleration_at(r) %0A pure_python = (time.time() - a) / 10000.%0A %0A assert cython %3C pure_python%0A %0Adef test_cython_vs_python2():%0A r = np.random.random((100,3))%0A v = np.random.random((100,3))%0A t = np.arange(0, 7000, 10.)%0A %0A a = time.time()%0A for ii in range(10):%0A leapfrog_lm10(r, v, 1.6, 1.6, 1.69, 0.121, t=t)%0A cython = (time.time() - a) / 10.%0A %0A lm10 = LawMajewski2010()%0A %0A a = time.time()%0A for ii in range(10):%0A leapfrog(lm10.acceleration_at, r, v, t)%0A pure_python = (time.time() - a) / 10.%0A %0A print(cython, pure_python)%0A #assert cython %3C pure_python%0A
af3333906125e9bde3cc5b3ebdb7209c25bcf6ff
Add pinger script
pinger.py
pinger.py
Python
0
@@ -0,0 +1,197 @@ +#!/usr/bin/python3%0Aimport requests%0Aimport datetime%0Aimport time%0A%0Awhile True:%0A hour = datetime.datetime.now().hour%0A if hour %3E 7:%0A requests.get('https://biblion.se')%0A time.sleep(60*29)
e140c21cd0b7d5b0e7cbe7895096476105d03f91
Create update_sql.py
update_sql.py
update_sql.py
Python
0.000002
@@ -0,0 +1,861 @@ +__author__ = 'userme865'%0A# ver 0.1%0A%0Aimport MySQLdb%0Adef update_db():%0A try: # start msql and creat stable at first time%0A conn = MySQLdb.connect(host='localhost', user='root', passwd='', port=3306)%0A cur = conn.cursor()%0A conn.select_db('python')%0A cur.execute('DROP TABLE dataexchange')%0A cur.execute(%0A %22CREATE TABLE dataexchange SELECT indexer.words, group_concat(indexer.pages ORDER BY indexer.words SEPARATOR ',') AS 'pages',group_concat(indexer.pagewords ORDER BY indexer.words SEPARATOR ',') AS 'pagewords' from indexer GROUP BY indexer.words%22)%0A cur.execute(%22DROP TABLE indexer%22)%0A cur.execute(%22CREATE TABLE indexer SELECT* FROM dataexchange%22)%0A conn.commit()%0A cur.close()%0A conn.close()%0A%0A except MySQLdb.Error, e:%0A print %22Mysql Error %25d: %25s%22 %25 (e.args%5B0%5D, e.args%5B1%5D)%0A
3921f1522851767444644d1dc3c126521476d9dc
add util script to help troll autoplot feature ideas
scripts/util/list_stale_autoplots.py
scripts/util/list_stale_autoplots.py
Python
0
@@ -0,0 +1,854 @@ +%22%22%22Look into which autoplots have not been used in a while%22%22%22%0Aimport psycopg2%0Aimport re%0Aimport pandas as pd%0A%0AQRE = re.compile(%22q=(%5B0-9%5D+)%22)%0A%0Apgconn = psycopg2.connect(database='mesosite', host='iemdb', user='nobody')%0Acursor = pgconn.cursor()%0A%0Acursor.execute(%22%22%22SELECT valid, appurl from feature WHERE appurl is not null%0A and appurl != ''%0A %22%22%22)%0Aq = %7B%7D%0Afor row in cursor:%0A appurl = row%5B1%5D%0A valid = row%5B0%5D%0A if appurl.find(%22/plotting/auto/%22) != 0:%0A continue%0A tokens = QRE.findall(appurl)%0A if len(tokens) == 0:%0A print(%22appurl: %25s valid: %25s failed RE%22 %25 (appurl, valid))%0A continue%0A appid = int(tokens%5B0%5D)%0A res = q.setdefault(appid, valid)%0A if res %3C valid:%0A q%5Bappid%5D = valid%0A%0Adf = pd.DataFrame.from_dict(q, orient='index')%0Adf.columns = %5B'valid'%5D%0Adf.sort_values(by='valid', inplace=True)%0Aprint df.head()%0A
faa6872cf008171afa3db6687d23c1bcc9b6dbac
Add views to the main files
Druid/views.py
Druid/views.py
Python
0
@@ -0,0 +1,229 @@ +from django.shortcuts import render%0Afrom gfx.models import Material%0Afrom django.template import RequestContext%0A%0Adef home( request ):%0A%09rc = RequestContext(request)%0A%09return render( request, 'Druid/index.html', context_instance=rc )
bad89393891761334e37b611856449ede3a99470
Fix typo blocking access to exmachina, and report the problem if unable to load the exmachina client library.
plinth.py
plinth.py
#!/usr/bin/env python

import os, sys, argparse
from gettext import gettext as _

import cfg
if not os.path.join(cfg.file_root, "vendor") in sys.path:
    sys.path.append(os.path.join(cfg.file_root, "vendor"))

import cherrypy
from cherrypy import _cpserver
from cherrypy.process.plugins import Daemonizer
Daemonizer(cherrypy.engine).subscribe()

import plugin_mount
import util as u
from logger import Logger
#from modules.auth import AuthController, require, member_of, name_is
from withsqlite.withsqlite import sqlite_db
from exmachina.exmachina import ExMachinaClient
import socket

__version__ = "0.2.14"
__author__ = "James Vasile"
__copyright__ = "Copyright 2011-2013, James Vasile"
__license__ = "GPLv3 or later"
__maintainer__ = "James Vasile"
__email__ = "[email protected]"
__status__ = "Development"


def error_page(status, dynamic_msg, stock_msg):
    return u.page_template(template="err", title=status, main="<p>%s</p>%s" % (dynamic_msg, stock_msg))

def error_page_404(status, message, traceback, version):
    return error_page(status, message,
                      """<p>If you believe this missing page should exist, please file a bug
with either the Plinth project (<a href="https://github.com/jvasile/plinth/issues">it
has an issue tracker</a>) or the people responsible for the module you
are trying to access.</p>

<p>Sorry for the mistake.</p>
""")

def error_page_500(status, message, traceback, version):
    cfg.log.error("500 Internal Server Error. Trackback is above.")
    more = """<p>This is an internal error and not something you caused or can fix.
Please report the error on the <a href="https://github.com/jvasile/Plinth/issues">bug
tracker</a> so we can fix it.</p>"""
    return error_page(status, message, "<p>%s</p><pre>%s</pre>" % (more, "\n".join(traceback.split("\n"))))

class Root(plugin_mount.PagePlugin):
    @cherrypy.expose
    def index(self):
        ## TODO: firstboot hijacking root should probably be in the firstboot module with a hook in plinth.py
        with sqlite_db(cfg.store_file, table="firstboot") as db:
            if not 'state' in db:
                raise cherrypy.InternalRedirect('/firstboot')
            elif db['state'] < 5:
                cfg.log("First Boot state = %d" % db['state'])
                raise cherrypy.InternalRedirect('/firstboot/state%d' % db['state'])
        if cherrypy.session.get(cfg.session_key, None):
            raise cherrypy.InternalRedirect('/router')
        else:
            raise cherrypy.InternalRedirect('/help/about')

def load_modules():
    """Import all the symlinked .py files in the modules directory and
    all the .py files in directories linked in the modules directory
    (but don't dive deeper than that). Also, ignore the installed
    directory."""
    for name in os.listdir("modules"):
        if name.endswith(".py") and not name.startswith('.'):
            cfg.log.info("importing modules/%s" % name)
            try:
                __import__("modules.%s" % (name[:-3]))
            except ImportError, e:
                cfg.log.error(_("Couldn't import modules/%s: %s") % (name, e))
        else:
            cfg.log("skipping %s" % name)

def parse_arguments():
    parser = argparse.ArgumentParser(description='Plinth web interface for the FreedomBox.')
    parser.add_argument('--pidfile', default="",
                        help='specify a file in which the server may write its pid')
    parser.add_argument('--listen-exmachina-key', default=False, action='store_true',
                        help='listen for JSON-RPC shared secret key on stdin at startup')
    args = parser.parse_args()
    if args.pidfile:
        cfg.pidfile = args.pidfile
    else:
        try:
            if not cfg.pidfile:
                cfg.pidfile = "plinth.pid"
        except AttributeError:
            cfg.pidfile = "plinth.pid"
    if args.listen_exmachina_key:
        # this is where we optionally try to read in a shared secret key to
        # authenticate connections to exmachina
        cfg.exmachina_secret_key = sys.stdin.readline().strip()
    else:
        cfg.exmachina_secret_key = None

def setup():
    parse_arguments()

    try:
        if cfg.pidfile:
            from cherrypy.process.plugins import PIDFile
            PIDFile(cherrypy.engine, cfg.pidfile).subscribe()
    except AttributeError:
        pass

    try:
        from exmachina import ExMachinaClient
    except ImportError:
        cfg.exmachina = None
    else:
        try:
            cfg.exmachina = ExMachinaClient(
                secret_key=cfg.exmachina_secret_key or None)
        except socket.error:
            cfg.exmachina = None
            print "couldn't connect to exmachina daemon, but continuing anyways..."

    os.chdir(cfg.python_root)
    cherrypy.config.update({'error_page.404': error_page_404})
    cherrypy.config.update({'error_page.500': error_page_500})
    cfg.log = Logger()
    load_modules()
    cfg.html_root = Root()
    cfg.users = plugin_mount.UserStoreModule.get_plugins()[0]
    cfg.page_plugins = plugin_mount.PagePlugin.get_plugins()
    cfg.log("Loaded %d page plugins" % len(cfg.page_plugins))
    cfg.forms = plugin_mount.FormPlugin.get_plugins()

    # Add an extra server
    server = _cpserver.Server()
    server.socket_host = '127.0.0.1'
    server.socket_port = 52854
    server.subscribe()

    # Configure default server
    cherrypy.config.update(
        {'server.socket_host': cfg.host,
         'server.socket_port': cfg.port,
         'server.thread_pool':10,
         'tools.staticdir.root': cfg.file_root,
         'tools.sessions.on':True,
         'tools.auth.on':True,
         'tools.sessions.storage_type':"file",
         'tools.sessions.timeout':90,
         'tools.sessions.storage_path':"%s/cherrypy_sessions" % cfg.data_dir,})

    config = {
        '/': {'tools.staticdir.root': '%s/static' % cfg.file_root,
              'tools.proxy.on': True,},
        '/static': {'tools.staticdir.on': True,
                    'tools.staticdir.dir': "."},
        '/favicon.ico':{'tools.staticfile.on': True,
                        'tools.staticfile.filename': "%s/static/theme/favicon.ico" % cfg.file_root}}
    cherrypy.tree.mount(cfg.html_root, '/', config=config)

    cherrypy.engine.signal_handler.subscribe()

def main():
    setup()
    print "%s %d" % (cfg.host, cfg.port)
    cherrypy.engine.start()
    cherrypy.engine.block()

if __name__ == '__main__':
    main()
Python
0
@@ -4270,32 +4270,42 @@ from exmachina +.exmachina import ExMachin @@ -4362,16 +4362,99 @@ = None%0A + print %22unable to import exmachina client library, but continuing anyways...%22%0A else:
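Decoded, the first hunk fixes the inner import in `setup()` to `from exmachina.exmachina import ExMachinaClient` (matching the module-level import above), and the second reports the failure instead of passing silently; the patched block reads roughly:

```python
    try:
        from exmachina.exmachina import ExMachinaClient
    except ImportError:
        cfg.exmachina = None
        print "unable to import exmachina client library, but continuing anyways..."
    else:
        # the connection attempt below is unchanged
        try:
            cfg.exmachina = ExMachinaClient(
                secret_key=cfg.exmachina_secret_key or None)
        except socket.error:
            cfg.exmachina = None
            print "couldn't connect to exmachina daemon, but continuing anyways..."
```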
fc23860b1adbf7c75dfd53dc213c24a65b455597
Create ExtractData.py
ExtractData.py
ExtractData.py
Python
0.000001
@@ -0,0 +1 @@ +%0A
552fc246e055eb4a29390a89b04c9a8d796cfa12
fix bug 'dtype' is an invalid keyword to ones_like
qutip/ptrace.py
qutip/ptrace.py
#This file is part of QuTIP.
#
#    QuTIP is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    QuTIP is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with QuTIP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011-2012, Paul D. Nation & Robert J. Johansson
#
###########################################################################
from scipy import *
import scipy.sparse as sp
from scipy.linalg import *
from qutip.Qobj import *

def ptrace(rho,sel):
    """
    Compute partial trace of composite quantum object formed by :func:`qutip.tensor`

    Args:

        rho (Qobj): Input composite quantum object.

        sel (int or list/array): index or indices for components to keep.

    Returns:

        The density matrix of components from sel as a Qobj.
    """
    if isinstance(sel,int):
        sel=array([sel])
    sel=asarray(sel)
    drho=rho.dims[0]
    N=prod(drho)
    M=prod(asarray(drho).take(sel))
    if prod(rho.dims[1]) == 1:
        rho = rho * rho.dag()
    perm = sp.lil_matrix((M*M,N*N))
    rest=setdiff1d(arange(len(drho)),sel) #all elements in range(len(drho)) not in sel set
    ilistsel=selct(sel,drho)
    indsel=list2ind(ilistsel,drho)
    ilistrest=selct(rest,drho)
    indrest=list2ind(ilistrest,drho)
    irest=(indrest-1)*N+indrest-2
    # Possibly use parfor here if M > some value ?
    perm.rows=array([(irest+(indsel[int(floor(m/M))]-1)*N+indsel[int(mod(m,M))]).T[0] for m in xrange(M**2)])
    perm.data=ones_like(perm.rows,dtype=int)
    perm.tocsr()
    rws=prod(shape(rho.data))
    rho1=Qobj()
    rhdata=perm*csr_to_col(rho.data)
    rhdata=rhdata.tolil().reshape((M,M))
    rho1.data=rhdata.tocsr()
    dims_kept0=asarray(rho.dims[0]).take(sel)
    dims_kept1=asarray(rho.dims[0]).take(sel)
    rho1.dims=[dims_kept0.tolist(),dims_kept1.tolist()]
    rho1.shape=[prod(dims_kept0),prod(dims_kept1)]
    return Qobj(rho1)

def list2ind(ilist,dims):
    """!
    Private function returning indicies
    """
    ilist=asarray(ilist)
    dims=asarray(dims)
    irev=fliplr(ilist)-1
    fact=append(array([1]),(cumprod(flipud(dims)[:-1])))
    fact=fact.reshape(len(fact),1)
    return array(sort(dot(irev,fact)+1,0),dtype=int)

def selct(sel,dims):
    """
    Private function finding selected components
    """
    sel=asarray(sel) #make sure sel is array
    dims=asarray(dims) #make sure dims is array
    rlst=dims.take(sel)
    rprod=prod(rlst)
    ilist=ones((rprod,len(dims)),dtype=int);
    counter=arange(rprod)
    for k in xrange(len(sel)):
        ilist[:,sel[k]]=remainder(fix(counter/prod(dims[sel[k+1:]])),dims[sel[k]])+1
    return ilist

def csr_to_col(mat):
    """
    Private function for reshape density matrix csr_matrix to a column
    csr_matrix without using lil (reshape) or csc (transpose) matrices
    which fail for large matricies.
    """
    mat.sort_indices()
    rows=array([len(range(mat.indptr[i],mat.indptr[i+1])) for i in xrange(mat.shape[1])])
    rows=[[k for j in xrange(rows[k])] for k in xrange(len(rows))]
    rows=array([item for sublist in rows for item in sublist])
    datlen=len(mat.data)
    ptrs=zeros((datlen+2),dtype=int)
    ptrs[1:-1]=(mat.shape[1]*rows+mat.indices)+1
    ptrs[-1]=prod(mat.shape)
    values=arange(datlen+1) #values to use in ptrs
    counts=diff(ptrs) #number of times values should be repeated
    ptrs=zeros(sum(counts)+1,dtype=int)
    ptrs[-1]=datlen
    ptrs[:-1]=repeat(values,counts) #append the number of data elems (per csr format)
    inds=zeros(datlen,dtype=int) #since this is col vec, all inds = 0
    out=sp.csr_matrix((mat.data,inds,ptrs),shape=(prod(mat.shape),1),dtype=complex)
    return out
Python
0.000003
@@ -1932,24 +1932,25 @@ M**2)%5D)%0A +# perm.data=on @@ -1974,24 +1974,59 @@ ,dtype=int)%0A + perm.data=ones_like(perm.rows)%0A perm.toc
ff3b36b4d64af54b6bd22f107a9d5dd5cf4f4473
solve problem no.1152
1152/answer.py
1152/answer.py
Python
0.999818
@@ -0,0 +1,166 @@ +from sys import stdin%0Ainput = stdin.readline().strip()%0A%0Aif input == %22%22:%0A print(0)%0A exit()%0A%0Ai = 1%0Afor char in input:%0A if char == ' ':%0A i += 1%0A%0Aprint(i)
82bfe668b11ac76159f2a599734ba33c4ef57026
Add another views_graph_service file
portal/views_graph_service.py
portal/views_graph_service.py
Python
0
@@ -0,0 +1,2457 @@ +from flask import (flash, redirect, render_template, request,%0A session, url_for)%0Aimport requests%0A%0Afrom portal import app, datasets%0Afrom portal.decorators import authenticated%0Afrom portal.utils import get_portal_tokens%0A%0A%[email protected]('/graph', methods=%5B'GET', 'POST'%5D)%0A@authenticated%0Adef graph():%0A if request.method == 'GET':%0A return render_template('graph.jinja2', datasets=datasets)%0A%0A selected_ids = request.form.getlist('dataset')%0A selected_year = request.form.get('year')%0A%0A if not (selected_ids and selected_year):%0A flash(%22Please select at least one dataset and a year to graph.%22)%0A return redirect(url_for('graph'))%0A%0A service_token = get_portal_tokens()%5B'service'%5D%0A service_url = '%7B%7D/%7B%7D'.format(app.config%5B'SERVICE_URL_BASE'%5D, 'api/doit')%0A req_headers = dict(Authorization='Bearer %7B%7D'.format(service_token))%0A%0A req_data = dict(datasets=selected_ids,%0A year=selected_year,%0A user_identity_id=session.get('primary_identity'),%0A user_identity_name=session.get('primary_username'))%0A%0A resp = requests.post(service_url, headers=req_headers, data=req_data,%0A verify=False)%0A%0A resp.raise_for_status()%0A%0A resp_data = resp.json()%0A dest_ep = resp_data.get('dest_ep')%0A dest_path = resp_data.get('dest_path')%0A dest_name = resp_data.get('dest_name')%0A graph_count = resp_data.get('graph_count')%0A%0A flash(%22%25d-file SVG upload to %25s on %25s completed!%22 %25%0A (graph_count, dest_path, dest_name))%0A%0A return redirect(url_for('browse', endpoint_id=dest_ep,%0A endpoint_path=dest_path.lstrip('/')))%0A%0A%[email protected]('/graph/clean-up', methods=%5B'POST'%5D)%0A@authenticated%0Adef graph_cleanup():%0A service_token = get_portal_tokens()%5B'service'%5D%0A service_url = '%7B%7D/%7B%7D'.format(app.config%5B'SERVICE_URL_BASE'%5D, 'api/cleanup')%0A req_headers = dict(Authorization='Bearer %7B%7D'.format(service_token))%0A%0A resp = requests.post(service_url,%0A headers=req_headers,%0A data=dict(%0A user_identity_name=session%5B'primary_username'%5D%0A ),%0A verify=False)%0A%0A resp.raise_for_status()%0A%0A task_id = resp_data%5B'task_id'%5D%0A msg = '%7B%7D (%7B%7D).'.format('Your existing processed graphs have been removed',%0A task_id)%0A flash(msg)%0A return redirect(url_for('graph'))%0A
3684e8be098300006b09c6677a2805e10d623acd
Add GYP file tld_cleanup tool.
net/tools/tld_cleanup/tld_cleanup.gyp
net/tools/tld_cleanup/tld_cleanup.gyp
Python
0.000001
@@ -0,0 +1,554 @@ +# Copyright (c) 2009 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0A%7B%0A 'variables': %7B%0A 'chromium_code': 1,%0A %7D,%0A 'includes': %5B%0A '../../../build/common.gypi',%0A %5D,%0A 'targets': %5B%0A %7B%0A 'target_name': 'tld_cleanup',%0A 'type': 'executable',%0A 'dependencies': %5B%0A '../../../base/base.gyp:base',%0A '../../../build/temp_gyp/googleurl.gyp:googleurl',%0A %5D,%0A 'sources': %5B%0A 'tld_cleanup.cc',%0A %5D,%0A %7D,%0A %5D,%0A%7D%0A
7b98a6bb0c2d1f1fc5c5265149b99e9d21cf784d
make engine.initialize n_chains argument a keyword argument
examples/dha_example.py
examples/dha_example.py
#
#   Copyright (c) 2010-2013, MIT Probabilistic Computing Project
#
#   Lead Developers: Dan Lovell and Jay Baxter
#   Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
#   Research Leads: Vikash Mansinghka, Patrick Shafto
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
import argparse
import os
#
import numpy
#
import crosscat.settings as S
import crosscat.utils.data_utils as du
import crosscat.utils.file_utils as fu
import crosscat.LocalEngine as LE


# parse input
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str)
parser.add_argument('--inf_seed', default=0, type=int)
parser.add_argument('--gen_seed', default=0, type=int)
parser.add_argument('--num_chains', default=25, type=int)
parser.add_argument('--num_transitions', default=200, type=int)
args = parser.parse_args()
#
filename = args.filename
inf_seed = args.inf_seed
gen_seed = args.gen_seed
num_chains = args.num_chains
num_transitions = args.num_transitions
#
pkl_filename = 'dha_example_num_transitions_%s.pkl.gz' % num_transitions


def determine_Q(M_c, query_names, num_rows, impute_row=None):
    name_to_idx = M_c['name_to_idx']
    query_col_indices = [name_to_idx[colname] for colname in query_names]
    row_idx = num_rows + 1 if impute_row is None else impute_row
    Q = [(row_idx, col_idx) for col_idx in query_col_indices]
    return Q

def determine_unobserved_Y(num_rows, M_c, condition_tuples):
    name_to_idx = M_c['name_to_idx']
    row_idx = num_rows + 1
    Y = []
    for col_name, col_value in condition_tuples:
        col_idx = name_to_idx[col_name]
        col_code = du.convert_value_to_code(M_c, col_idx, col_value)
        y = (row_idx, col_idx, col_code)
        Y.append(y)
    return Y


# set everything up
T, M_r, M_c = du.read_model_data_from_csv(filename, gen_seed=gen_seed)
num_rows = len(T)
num_cols = len(T[0])
col_names = numpy.array([M_c['idx_to_name'][str(col_idx)] for col_idx in range(num_cols)])

# initialze and transition chains
seeds = range(num_chains)
engine = LE.LocalEngine(inf_seed)
X_L_list, X_D_list = engine.initialize(M_c, M_r, T, 'from_the_prior', num_chains)
X_L_list, X_D_list = engine.analyze(M_c, T, X_L_list, X_D_list, n_steps=num_transitions)

# save the progress
to_pickle = dict(X_L_list=X_L_list, X_D_list=X_D_list)
fu.pickle(to_pickle, pkl_filename)

# to_pickle = fu.unpickle(pkl_filename)
# X_L_list = to_pickle['X_L_list']
# X_D_list = to_pickle['X_D_list']

engine = LE.LocalEngine(inf_seed)

# can we recreate a row given some of its values?
query_cols = [2, 6, 9]
query_names = col_names[query_cols]
Q = determine_Q(M_c, query_names, num_rows)
#
condition_cols = [3, 4, 10]
condition_names = col_names[condition_cols]
samples_list = []
for actual_row_idx in [1, 10, 100]:
    actual_row_values = T[actual_row_idx]
    condition_values = [actual_row_values[condition_col] for condition_col in condition_cols]
    condition_tuples = zip(condition_names, condition_values)
    Y = determine_unobserved_Y(num_rows, M_c, condition_tuples)
    samples = engine.simple_predictive_sample(M_c, X_L_list, X_D_list, Y, Q, 10)
    samples_list.append(samples)

round_1 = lambda value: round(value, 2)

# impute some values (as if they were missing)
for impute_row in [10, 20, 30, 40, 50, 60, 70, 80]:
    impute_cols = [31, 32, 52, 60, 62]
    #
    actual_values = [T[impute_row][impute_col] for impute_col in impute_cols]
    # conditions are immaterial
    Y = []
    imputed_list = []
    for impute_col in impute_cols:
        impute_names = [col_names[impute_col]]
        Q = determine_Q(M_c, impute_names, num_rows, impute_row=impute_row)
        #
        imputed = engine.impute(M_c, X_L_list, X_D_list, Y, Q, 1000)
        imputed_list.append(imputed)
    print
    print actual_values
    print map(round_1, imputed_list)
Python
0.000003
@@ -2626,16 +2626,25 @@ prior', +n_chains= num_chai
5f9c7d10957c7b0b0da46b031120fe2434315d0d
Test of new persistence layer.
ndtable/persistence/simple.py
ndtable/persistence/simple.py
Python
0
@@ -0,0 +1,882 @@ +from ndtable.carray import carray, cparams%0Afrom bloscpack import pack_list, unpack_file%0Afrom numpy import array, frombuffer%0A%0Adef test_simple():%0A filename = 'output'%0A%0A # hackish, just experimenting!%0A arr = carray(xrange(10000)).chunks%0A ca = %5Bbytes(chunk.viewof) for chunk in arr%5D%0A pack_list(ca, %7B%7D, filename, %7B'typesize': 8, 'clevel': 0, 'shuffle': False%7D)%0A%0A out_list, meta_info = unpack_file('output')%0A%0A assert out_list%5B0%5D == ca%5B0%5D%0A assert out_list%5B1%5D == ca%5B1%5D%0A%0Adef test_compressed():%0A filename = 'output'%0A%0A # hackish, just experimenting!%0A arr = carray(xrange(10000), cparams(clevel=5, shuffle=True)).chunks%0A ca = %5Bbytes(chunk.viewof) for chunk in arr%5D%0A pack_list(ca, %7B%7D, filename, %7B'typesize': 8, 'clevel': 5, 'shuffle': True%7D)%0A%0A out_list, meta_info = unpack_file('output')%0A%0A assert out_list%5B0%5D == ca%5B0%5D%0A assert out_list%5B1%5D == ca%5B1%5D%0A
e6a4863d9663791fabc4bd6ccdf0ab45ba2a86eb
Add standalone benchmark runner
remote_bench.py
remote_bench.py
Python
0.000001
@@ -0,0 +1,766 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0AStandalone benchmark runner%0A%22%22%22%0A%0Aimport cProfile%0Aimport pstats%0Aimport profile%0Aimport numpy as np%0A%0Aprint(%22Running Rust and Pyproj benchmarks%5Cn%22)%0A%0A# calibrate%0Apr = profile.Profile()%0Acalibration = np.mean(%5Bpr.calibrate(100000) for x in xrange(5)%5D)%0A# add the bias%0Aprofile.Profile.bias = calibration%0A%0AcProfile.run(open('benches/cprofile_rust.py', 'rb'), 'benches/output_stats_rust')%0Arust = pstats.Stats('benches/output_stats_rust')%0A%0AcProfile.run(open('benches/cprofile_pyproj.py', 'rb'), 'benches/output_stats_pyproj')%0Apyproj_ = pstats.Stats('benches/output_stats_pyproj')%0A%0Aprint(%22Rust Benchmark%5Cn%22)%0Arust.sort_stats('cumulative').print_stats(5)%0Aprint(%22Pyproj Benchmark%5Cn%22)%0Apyproj_.sort_stats('cumulative').print_stats(5)%0A
b4042f23d02e77c45d772fe64ae5e98db8b5e4e4
Add new package: re2 (#18302)
var/spack/repos/builtin/packages/re2/package.py
var/spack/repos/builtin/packages/re2/package.py
Python
0.00009
@@ -0,0 +1,718 @@ +# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Re2(CMakePackage):%0A %22%22%22RE2 is a fast, safe, thread-friendly alternative to backtracking%0A regular expression engines like those used in PCRE, Perl, and Python.%22%22%22%0A%0A homepage = %22https://github.com/google/re2%22%0A url = %22https://github.com/google/re2/archive/2020-08-01.tar.gz%22%0A%0A version('2020-08-01', sha256='6f4c8514249cd65b9e85d3e6f4c35595809a63ad71c5d93083e4d1dcdf9e0cd6')%0A version('2020-04-01', sha256='98794bc5416326817498384a9c43cbb5a406bab8da9f84f83c39ecad43ed5cea')%0A
9c0bcd4e0317aa8b76ebbf3c9ecae82d1b90027d
Create initial night sensor code for Pi
night_sensor/night_feature.py
night_sensor/night_feature.py
Python
0
@@ -0,0 +1,1274 @@ +%22%22%22%0A@author: Sze %22Ron%22 Chau%0A@e-mail: [email protected]%0A@source: https://github.com/wodiesan/sweet-skoomabot%0A@desc Night sensor--%3ERPi for Senior Design 1%0A%22%22%22%0A%0Aimport logging%0Aimport os%0Aimport RPi.GPIO as GPIO%0Aimport serial%0Aimport subprocess%0Aimport sys%0Aimport time%0Aimport traceback%0A%0A# GPIO pins. Uses the BCM numbering system based on RPi B+ board.%0AIR1 = 26%0AIR2 = 19%0AIR3 = 13%0AIR4 = 6%0A%0Adef init_serial():%0A %22%22%22Initialize the serial connection to the light sensor.%22%22%22%0A ser = serial.Serial()%0A #ser.port = %22%5C%5C.%5CCOM4%22 # Windows%0A ser.port = %22/dev/ttyUSB0%22 # Linux%0A ser.baudrate = 57600%0A try:%0A ser.open()%0A except Exception, e:%0A logger.info(%22Possible open serial port: %22 + str(e))%0A print 'Check the serial USB port.'%0A exit()%0A return ser%0A%0A%0Adef init_leds():%0A %22%22%22Initial setup for light sensor and IR LEDs. Currently uses the BCM%0A numbering system based on RPi B+ board.%22%22%22%0A GPIO.setmode(GPIO.BCM)%0A GPIO.setup(IR1, GPIO.OUT, initial=GPIO.HIGH)%0A GPIO.setup(IR2, GPIO.OUT, initial=GPIO.HIGH)%0A GPIO.setup(IR3, GPIO.OUT, initial=GPIO.HIGH)%0A GPIO.setup(IR4, GPIO.OUT, initial=GPIO.HIGH)%0A thread = threading.Thread(target=warnings)%0A thread.daemon = False%0A thread.start()%0A return thread%0A
b7e1e05bfe5aa7a8d91a4d8ee786e61b4aa7bd1b
Add ArrayQueue
ArrayQueue.py
ArrayQueue.py
Python
0.000001
@@ -0,0 +1,506 @@ +class ArrayQueue:%0A def __init__(self, max=10):%0A self._data = %5BNone%5D * max%0A self._size = 0%0A self._front = 0%0A self._max = max%0A%0A def enqueue(self, e):%0A self._data%5B(self._front + self._size) %25 self._max%5D = e%0A self._size += 1%0A%0A def dequeue(self):%0A rst, self._data%5Bself._front%5D = self._data%5Bself._front%5D, None%0A self._front = (self._front + 1) %25 self._max%0A self._size -= 1%0A return rst%0A%0A def __len__(self):%0A return self._size
2ec6caf58bd3295ae08e14e6ab2c01e347d17b2b
save OSA settings in render file metadata
Preferences/PresetList/Preset/Preset.py
Preferences/PresetList/Preset/Preset.py
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage preset'''
import xml.etree.ElementTree as xmlMod
from usefullFunctions import *
from Preferences.PresetList.Preset.Quality import *
from Preferences.PresetList.Preset.BounceSet import *
from Preferences.PresetList.Preset.Engine import *
from Preferences.PresetList.Preset.Options import *
import os, time, datetime

class Preset:
	'''class to manage preset'''
	
	def __init__(self, xml= None):
		'''initialize preset with default value or values extracted from an xml object'''
		if xml is None:
			self.defaultInit()
		else:
			self.fromXml(xml)
	
	def defaultInit(self):
		'''initialize preset with default value'''
		self.quality = Quality()
		self.bounce = BounceSet()
		self.engine = Engine()
		self.options = Options()
	
	def fromXml(self, xml):
		'''initialize preset with values extracted from an xml object'''
		self.quality = Quality(xml.find('quality'))
		self.bounce = BounceSet(xml.find('bounceSet'))
		self.engine = Engine(xml.find('engine'))
		self.options = Options(xml.find('options'))
	
	def toXml(self, alias = ''):
		'''export preset into xml syntaxed string'''
		txt = '<preset alias="'+alias+'" >\n'
		txt += self.quality.toXml()
		txt += self.bounce.toXml()
		txt += self.engine.toXml()
		txt += self.options.toXml()
		txt += '</preset>\n'
		return txt
	
	def menu(self, log, alias, versions):
		'''menu to explore and edit preset settings'''
		change = False
		log.menuIn(alias+' Preset')
		
		while True:
			log.print()
			self.print()
			print('''\n\n    Menu :
1- Edit Quality Settings
2- Edit Bounces Settings (Cycles)
3- Edit Rendering Options
9- Edit Engine Settings
0- Quit
''')
			choice = input('Action?').strip().lower()
			
			if choice in ['0', 'q', 'quit', 'cancel']:
				log.menuOut()
				return change
			elif choice == '1':
				change = (self.quality.menu(log) or change)
			elif choice == '2':
				change = (self.bounce.menu(log) or change)
			elif choice == '3':
				change = (self.options.menu(log) or change)
			elif choice == '9':
				change = (self.engine.menu(log, versions) or change)
			else:
				log.error('Unvalid menu choice', False)
	
	def print(self):
		'''a method to print preset'''
		self.quality.print()
		print()
		self.bounce.print()
		print()
		self.options.print()
		print()
		self.engine.print()
	
	def copy(self):
		'''A method to get a copy of current object'''
		xml = '<?xml version="1.0" encoding="UTF-8"?>\n'
		xml += self.toXml('')
		xml = xmlMod.fromstring(xml)
		return Preset(xml)
	
	def renameBlenderVersion(self, old, new):
		'''rename a blender Version if used'''
		self.engine.renameBlenderVersion(old, new)
	
	def useBlenderVersion(self, name):
		'''check if blender version is used by this preset'''
		return self.engine.useBlenderVersion(name)
	
	def eraseBlenderVersion(self, name):
		'''erase blender version in preset who use it'''
		self.engine.eraseBlenderVersion(name)
	
	def applyAndRun(self, bpy, preferences, logGroup, socket, task):
		'''apply settings to a blender scene object and render it, frame by frame'''
		scene = bpy.context.screen.scene
		self.quality.apply(scene)
		self.bounce.apply(scene)
		self.engine.apply(scene, preferences)
		self.options.apply(scene)
		
		metadata = 'uid:'+task.uid+';Main preset:«'+task.preset+'»;'+\
					'group:«'+logGroup.name+'»;preset:«'+logGroup.presetName+'»;'+\
					'version:«'+self.engine.version+\
					'»('+str(bpy.app.version[0])+'.'+str(bpy.app.version[1])+');'+\
					'engine:'+self.engine.engine+';'
		
		if self.engine.engine == 'CYCLES':
			metadata += 'device:'+self.engine.device+\
						';samples:'+str(self.quality.samples)+\
						';exposure(cycles):'+str(self.options.exposureC)+\
						';bounces:'+self.bounce.metadata()+';'
		else:
			metadata += 'exposure(BI):'+str(self.options.exposureB)+';'
		
		if self.quality.simplify is not None:
			metadata += 'simplify:'+str(self.quality.simplify)+';'
		
		scene.render.stamp_note_text = metadata
		scene.frame_current = scene.frame_start + len(logGroup.frames)
		
		while scene.frame_current <= scene.frame_end \
				and task.running != 'until next frame':
			start = time.time()
			scene.render.filepath = task.log.getMainPath()\
						+logGroup.subpath\
						+(logGroup.naming.replace('####', str(scene.frame_current)))
			bpy.ops.render.render( write_still=True )
			endDate = datetime.datetime.today()
			computeTime = time.time() - start
			
			msg = task.uid+' ConfirmFrame('+logGroup.name\
					+','+str(scene.frame_current)+','+endDate.strftime('%d:%m:%Y:%H:%M:%S')\
					+','+str(computeTime)+') EOS'
			socket.sendall(msg.encode())
			
			scene.frame_current += 1
Python
0
@@ -3927,24 +3927,388 @@ posureB)+';' +%5C%0A%09%09%09%09%09%09+'OSA:'+%7B True:'enabled', False:'disabled' %7D%5Bself.quality.OSA.enabled%5D+';'%0A%09%09%09%0A%09%09%09if self.quality.OSA.enabled:%0A%09%09%09%09metadata += 'OSA(set)'%0A%09%09%09%09%0A%09%09%09%09if self.quality.OSA.fullSample:%0A%09%09%09%09%09metadata += 'FULL'%0A%09%09%09%09%0A%09%09%09%09metadata += str(self.quality.OSA.samples)%5C%0A%09%09%09%09%09%09%09+self.quality.OSA.FILTERS%5Bself.quality.OSA.filter%5D%5C%0A%09%09%09%09%09%09%09+'@'+str(self.quality.OSA.size)+';' %0A%09%09%0A%09%09if sel
55b6d19fc8c80e3d4ff7842f20d284879f5ea151
Create BubbleSort.py
BubbleSort.py
BubbleSort.py
Python
0.000001
@@ -0,0 +1,462 @@ +%22%22%22%0A%E5%86%92%E6%B3%A1%EF%BC%9A%0A %E5%8E%9F%E5%A7%8B%E7%89%88%E6%9C%AC%EF%BC%9A%E5%B0%86i%E7%94%B10%E5%BC%80%E5%A7%8B%EF%BC%8C%E4%B8%8E%E5%90%8E%E9%9D%A2%E6%AF%8F%E4%B8%80%E4%B8%AAj=i+1 %E8%BF%9B%E8%A1%8C%E6%AF%94%E8%BE%83%EF%BC%8C%E4%BA%A4%E6%8D%A2%0A %E5%86%8Di=1 ...%E8%BF%99%E6%A0%B7%E5%A5%BD%E4%B8%8D%E5%AE%B9%E6%98%93%E6%8D%A2%E5%88%B0%E5%89%8D%E9%9D%A2%E7%AC%AC%E4%B8%80%E4%BD%8D%E7%9A%84%E5%AE%B9%E6%98%93%E8%A2%AB%E5%BA%8F%E5%88%97%E6%9C%80%E5%90%8E%E4%B8%80%E4%B8%AA%E6%9C%80%E5%B0%8F%E5%80%BC%E7%9B%B4%E6%8E%A5%E6%80%BC%E5%88%B0%E6%9C%AB%E5%B0%BE%E5%8E%BB%0A%0A %E7%8E%B0%E5%9C%A8%E7%9A%84%E6%9B%B4%E6%96%B0%E7%89%88%EF%BC%9Ai%E7%94%B10%E5%BC%80%E5%A7%8B%0A j = length-2 %E4%B8%8E j = length-1 %E8%BF%9B%E8%A1%8C%E6%AF%94%E8%BE%83%EF%BC%8C%E6%8D%A2%E4%BD%8D%0A %E7%A1%AE%E4%BF%9D%E7%A7%BB%E5%88%B0%E4%B8%8A%E9%9D%A2%E7%9A%84%E8%BE%83%E5%B0%8F%E5%80%BC%E4%B8%8D%E4%BC%9A%E6%9C%89%E5%A4%AA%E5%A4%A7%E7%9A%84%E5%8F%98%E5%8A%A8 -- %E8%A7%81P381 %E5%9B%BE%0A%22%22%22%0A%0A%0Adef bubble_sort(lists):%0A count = len(lists)%0A for i in range(0, count):%0A for j in range(i, count-1)%5B::-1%5D:%0A if lists%5Bj%5D %3E lists%5Bj+1%5D:%0A lists%5Bj%5D, lists%5Bj+1%5D = lists%5Bj+1%5D, lists%5Bj%5D%0A return lists%0A
17966b6af3039aa6d6308e1592c14527513c70c1
apply oa start date from journals to related update requests - script
portality/migrate/3053_oa_start_date_from_journals_to_urs/migrate.py
portality/migrate/3053_oa_start_date_from_journals_to_urs/migrate.py
Python
0
@@ -0,0 +1,2196 @@ +%22%22%22%0AThis script can be run to generate a CSV output of accounts which do not have their passwords set, along%0Awith some useful account information, and possible explanations for the lack of password%0A%0A%60%60%60%0Apython accounts_with_missing_passwords.py -o accounts.csv%0A%60%60%60%0A%22%22%22%0Aimport csv%0Aimport esprit%0Afrom portality.core import es_connection%0Afrom portality.util import ipt_prefix%0Afrom portality import models%0A%0AJOURNALS_WITH_OA_START_DATE = %7B%0A %22query%22: %7B%0A %22filtered%22: %7B%0A %22filter%22: %7B%0A %22exists%22 : %7B%0A %22field%22 : %22bibjson.oa_start%22%0A %7D%0A %7D,%0A %22query%22: %7B%0A %22match_all%22: %7B%7D%0A %7D%0A %7D%0A %7D,%0A %22size%22: 200000%0A%7D%0A%0A%0Aif __name__ == %22__main__%22:%0A%0A import argparse%0A parser = argparse.ArgumentParser()%0A parser.add_argument(%22-o%22, %22--out%22, help=%22output file path%22)%0A args = parser.parse_args()%0A%0A if not args.out:%0A print(%22Please specify an output file path with the -o option%22)%0A parser.print_help()%0A exit()%0A%0A conn = es_connection%0A%0A with open(args.out, %22w%22, encoding=%22utf-8%22) as f:%0A writer = csv.writer(f)%0A writer.writerow(%5B%22ID%22, %22OA Start Date%22, %22Current Application ID%22, %22Application found%22%5D)%0A%0A for j in esprit.tasks.scroll(conn, ipt_prefix('journal'),%0A q=JOURNALS_WITH_OA_START_DATE,%0A page_size=100, keepalive='1m'):%0A%0A journal = models.Journal(_source=j)%0A bibjson = journal.bibjson()%0A if journal.current_application is not None:%0A ur = models.Application.pull(journal.current_application)%0A application_found = True%0A if ur is not None:%0A application_found = False%0A urb = ur.bibjson()%0A urb.oa_start = bibjson.oa_start%0A ur.save()%0A%0A try:%0A writer.writerow(%0A %5Bjournal.id, bibjson.oa_start, journal.current_application, application_found%5D)%0A except AttributeError:%0A print(%22Error reading attributes for journal %7B0%7D%22.format(j%5B'id'%5D))%0A%0A
ab50818c18b4275c205419c4c844bfc9ecb7a4c8
add rename.py
FileUtils/rename.py
FileUtils/rename.py
Python
0.000002
@@ -0,0 +1,514 @@ +import os%0Aimport sys%0Aimport re%0A%0Adirname, filename = os.path.split(os.path.abspath(sys.argv%5B0%5D))%0Aos.chdir(dirname)%0AfileList = os.listdir(dirname)%0Aprint dirname%0Aname='edge_effect_'%0Afor fileItem in fileList:%0A%09dotIndex = fileItem.rfind('.')%0A%09fileName = fileItem%5B: dotIndex%5D%0A%09fileExt = fileItem%5BdotIndex : %5D%0A%09print fileName,fileExt%0A%09#m=re.search(%22%5B%5Eqd%5D%5Cw+%22,fileName)%0A%09if fileName.find(name)%3C0 and fileName.find(%22rename%22)%3C0:%0A%09%09print %22111%22%0A%09%09os.rename(fileItem,name+fileName+fileExt)%0A%09 %09pass %0A%09#print 'm.group:'m.group(0)
dc993796fc15e3670c8a702f43fcb9a5d9b4c84e
Add forgotten file.
astrobin_apps_donations/utils.py
astrobin_apps_donations/utils.py
Python
0
@@ -0,0 +1,234 @@ +from subscription.models import UserSubscription%0A%0Adef user_is_donor(user):%0A if user.is_authenticated:%0A return UserSubscription.objects.filter(user = user, subscription__name = 'AstroBin Donor').count() %3E 0%0A return False%0A%0A
e51f3869b4a047489b9bb1e4b88af0e0bdc3078b
Add a command to list all the documents.
paper_to_git/commands/list_command.py
paper_to_git/commands/list_command.py
Python
0
@@ -0,0 +1,1140 @@ +%22%22%22%0AList the Documents and Folders%0A%22%22%22%0A%0Afrom paper_to_git.commands.base import BaseCommand%0Afrom paper_to_git.models import PaperDoc, PaperFolder%0A%0A__all__ = %5B%0A 'ListCommand',%0A %5D%0A%0A%0Aclass ListCommand(BaseCommand):%0A %22%22%22List the PaperDocs and Folders%0A %22%22%22%0A%0A name = 'list'%0A%0A def add(self, parser, command_parser):%0A self.parser = parser%0A command_parser.add_argument('-d', '--docs',%0A default=False, action='store_true',%0A help=(%22%22%22%5C%0A List all the documents currently stored.%22%22%22))%0A command_parser.add_argument('-fd', '--folders',%0A default=False, action='store_true',%0A help=(%22%22%22List all folders in Dropbox Paper%22%22%22))%0A%0A def process(self, args):%0A if args.docs:%0A for doc in PaperDoc.select():%0A print(doc)%0A%0A if args.folders:%0A for folder in PaperFolder.select():%0A print(folder)%0A for doc in folder.docs:%0A print('%7C----%7B%7D'.format(doc))%0A%0A if not (args.docs or args.folders):%0A print(%22Please provide atleast one of the --docs or --folders flags%22)%0A
a797de9014a3d466bb10e9bc318c3e2edec328be
add base for rendering widgets
packages/SCIRun/renderbase.py
packages/SCIRun/renderbase.py
Python
0
@@ -0,0 +1,286 @@ +from core import system%0Afrom core.modules.module_registry import registry%0Afrom packages.spreadsheet.basic_widgets import SpreadsheetCell, CellLocation%0A%0Aclass Render(SpreadsheetCell):%0A def compute(self): %0A pass%0A%0Adef registerRender():%0A registry.add_module(Render, abstract=True)%0A
62ccbd84a2560a70c0964e2106a1a756dc060d96
add a commented-out option to include just one suite, and mark tests tagged as inprogress as noncritical
test/run_tests.py
test/run_tests.py
#!/usr/bin/env python

#  Copyright 2008-2009 Nokia Siemens Networks Oyj
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.

import os
import sys
from subprocess import Popen, call
from tempfile import TemporaryFile

from robot import utils

from run_unit_tests import run_unit_tests

ROOT = os.path.dirname(__file__)
TESTDATADIR = os.path.join(ROOT, 'acceptance')
RESOURCEDIR = os.path.join(ROOT, 'resources')
SRCDIR = os.path.join(ROOT, '..', 'src')
UTESTDIR = os.path.join(ROOT, 'unit')
RESULTDIR = os.path.join(ROOT, 'results')
HTPPSERVER = os.path.join(RESOURCEDIR, 'testserver', 'testserver.py')

ROBOT_ARGS = [
'--doc', 'SeleniumSPacceptanceSPtestsSPwithSP%(browser)s',
'--outputdir', '%(outdir)s',
'--variable', 'browser:%(browser)s',
'--escape', 'space:SP',
'--report', 'none',
'--log', 'none',
'--loglevel', 'DEBUG',
'--pythonpath', '%(pythonpath)s',
]
REBOT_ARGS = [
'--outputdir', '%(outdir)s',
'--name', '%(browser)sSPAcceptanceSPTests',
'--escape', 'space:SP',
'--critical', 'regression',
]
ARG_VALUES = {'outdir': RESULTDIR, 'pythonpath': SRCDIR}

def acceptance_tests(interpreter, browser, args):
    ARG_VALUES['browser'] = browser.replace('*', '')
    # TODO: running unit tests this way fails on my Windows, why?
    start_http_server()
    suffix = utils.is_windows and 'ybot.bat' or 'ybot'
    runner = "%s%s" % ('jython' == interpreter and 'j' or 'p', suffix)
    execute_tests(runner)
    stop_http_server()
    return process_output()

def start_http_server():
    server_output = TemporaryFile()
    Popen(['python', HTPPSERVER ,'start'], stdout=server_output,
          stderr=server_output)

def execute_tests(runner):
    command = [runner] + [ arg % ARG_VALUES for arg in ROBOT_ARGS] + args +\
        [ TESTDATADIR ]
    call(command, env=os.environ)

def stop_http_server():
    call(['python', HTPPSERVER, 'stop'])

def process_output():
    print
    call(['python', os.path.join(RESOURCEDIR, 'statuschecker.py'),
          os.path.join(RESULTDIR, 'output.xml')])
    rebot = utils.is_windows and 'rebot.bat' or 'rebot'
    rebot_cmd = [rebot] + [ arg % ARG_VALUES for arg in REBOT_ARGS ] + \
                [os.path.join(ARG_VALUES['outdir'], 'output.xml') ]
    rc = call(rebot_cmd, env=os.environ)
    if rc == 0:
        print 'All critical tests passed'
    else:
        print '%d critical test%s failed' % (rc, 's' if rc != 1 else '')
    return rc

if __name__ == '__main__':
    if not len(sys.argv) > 2:
        print 'usage: python run_acceptance_tests python|jython browser [options]'
        print 'where `browser` is any browser or alias accepted by SeleniumLibrary.'
        sys.exit(1)
    interpreter = sys.argv[1] == 'jython' and 'jython' or 'python'
    browser = sys.argv[2].lower()
    args = sys.argv[3:]
    if not args:
        if interpreter == 'jython':
            print 'This script does not run unit tests with Jython'
            print 'They can be excecuted with jython test/run_unit_tests.py'
        else:
            print 'Running unit tests'
            failures = run_unit_tests()
            if failures != 0:
                print '\n%d unit tests failed - not running acceptance tests!' % failures
                sys.exit(1)
            print 'All unit tests passed'
    if browser != 'unit':
        sys.exit(acceptance_tests(interpreter, browser, args))
0
@@ -1297,24 +1297,43 @@ g', 'none',%0A +#'--suite', '...',%0A '--loglevel' @@ -1519,16 +1519,47 @@ ssion',%0A +'--noncritical', 'inprogress',%0A %5D%0AARG_VA
8b1e6b226d925d7f2ef4890463122ec8046aa07a
add test
sensor/test_compass.py
sensor/test_compass.py
Python
0.000002
@@ -0,0 +1,96 @@ +#! /usr/bin/python%0Afrom Adafruit_LSM303 import LSM303%0A%0Alsm = LSM303()%0Awhile 1:%0A%09print lsm.read()
f0feed6b5e664bb4e8e63b8525f7438ee5e75b9f
clean up doc
inferno/lib/job_runner.py
inferno/lib/job_runner.py
from inferno.lib.disco_ext import get_disco_handle
from inferno.lib.job import InfernoJob
from inferno.lib.rule import (extract_subrules, deduplicate_rules, flatten_rules)


def _start_job(rule, settings, urls=None):
    """Start a new job for an InfernoRule

    Note that the output of this function is a tuple of (InfernoJob, DiscoJob)
    If this InfernoJob fails to start by some reasons, e.g. not enough blobs,
    the DiscoJob would be None.
    """
    job = InfernoJob(rule, settings, urls)
    return job, job.start()


def _run_concurrent_rules(rule_list, settings, urls_blackboard):
    """Execute a list of rules concurrently, it assumes all the rules are
    runable(ie. all output urls of its sub_rule are available)

    Output: job_results. A dictionary of (rule_name : outputurls) pairs

    Exceptions: JobError, if it fails to start a job or one of the jobs dies
    """
    def _get_rule_name(disco_job_name):
        return disco_job_name.rsplit('@')[0]

    # need to save both inferno_jobs and disco_jobs
    jobs = []
    inferno_jobs = []
    for rule in rule_list:
        urls = []
        for sub_rule in extract_subrules(rule):
            urls += urls_blackboard[sub_rule.name]
        inferno_job, job = _start_job(rule, settings, urls)
        if job:
            jobs.append(job)
            inferno_jobs.append(inferno_job)
        else:
            raise Exception('There is not enough blobs to run %s' % rule.name)
    job_results = {}
    stop = False
    server, _ = get_disco_handle(settings.get('server'))
    while jobs:
        inactive, active = server.results(jobs, 5000)
        for jobname, (status, results) in inactive:
            if status == "ready":
                job_results[_get_rule_name(jobname)] = results
            elif status == "dead":
                stop = True
        jobs = active
        if stop:
            break
    if stop:
        for jobname, _ in jobs:
            server.kill(jobname)
        raise Exception('One of the concurrent jobs failed.')
    return inferno_jobs, job_results


def _run_sequential_rules(rule_list, settings, urls_blackboard):
    """Execute a list of rules sequentially

    Note that the urls_blackboard could not be updated during the execution,
    since the wait method of InfernoJob does NOT return any urls. If the rule
    needs the results of previous rule, use _run_concurrent_rules instead.
    """
    for rule in rule_list:
        urls = []
        for sub_rule in extract_subrules(rule):
            urls += urls_blackboard[sub_rule.name]
        job, disco_job = _start_job(rule, settings, urls)
        if disco_job:
            job.wait()
        else:
            raise Exception('There is not enough blobs to run %s' % rule.name)


def execute_rule(rule_, settings):
    """Execute an InfernoRule, it handles both single rule and nested rule cases

    * For the single rule, it is executed as usual.
    * For the nested rule, all sub-rules would be executed in a concurrent mode.
      Once all sub-rules are done, run the top-level rule as a single rule.
    """
    def _get_runable_rules(rules, blackboard):
        ready_to_run = []
        for rule in rules:
            ready = True
            for sub_rule in extract_subrules(rule):
                if not blackboard.get(sub_rule.name, None):
                    ready = False
                    break
            if ready:
                ready_to_run.append(rule)
        return ready_to_run

    all_rules = deduplicate_rules(flatten_rules(rule_))

    # initialize the url blackboard, on which each entry is a
    # rule_name : outputurls pair. Default value of outputurls is []
    urls_blackboard = {}
    for rule in all_rules:
        urls_blackboard[rule.name] = []

    # execute all sub-rules concurrently, collect urls for the top-level rule
    parent, sub_rules = all_rules[-1:], all_rules[:-1]
    inferno_jobs = []  # collect all sub-jobs in order to purge them in the end
    while sub_rules:
        runable_rules = _get_runable_rules(sub_rules, urls_blackboard)
        try:
            jobs, ret = _run_concurrent_rules(runable_rules, settings, urls_blackboard)
        except Exception as sub_rule_exception:
            raise sub_rule_exception
        inferno_jobs += jobs
        for key, value in ret.iteritems():
            urls_blackboard[key] = value
        sub_rules = [rule for rule in sub_rules if rule not in runable_rules]

    try:
        _run_sequential_rules(parent, settings, urls_blackboard)
        # InfernoJob will take care about whether purge the sub-jobs or not
        for job in inferno_jobs:
            job._purge(job.job.name)
    except Exception as parent_rule_exception:
        raise parent_rule_exception
Python
0
@@ -112,17 +112,16 @@ import -( extract_ @@ -162,17 +162,16 @@ en_rules -) %0A%0A%0Adef _ @@ -832,16 +832,17 @@ -JobError +Exception , if
a5dbda3f429d0a1e6cb4fc28b2a620dc2b40fd59
Resolve import dependency in consoleauth service
nova/cmd/consoleauth.py
nova/cmd/consoleauth.py
# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VNC Console Proxy Server.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import version CONF = cfg.CONF def main(): config.parse_args(sys.argv) logging.setup("nova") gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-consoleauth', topic=CONF.consoleauth_topic) service.serve(server) service.wait()
Python
0.000014
@@ -730,16 +730,41 @@ config%0A +from nova import objects%0A from nov @@ -1013,16 +1013,43 @@ (%22nova%22) +%0A objects.register_all() %0A%0A gm
23b2578fadd8a7ee0885e9956a10667d647acaf8
add basic test for bist
test/test_bist.py
test/test_bist.py
Python
0.00044
@@ -0,0 +1,671 @@ +#!/usr/bin/env python3%0Afrom litex.soc.tools.remote import RemoteClient%0A%0Awb = RemoteClient(csr_data_width=8)%0Awb.open()%0Aregs = wb.regs%0A%0A# # #%0A%0Atest_size = 128*1024*1024%0A%0Aregs.generator_reset.write(1)%0Aregs.generator_reset.write(0)%0Aregs.generator_base.write(0)%0Aregs.generator_length.write((test_size*8)//128)%0A%0Aregs.generator_shoot.write(1)%0Awhile(not regs.generator_done.read()):%0A pass%0A%0Aregs.checker_reset.write(1)%0Aregs.checker_reset.write(0)%0Aregs.checker_base.write(0)%0Aregs.checker_length.write((test_size*8)//128)%0A%0Aregs.checker_shoot.write(1)%0Awhile(not regs.checker_done.read()):%0A pass%0A%0Aprint(%22errors: %7B:d%7D%22.format(regs.checker_error_count.read()))%0A%0A# # #%0A%0Awb.close()%0A
633efb26b4ba0498413d7c203df51f78f1968478
Add true/false values in condition regex
nyuki/utils/evaluate.py
nyuki/utils/evaluate.py
import re
import ast
from collections import defaultdict
import logging

log = logging.getLogger(__name__)

EXPRESSIONS = [
    # Types of values
    ast.Dict, ast.List, ast.NameConstant, ast.Num, ast.Set, ast.Str,
    ast.Tuple,
    # Types of operations
    ast.Compare, ast.BoolOp, ast.UnaryOp
]
OPERATORS = [
    ast.And, ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE,
    ast.In, ast.NotIn, ast.Invert, ast.Is, ast.IsNot, ast.Not, ast.Or,
    ast.UAdd, ast.USub
]
CONTEXTS = [ast.Load]
AUTHORIZED_TYPES = EXPRESSIONS + OPERATORS + CONTEXTS


def safe_eval(expr):
    """
    Ensures an expression only defines authorized operations (no call to
    functions, no variable assignement...) and evaluates it.
    """
    tree = ast.parse(expr, mode='eval').body
    for node in ast.walk(tree):
        if not type(node) in AUTHORIZED_TYPES:
            raise TypeError("forbidden type {} found in {}".format(node, expr))
    return bool(eval(expr))


class ConditionBlock:

    def __init__(self, conditions):
        # Check there is at least one condition
        if len(conditions) == 0:
            raise ValueError('no condition in condition block')
        # Check first condition is 'if'
        if conditions[0]['type'] != 'if':
            raise TypeError("first condition must be an 'if'")
        # Check next conditions (if any)
        if len(conditions) >= 2:
            for cond in conditions[1:-1]:
                # All intermediate conditions must be 'elif'
                if cond['type'] != 'elif':
                    raise TypeError("expected 'elif' condition,"
                                    " got '{}'".format(cond))
            # The last condition can be either an 'elif' or an 'else'
            if conditions[-1]['type'] not in ('elif', 'else'):
                raise TypeError("last condition must be 'elif' or 'else',"
                                " got '{}'".format(conditions[-1]))
        self._conditions = conditions

    def _clean_condition(self, condition, data):
        """
        Format the condition string (as eval-compliant code).
        nb: variable replacement should be `@variable_name` formatted.
        """
        match = re.findall(
            r' *(and|or)? *\( *(@\S*|\'[^\']*\'|\d+) +([=<>!]=?|not in|in) +(@\S*|\d+|\'[^\']*\') *\)',
            condition
        )
        if not match:
            return condition

        def replace(match):
            key = match.group('var_name')
            value = data.get(key)
            placeholder = '{!r}' if isinstance(value, str) else '{}'
            return placeholder.format(value)

        # Reconstruct a cleaned string from the operation parts.
        # See https://regex101.com/r/hUueag/1
        cleaned = ''
        for operation in match:
            # Get 'and' or 'or' operation
            andor = operation[0]
            # Restructure condition string, striping any trailing space
            ops = []
            ops.append(re.sub(r'^@(?P<var_name>\w+)$', replace, operation[1]))
            ops.append(operation[2])
            ops.append(re.sub(r'^@(?P<var_name>\w+)$', replace, operation[3]))
            cleaned += '{}({})'.format(andor, ' '.join(ops))
        return cleaned

    def condition_validated(self, condition, data):
        """
        To be overridden to do your own logic once the condition has
        been validated (True) by `self._evaluate`.
        """
        raise NotImplementedError

    def apply(self, data):
        """
        Iterate through the conditions and stop at first validated condition.
        """
        for condition in self._conditions:
            # If type 'else', set given next tasks and leave
            if condition['type'] == 'else':
                self.condition_validated(condition['rules'], data)
                return
            # Else find the condition and evaluate it
            cleaned = self._clean_condition(condition['condition'], data)
            log.debug('arithmetics: trying %s', cleaned)
            if safe_eval(cleaned):
                log.debug(
                    'arithmetics: validated condition "%s" as "%s"',
                    condition, cleaned
                )
                self.condition_validated(condition['rules'], data)
                return
0.001485
@@ -2298,16 +2298,27 @@ *(@%5CS*%7C +true%7Cfalse%7C %5C'%5B%5E%5C'%5D* @@ -2354,16 +2354,27 @@ +(@%5CS*%7C +true%7Cfalse%7C %5Cd+%7C%5C'%5B%5E @@ -2799,17 +2799,17 @@ /hUueag/ -1 +2 %0A
f070b3c9a97b16aebc8500af703ed713e170f519
Fix Dask-on-Ray test: Python 3 dictionary .values() is a view, and is not indexable (#13945)
python/ray/tests/test_dask_scheduler.py
python/ray/tests/test_dask_scheduler.py
import dask
import numpy as np
import dask.array as da
import pytest

import ray
from ray.util.dask import ray_dask_get


def test_ray_dask_basic(ray_start_regular_shared):
    @ray.remote
    def stringify(x):
        return "The answer is {}".format(x)

    zero_id = ray.put(0)

    def add(x, y):
        # Can retrieve ray objects from inside Dask.
        zero = ray.get(zero_id)
        # Can call Ray methods from inside Dask.
        return ray.get(stringify.remote(x + y + zero))

    add = dask.delayed(add)

    @ray.remote
    def call_add():
        z = add(2, 4)
        # Can call Dask graphs from inside Ray.
        return z.compute(scheduler=ray_dask_get)

    ans = ray.get(call_add.remote())
    assert ans == "The answer is 6", ans


def test_ray_dask_persist(ray_start_regular_shared):
    arr = da.ones(5) + 2
    result = arr.persist(scheduler=ray_dask_get)
    np.testing.assert_array_equal(result.dask.values()[0], np.ones(5) + 2)


if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
Python
0.000001
@@ -910,16 +910,35 @@ y_equal( +%0A next(iter( result.d @@ -953,12 +953,19 @@ es() -%5B0%5D, +)),%0A np.
5e008ac92016a092c1ce9c9590a79d72f4cf1cf6
Initialize tests
tests/__main__.py
tests/__main__.py
Python
0.000001
@@ -0,0 +1,64 @@ +import unittest%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
0e3effc3a7402d3b4c1b2c91539c4d1004c5b0e3
Add test_traitscli.py
test_traitscli.py
test_traitscli.py
Python
0.000004
@@ -0,0 +1,2300 @@ +import unittest%0A%0Afrom traits.api import Event, Callable, Type%0A%0Afrom traitscli import TraitsCLIBase%0Afrom sample import SampleCLI%0A%0A%0Aclass TestingCLIBase(TraitsCLIBase):%0A%0A def do_run(self):%0A # Get trait attribute names%0A names = self.class_trait_names(%0A # Avoid 'trait_added' and 'trait_modified'%0A # (See also %60HasTraits.traits%60):%0A trait_type=lambda t: not isinstance(t, Event))%0A self.attributes = dict((n, getattr(self, n)) for n in names)%0A%0A%0Aclass TestCaseBase(unittest.TestCase):%0A%0A cliclass = None%0A %22%22%22Subclass of %60TraitsCLIBase%60.%22%22%22%0A%0A def assert_attributes(self, attributes, args=%5B%5D):%0A ret = self.cliclass.cli(args)%0A self.assertEqual(ret.attributes, attributes)%0A%0A%0Aclass TestSampleCLI(TestCaseBase):%0A%0A class cliclass(TestingCLIBase, SampleCLI):%0A pass%0A%0A def test_empty_args(self):%0A self.assert_attributes(dict(%0A yes=False,%0A no=True,%0A fnum=0.0,%0A inum=0,%0A string='',%0A choice='a',%0A not_configurable_from_cli=False,%0A ))%0A%0A def test_full_args(self):%0A self.assert_attributes(%0A dict(%0A yes=True,%0A no=False,%0A fnum=0.2,%0A inum=2,%0A string='some string',%0A choice='b',%0A not_configurable_from_cli=False,%0A ),%0A %5B'--yes', '--no',%0A '--fnum', '0.2',%0A '--inum', '2',%0A '--string', 'some string',%0A '--choice', 'b',%0A %5D)%0A%0A def test_invalid_type_int(self):%0A self.assertRaises(SystemExit, self.cliclass.cli, %5B'--inum', 'x'%5D)%0A%0A def test_invalid_type_float(self):%0A self.assertRaises(SystemExit, self.cliclass.cli, %5B'--fnum', 'x'%5D)%0A%0A def test_invalid_type_enum(self):%0A self.assertRaises(SystemExit, self.cliclass.cli, %5B'--choice', 'x'%5D)%0A%0A%0Aclass TestEvalType(TestCaseBase):%0A%0A class cliclass(TestingCLIBase):%0A callable = Callable(config=True)%0A type = Type(config=True)%0A%0A def test_full_args(self):%0A self.assert_attributes(%0A dict(%0A callable=id,%0A type=int,%0A ),%0A %5B'--callable', 'id',%0A '--type', 'int',%0A %5D)%0A
31622652980f603ddc308dff514eae65635eb318
Add serializers to serialize Image to a PIL image (optionally resized) and a binary object (optionally resized)
app/grandchallenge/retina_api/serializers.py
app/grandchallenge/retina_api/serializers.py
Python
0.000002
@@ -0,0 +1,1653 @@ +from io import BytesIO%0A%0Aimport SimpleITK as sitk%0Afrom PIL import Image as PILImage%0A%0Afrom django.http import Http404%0Afrom rest_framework import serializers%0A%0A%0Aclass PILImageSerializer(serializers.BaseSerializer):%0A %22%22%22%0A Read-only serializer that returns a PIL image from a Image instance.%0A If %22width%22 and %22height%22 are passed as extra serializer content, the%0A PIL image will be resized to those dimensions.%0A %22%22%22%0A%0A def to_representation(self, instance):%0A image_itk = instance.get_sitk_image()%0A if image_itk is None:%0A raise Http404%0A pil_image = self.convert_itk_to_pil(image_itk)%0A try:%0A pil_image.thumbnail(%0A (self.context%5B%22width%22%5D, self.context%5B%22height%22%5D),%0A PILImage.ANTIALIAS,%0A )%0A except KeyError:%0A pass%0A return pil_image%0A%0A @staticmethod%0A def convert_itk_to_pil(image_itk):%0A depth = image_itk.GetDepth()%0A image_nparray = sitk.GetArrayFromImage(image_itk)%0A if depth %3E 0:%0A # Get center slice of image if 3D%0A image_nparray = image_nparray%5Bdepth // 2%5D%0A return PILImage.fromarray(image_nparray)%0A%0A%0Aclass BytesImageSerializer(PILImageSerializer):%0A %22%22%22%0A Read-only serializer that returns a BytesIO image from an Image instance.%0A %22%22%22%0A%0A def to_representation(self, instance):%0A image_pil = super().to_representation(instance)%0A return self.create_thumbnail_as_bytes_io(image_pil)%0A%0A @staticmethod%0A def create_thumbnail_as_bytes_io(image_pil):%0A buffer = BytesIO()%0A image_pil.save(buffer, format=%22png%22)%0A return buffer.getvalue()%0A
101a4c1288ddadbad6dbe0186adde3921ef2546f
add ctrl-c handler
lib/ctrlc.py
lib/ctrlc.py
Python
0.000036
@@ -0,0 +1,470 @@ +import sys%0Aimport time%0Aimport signal%0A%0Aclass CtrlC:%0A pressed = False%0A%0A @classmethod%0A def handle(cls, signal, frame):%0A print('Ctrl-C pressed, will exit soon')%0A if cls.pressed:%0A print('Ctrl-C pressed twice. Committing violent suicide.')%0A sys.exit(1)%0A cls.pressed = True%0A%0Asignal.signal(signal.SIGINT, CtrlC.handle)%0A%0Aif __name__ == '__main__':%0A time.sleep(2)%0A if CtrlC.pressed:%0A print('yay')%0A time.sleep(2)%0A
1298cf9c7a40ce73d46067035ded2318c62f7380
Add simple tests for DrsSymbol and DrsIndexed
tests/drs_test.py
tests/drs_test.py
Python
0
@@ -0,0 +1,1725 @@ +%22%22%22Tests for drudge scripts.%22%22%22%0A%0Afrom sympy import Symbol, IndexedBase%0A%0Afrom drudge.drs import DrsSymbol%0Afrom drudge.utils import sympy_key%0A%0A%0A#%0A# Unit tests for the utility classes and functions%0A# ------------------------------------------------%0A#%0A%0A%0Adef test_basic_drs_symb():%0A %22%22%22Test the symbol class for basic operations.%0A %22%22%22%0A%0A name = 'a'%0A ref = Symbol(name)%0A dict_ = %7Bref: 1%7D%0A%0A symbs = %5B%0A DrsSymbol(None, name),%0A DrsSymbol(%5B%5D, name)%0A %5D%0A for i in symbs:%0A assert isinstance(i, DrsSymbol)%0A assert ref == i%0A assert i == ref%0A assert hash(ref) == hash(i)%0A assert dict_%5Bi%5D == 1%0A assert sympy_key(ref) == sympy_key(i)%0A%0A ref = Symbol(name + 'x')%0A for i in symbs:%0A assert ref != i%0A assert i != ref%0A assert hash(ref) != hash(i)%0A assert sympy_key(ref) != sympy_key(i)%0A%0A%0Adef test_basic_drs_indexed():%0A %22%22%22Test basic properties of drudge script indexed object.%22%22%22%0A%0A base_name = 'a'%0A orig_base = IndexedBase(base_name)%0A%0A for drudge in %5BNone, %5B%5D%5D:%0A matching_indices = %5B%0A (Symbol('x'), DrsSymbol(drudge, 'x')),%0A (%0A (Symbol('x'), Symbol('y')),%0A (DrsSymbol(drudge, 'x'), DrsSymbol(drudge, 'y'))%0A )%0A %5D%0A drs_base = DrsSymbol(drudge, base_name)%0A for orig_indices, drs_indices in matching_indices:%0A ref = orig_base%5Borig_indices%5D%0A for i in %5B%0A orig_base%5Bdrs_indices%5D,%0A drs_base%5Borig_indices%5D,%0A drs_base%5Bdrs_indices%5D%0A %5D:%0A assert ref == i%0A assert hash(ref) == hash(i)%0A assert sympy_key(ref) == sympy_key(i)%0A
aa2a15c44228a8a27a5b3f91f25f38156a647457
Update queries.py
blitzdb/backends/file/queries.py
blitzdb/backends/file/queries.py
import six
import re

if six.PY3:
    from functools import reduce

def and_query(expressions):

    def _and(query_function,expressions = expressions):
        compiled_expressions = [compile_query(e) for e in expressions]
        return reduce(lambda x,y: x & y,[e(query_function) for e in compiled_expressions])

    return _and

def or_query(expressions):

    def _or(query_function,expressions = expressions):
        compiled_expressions = [compile_query(e) for e in expressions]
        return reduce(lambda x,y: x | y,[e(query_function) for e in compiled_expressions])

    return _or

def filter_query(key,expression):

    if isinstance(expression,dict) and len(expression) == 1 and list(expression.keys())[0].startswith('$'):
        compiled_expression = compile_query(expression)
    else:
        compiled_expression = expression

    def _get(query_function,key = key,expression = compiled_expression):
        return query_function(key,expression)

    return _get

def not_query(expression):

    compiled_expression = compile_query(expression)

    def _not(index,expression = compiled_expression):
        all_keys = index.get_all_keys()
        returned_keys = expression(index)
        return [key for key in all_keys if not key in returned_keys]

    return _not

def gte_query(expression):

    def _gte(index,expression = expression):
        ev = expression() if callable(expression) else expression
        return [store_key for value,store_keys in index.get_index().items() if value >= ev for store_key in store_keys]

    return _gte

def lte_query(expression):

    def _lte(index,expression = expression):
        ev = expression() if callable(expression) else expression
        return [store_key for value,store_keys in index.get_index().items() if value <= ev for store_key in store_keys]

    return _lte

def gt_query(expression):

    def _gt(index,expression = expression):
        ev = expression() if callable(expression) else expression
        return [store_key for value,store_keys in index.get_index().items() if value > ev for store_key in store_keys]

    return _gt

def lt_query(expression):

    def _lt(index,expression = expression):
        ev = expression() if callable(expression) else expression
        return [store_key for value,store_keys in index.get_index().items() if value < ev for store_key in store_keys]

    return _lt

def ne_query(expression):

    def _ne(index,expression = expression):
        ev = expression() if callable(expression) else expression
        return [store_key for value,store_keys in index.get_index().items() if value != ev for store_key in store_keys]

    return _ne

def exists_query(expression):

    def _exists(index,expression = expression):
        ev = expression() if callable(expression) else expression
        return [store_key for value,store_keys in index.get_index().items() for store_key in store_keys]

    return _ne

def regex_query(expression):

    def _regex(index,expression = expression):
        pattern = re.compile(expression)
        return [store_key for value,store_keys in index.get_index().items() if re.match(pattern,value) for store_key in store_keys]

    return _regex

def all_query(expression):

    def _all(index,expression = expression):
        ev = expression() if callable(expression) else expression
        try:
            ev_iter = iter(ev)
        except TypeError as te:
            raise AttributeError("$in argument must be an iterable!")
        hashed_ev = [index.get_hash_for(v) for v in ev]
        store_keys = set([])
        if len(hashed_ev) == 0:
            return []
        store_keys = set(index.get_keys_for(hashed_ev[0]))
        for value in hashed_ev[1:]:
            store_keys &= set(index.get_keys_for(value))
        return list(store_keys)

    return _all

def in_query(expression):

    def _in(index,expression = expression):
        ev = expression() if callable(expression) else expression
        try:
            ev_iter = iter(ev)
        except TypeError as te:
            raise AttributeError("$in argument must be an iterable!")
        hashed_ev = [index.get_hash_for(v) for v in ev]
        store_keys = set()
        for value in hashed_ev:
            store_keys |= set(index.get_keys_for(value))
        return list(store_keys)

    return _in

def compile_query(query):
    if isinstance(query,dict):
        expressions = []
        for key,value in query.items():
            if key.startswith('$'):
                if not key in query_funcs:
                    raise AttributeError("Invalid operator: %s" % key)
                expressions.append(query_funcs[key](value))
            else:
                expressions.append(filter_query(key,value))
        if len(expressions) > 1:
            return and_query(expressions)
        else:
            return expressions[0] if len(expressions) else lambda query_function : query_function(None,None)
    else:
        return query

query_funcs = {
    '$regex' : regex_query,
    '$exists' : exists_query,
    '$and' : and_query,
    '$all' : all_query,
    '$or' : or_query,
    '$gte' : gte_query,
    '$lte' : lte_query,
    '$gt' : gt_query,
    '$lt' : lt_query,
    '$ne' : ne_query,
    '$not' : not_query,
    '$in' : in_query,
}
Python
0.000001
@@ -2928,26 +2928,30 @@ return _ -n e +xists %0A%0Adef regex_ @@ -3184,32 +3184,32 @@ n store_keys%5D %0A%0A - return _rege @@ -3209,16 +3209,295 @@ n _regex +%0A %0Adef eq_query(expression):%0A%0A def _eq(index,expression = expression):%0A ev = expression() if callable(expression) else expression%0A return %5Bstore_key for value,store_keys in index.get_index().items() if value == ev for store_key in store_keys%5D %0A%0A return _eq %0A%0Adef al @@ -5572,10 +5572,37 @@ _query,%0A + '$eq' : eq_query,%0A %0A %7D%0A
5abd80dd9d90c60190f7a170697301284c3731ec
Version 1
rename.py
rename.py
Python
0.000001
@@ -0,0 +1,1410 @@ +#!/usr/bin/env python%0Afrom os import rename, listdir%0Aimport string%0A%0AuserInput = raw_input(%22Enter Command: %22)%0A%0Aextensions = %5B'jpg', 'JPG', 'png', 'PNG'%5D%0A%0Alist1 = %5B%5D%0A%0Afnames = listdir('.')%0Afor fname in fnames:%0A if fname%5B-3:%5D in extensions and fname%5B0%5D in string.digits:%0A list1.append(fname%5B:-4%5D)%0A%0Afor item in list1:%0A print item%0A%0Alist1Sorted = sorted(list1, key=int)%0A%0Afor item in list1Sorted:%0A print item%0A%0Aif userInput == %22reorder%22:%0A i = 1%0A for item in list1Sorted:%0A if item == str(i):%0A i += 1%0A else:%0A for fname in fnames:%0A if fname.startswith(list1Sorted%5Bi - 1%5D + %22.%22):%0A rename(fname, fname.replace(list1Sorted%5Bi - 1%5D + %22.%22, str(i) + %22.%22))%0A i += 1%0Aelif userInput.startswith(%22insert %22):%0A userInputList = userInput.split(%22 %22)%0A i = int(userInputList%5B2%5D)%0A length = len(list1Sorted)%0A x = 0%0A while i %3C= length - x:%0A for fname in fnames:%0A if fname.startswith(str(length - x) + %22.%22):%0A rename(fname, fname.replace(str(length - x) + %22.%22, str((length - x) + 1) + %22.%22))%0A x += 1%0A for fname in fnames:%0A if fname.startswith(userInputList%5B1%5D + %22.%22):%0A rename(fname, fname.replace(userInputList%5B1%5D + %22.%22, userInputList%5B2%5D + %22.%22))%0A%0A%0A# for fname in fnames:%0A# if fname.startswith(badprefix*2):%0A# rename(fname, fname.replace(badprefix, '', 1))
a412295b09481113d6f42565520d03ce8bfd36b8
Create ECIScraper.py
ECIScraper.py
ECIScraper.py
Python
0
@@ -0,0 +1,821 @@ +from bs4 import BeautifulSoup as bs%0Aimport httplib%0A%0A%0Aclass ECIScrapper:%0A%09def __init__(self, url):%0A%09%09self.url = url.split(%22/%22)%5B0%5D%0A%09%09self.getrequest = '/'.join(url.split('/')%5B1:%5D)%0A%09%09print self.url, self.getrequest%0A%09%09self.connection = httplib.HTTPConnection(self.url)%0A%09%09self.connection.request(%22GET%22, '/'+self.getrequest)%0A%09%09self.response = self.connection.getresponse()%0A%09%09self.page = self.response.read()%0A%0A%09%09self.soup = bs(self.page)%0A%09%09print self.soup.find_all('table', style=%22margin: auto; width: 100%25; font-family: Verdana; border: solid 1px black;font-weight:lighter%22)%0A%0A%09%09style = %22margin: auto; width: 100%25; font-family: Verdana; border: solid 1px black;font-weight:lighter%22%0A%0A%0A%09%0A%09def getData(self):%0A%09%09print url;%0A%0A%0A%0A%0Aif __name__==%22__main__%22:%0A%09url = %22eciresults.ap.nic.in/ConstituencywiseS2653.htm?ac=53%22%0A%09ECIScrapper(url)%0A
82617f295ed21c179bab6ad3c3c2af5c417f40ba
Install pandas and scipy from Anaconda as part of upgrade process. Provides final installation fix for burden testing code. #167 #191
gemini/gemini_update.py
gemini/gemini_update.py
"""Perform in-place updates of gemini and databases when installed into virtualenv. """ import os import subprocess import sys import gemini.config def release(parser, args): """Update gemini to the latest release, along with associated data files. """ url = "https://raw.github.com/arq5x/gemini/master/requirements.txt" # update locally isolated python pip_bin = os.path.join(os.path.dirname(sys.executable), "pip") activate_bin = os.path.join(os.path.dirname(sys.executable), "activate") conda_bin = os.path.join(os.path.dirname(sys.executable), "conda") if os.path.exists(conda_bin): pkgs = ["cython", "distribute", "ipython", "nose", "numpy", "pip", "pycrypto", "pyparsing", "pysam", "pyyaml", "pyzmq"] subprocess.check_call([conda_bin, "install", "--yes"] + pkgs) elif os.path.exists(activate_bin): subprocess.check_call([pip_bin, "install", "--upgrade", "distribute"]) else: raise NotImplementedError("Can only upgrade gemini installed in anaconda or virtualenv") # update libraries #subprocess.check_call([pip_bin, "install", "-r", url]) # update datafiles config = gemini.config.read_gemini_config() install_script = os.path.join(os.path.dirname(__file__), "install-data.py") subprocess.check_call([sys.executable, install_script, config["annotation_dir"]]) print "Gemini upgraded to latest version" # update tests test_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(pip_bin))), "gemini") if os.path.exists(test_dir) and os.path.exists(os.path.join(test_dir, "master-test.sh")): os.chdir(test_dir) subprocess.check_call(["git", "pull", "origin", "master"]) print "Run test suite with: cd %s && bash master-test.sh" % test_dir
Python
0
@@ -752,15 +752,50 @@ ml%22, - %22pyzmq +%0A %22pyzmq%22, %22pandas%22, %22scipy %22%5D%0A
f69de6e6cf63f9b3770ffdf4da32ca2149006a2e
add fit test for record, test is renamed so nose doesn't run it
scipy/stats/tests/test_fit.py
scipy/stats/tests/test_fit.py
Python
0.000002
@@ -0,0 +1,2610 @@ +# NOTE: contains only one test, _est_cont_fit, that is renamed so that%0D%0A# nose doesn't run it%0D%0A# I put this here for the record and for the case when someone wants to%0D%0A# verify the quality of fit%0D%0A# with current parameters: %0D%0A%0D%0A%0D%0Aimport numpy.testing as npt%0D%0Aimport numpy as np%0D%0A%0D%0Afrom scipy import stats%0D%0A%0D%0Afrom test_continuous_basic import distcont%0D%0A%0D%0A# this is not a proper statistical test for convergence, but only%0D%0A# verifies that the estimate and true values don't differ by too much%0D%0An_repl1 = 1000 # sample size for first run%0D%0An_repl2 = 5000 # sample size for second run, if first run fails%0D%0Athresh_percent = 0.25 # percent of true parameters for fail cut-off%0D%0Athresh_min = 0.75 # minimum difference estimate - true to fail test%0D%0A%0D%0A#distcont = %5B%5B'genextreme', (3.3184017469423535,)%5D%5D%0D%0A%0D%0Adef test_cont_fit():%0D%0A # this tests the closeness of the estimated parameters to the true%0D%0A # parameters with fit method of continuous distributions%0D%0A # Note: is slow, some distributions don't converge with sample size %3C= 10000%0D%0A%0D%0A for distname, arg in distcont:%0D%0A yield check_cont_fit, distname,arg%0D%0A%0D%0A%0D%0Adef check_cont_fit(distname,arg): %0D%0A distfn = getattr(stats, distname)%0D%0A rvs = distfn.rvs(size=n_repl1,*arg)%0D%0A est = distfn.fit(rvs) #,*arg) # start with default values%0D%0A%0D%0A truearg = np.hstack(%5Barg,%5B0.0,1.0%5D%5D)%0D%0A diff = est-truearg%0D%0A %0D%0A txt = ''%0D%0A diffthreshold = np.max(np.vstack(%5Btruearg*thresh_percent,%0D%0A np.ones(distfn.numargs+2)*thresh_min%5D),0)%0D%0A # threshold for location%0D%0A diffthreshold%5B-2%5D = np.max(%5Bnp.abs(rvs.mean())*thresh_percent,thresh_min%5D)%0D%0A %0D%0A if np.any(np.isnan(est)):%0D%0A raise AssertionError, 'nan returned in fit'%0D%0A else: %0D%0A if np.any((np.abs(diff) - diffthreshold) %3E 0.0):%0D%0A## txt = 'WARNING - diff too large with small sample'%0D%0A## print 'parameter diff =', diff - diffthreshold, txt%0D%0A rvs = np.concatenate(%5Brvs,distfn.rvs(size=n_repl2-n_repl1,*arg)%5D)%0D%0A est = distfn.fit(rvs) #,*arg)%0D%0A truearg = np.hstack(%5Barg,%5B0.0,1.0%5D%5D)%0D%0A diff = est-truearg%0D%0A if np.any((np.abs(diff) - diffthreshold) %3E 0.0):%0D%0A txt = 'parameter: %25s%5Cn' %25 str(truearg)%0D%0A txt += 'estimated: %25s%5Cn' %25 str(est)%0D%0A txt += 'diff : %25s%5Cn' %25 str(diff)%0D%0A raise AssertionError, 'fit not very good in %25s%5Cn' %25 distfn.name + txt%0D%0A %0D%0A%0D%0A%0D%0Aif __name__ == %22__main__%22:%0D%0A import nose%0D%0A #nose.run(argv=%5B'', __file__%5D)%0D%0A nose.runmodule(argv=%5B__file__,'-s'%5D, exit=False)%0D%0A
7720fbc1d8a81430c38598fd96b95d8b4da4a74c
fix a bug where ChineseAnalyzer cannot be imported by changing tabs to 4 white spaces under PEP8
jieba/analyse/__init__.py
jieba/analyse/__init__.py
import jieba import os try: from analyzer import ChineseAnalyzer except ImportError: pass _curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) ) f_name = os.path.join(_curpath,"idf.txt") content = open(f_name,'rb').read().decode('utf-8') idf_freq = {} lines = content.split('\n') for line in lines: word,freq = line.split(' ') idf_freq[word] = float(freq) median_idf = sorted(idf_freq.values())[len(idf_freq)/2] stop_words= set([ "the","of","is","and","to","in","that","we","for","an","are","by","be","as","on","with","can","if","from","which","you","it","this","then","at","have","all","not","one","has","or","that" ]) def extract_tags(sentence,topK=20): words = jieba.cut(sentence) freq = {} for w in words: if len(w.strip())<2: continue if w.lower() in stop_words: continue freq[w]=freq.get(w,0.0)+1.0 total = sum(freq.values()) freq = [(k,v/total) for k,v in freq.iteritems()] tf_idf_list = [(v * idf_freq.get(k,median_idf),k) for k,v in freq] st_list = sorted(tf_idf_list,reverse=True) top_tuples= st_list[:topK] tags = [a[1] for a in top_tuples] return tags
Python
0
@@ -21,17 +21,20 @@ os%0Atry:%0A -%09 + from ana @@ -86,9 +86,12 @@ or:%0A -%09 + pass
ec96ce58076ba5aa54abeb423937a629cbe1e3d5
Work in progress
logparser.py
logparser.py
Python
0.000003
@@ -0,0 +1,1468 @@ +#!/usr/bin/python%0A%22%22%22 Log parser. %22%22%22%0Afrom HTMLParser import HTMLParser%0Aimport urllib%0A%0A%0Aclass DailyParser(HTMLParser):%0A%0A %22%22%22%0A HTML parser for the donations log of Wikimedia France%0A%0A Attributes:%0A status (int): status variable of the parser.%0A donations (list data.Donation): list of donations read.%0A %22%22%22%0A%0A START_PARSER = 0%0A FOUND_DONATION_TABLE = 1%0A READ_HOURS = 2%0A READ_DONATOR = 3%0A READ_DONATION = 4%0A END_OF_DONATION_TABLE = 5%0A%0A def __init__(self):%0A super(DonationsParser, self).__init__()%0A self.status = DailyParser.START_PARSER%0A self.donations = %5B%5D%0A%0A def handle_starttag(self, tag, attrs):%0A pass%0A%0A def handle_endtag(self, tag):%0A pass%0A%0A def handle_data(self, data):%0A pass%0A%0A%0Aclass LogParser:%0A%0A def __init__(self):%0A self.parser = DailyParser()%0A%0A @staticmethod%0A def daypage(day):%0A %22%22%22 Returns the page content containing the donations from a specific %0A day.%0A%0A Args:%0A day (datetime.date): day to fetch donation.%0A%0A Returns:%0A str: page content with the donation of the day specified as args.%0A %22%22%22%0A url_args = date.strftime(%22%25Y-%25m-%25d%22)%0A url = %22https://dons.wikimedia.fr/journal/%25s%22 %25 url_args%0A return urllib.urlopen(url).read()%0A%0A def fetchday(self, day):%0A %22%22%22 Returns donations from a day. %22%22%22%0A day_content = self.daypage(day)%0A self.parser.feed(day_content)%0A
162b82b64d319e0c854c08b3bd2e412ab5e67d97
add pytables testing file
blaze/compute/tests/test_pytables_compute.py
blaze/compute/tests/test_pytables_compute.py
Python
0
@@ -0,0 +1,2494 @@ +from __future__ import absolute_import, division, print_function%0A%0Aimport pytest%0Atables = pytest.importorskip('tables')%0A%0Aimport numpy as np%0Aimport tempfile%0Afrom contextlib import contextmanager%0Aimport os%0A%0Afrom blaze.compute.core import compute%0Afrom blaze.compute.pytables import *%0Afrom blaze.compute.numpy import *%0Afrom blaze.expr.table import *%0Afrom blaze.compatibility import xfail%0A%0At = TableSymbol('t', '%7Bid: int, name: string, amount: int%7D')%0A%0Ax = np.array(%5B(1, 'Alice', 100),%0A (2, 'Bob', -200),%0A (3, 'Charlie', 300),%0A (4, 'Denis', 400),%0A (5, 'Edith', -500)%5D,%0A dtype=%5B('id', '%3Ci8'), ('name', 'S7'), ('amount', '%3Ci8')%5D)%0A%0A@contextmanager%0Adef data():%0A filename = tempfile.mktemp()%0A f = tables.open_file(filename, 'w')%0A d = f.createTable('/', 'title', x)%0A%0A yield d%0A%0A d.close()%0A f.close()%0A os.remove(filename)%0A%0A%0Adef eq(a, b):%0A return (a == b).all()%0A%0A%0Adef test_table():%0A with data() as d:%0A assert compute(t, d) == d%0A%0A%0Adef test_projection():%0A with data() as d:%0A assert eq(compute(t%5B'name'%5D, d), x%5B'name'%5D)%0A%0A%0A@xfail(reason=%22ColumnWise not yet supported%22)%0Adef test_eq():%0A with data() as d:%0A assert eq(compute(t%5B'amount'%5D == 100, d),%0A x%5B'amount'%5D == 100)%0A%0A%0Adef test_selection():%0A with data() as d:%0A assert eq(compute(t%5Bt%5B'amount'%5D == 100%5D, d), x%5Bx%5B'amount'%5D == 0%5D)%0A assert eq(compute(t%5Bt%5B'amount'%5D %3C 0%5D, d), x%5Bx%5B'amount'%5D %3C 0%5D)%0A%0A%0A@xfail(reason=%22ColumnWise not yet supported%22)%0Adef test_arithmetic():%0A with data() as d:%0A assert eq(compute(t%5B'amount'%5D + t%5B'id'%5D, d),%0A x%5B'amount'%5D + x%5B'id'%5D)%0A assert eq(compute(t%5B'amount'%5D * t%5B'id'%5D, d),%0A x%5B'amount'%5D * x%5B'id'%5D)%0A assert eq(compute(t%5B'amount'%5D %25 t%5B'id'%5D, d),%0A x%5B'amount'%5D %25 x%5B'id'%5D)%0A%0Adef test_Reductions():%0A with data() as d:%0A assert compute(t%5B'amount'%5D.count(), d) == len(x%5B'amount'%5D)%0A%0A%0A@xfail(reason=%22TODO: sorting could work if on indexed column%22)%0Adef test_sort():%0A with data() as d:%0A assert eq(compute(t.sort('amount'), d),%0A np.sort(x, order='amount'))%0A%0A assert eq(compute(t.sort('amount', ascending=False), d),%0A np.sort(x, order='amount')%5B::-1%5D)%0A%0A assert eq(compute(t.sort(%5B'amount', 'id'%5D), d),%0A np.sort(x, order=%5B'amount', 'id'%5D))%0A%0A%0Adef test_head():%0A with data() as d:%0A assert eq(compute(t.head(2), d),%0A x%5B:2%5D)%0A
67df732067847af15e41b8eed05137b6ab2bb6d2
add __version__ (forgot to commit)
libcutadapt/__init__.py
libcutadapt/__init__.py
Python
0.000001
@@ -0,0 +1,22 @@ +__version__ = '0.9.2'%0A
188d583caea0e640f41e400839552fe593154eda
Set 2, challenge 9 completed.
set2/crypto9.py
set2/crypto9.py
Python
0
@@ -0,0 +1,1036 @@ +#!/usr/local/bin/python%0A%0A__author__ = 'Walshman23'%0A%0Aimport sys%0Asys.path.insert(1, %22../common%22) # Want to locate modules in our 'common' directory%0A%0A%0A# A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of plaintext into ciphertext.%0A# But we almost never want to transform a single block; we encrypt irregularly-sized messages.%0A#%0A# One way we account for irregularly-sized messages is by padding, creating a plaintext that is an even%0A# multiple of the blocksize. The most popular padding scheme is called PKCS#7.%0A#%0A# So: pad any block to a specific block length, by appending the number of bytes of padding to the end of the block.%0A# For instance,%0A#%0A# %22YELLOW SUBMARINE%22%0A#%0A# ... padded to 20 bytes would be:%0A#%0A# %22YELLOW SUBMARINE%5Cx04%5Cx04%5Cx04%5Cx04%22%0A%0A%0A# Get block from stdin%0A%0A# Use 16 as block size%0A%0Ablocksize=16%0A%0Abuf = sys.stdin.read()%0A%0Aif len(buf) %3C blocksize:%0A padlen = blocksize - len(buf)%0Aelse:%0A padlen = len(buf) %25 blocksize%0A%0Asys.stdout.write(buf)%0A%0Aif padlen != 0:%0A sys.stdout.write(chr(padlen)*padlen)%0A%0A%0A%0A%0A
ed33a8dc90468f2873a4a581c22027f10d9393d4
Add Wordpress_2_Instances testcase
heat/tests/functional/test_WordPress_2_Intances.py
heat/tests/functional/test_WordPress_2_Intances.py
Python
0
@@ -0,0 +1,2150 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A#%0A%0Aimport util%0Aimport verify%0Aimport nose%0Afrom nose.plugins.attrib import attr%0A%0Afrom heat.common import context%0Afrom heat.engine import manager%0Aimport unittest%0Aimport os%0A%0A%0A@attr(speed='slow')%0A@attr(tag=%5B'func', 'wordpress', '2instance', 'ebs',%0A 'WordPress_2_Instances.template'%5D)%0Aclass WordPress2Instances(unittest.TestCase):%0A def setUp(self):%0A template = 'WordPress_2_Instances.template'%0A%0A stack_paramstr = ';'.join(%5B'InstanceType=m1.xlarge',%0A 'DBUsername=dbuser',%0A 'DBPassword=' + os.environ%5B'OS_PASSWORD'%5D%5D)%0A%0A self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools',%0A stack_paramstr)%0A%0A self.DatabaseServer = util.Instance('DatabaseServer')%0A self.DatabaseServer.check_cfntools()%0A self.DatabaseServer.wait_for_provisioning()%0A%0A self.WebServer = util.Instance('WebServer')%0A self.WebServer.check_cfntools()%0A self.WebServer.wait_for_provisioning()%0A%0A def test_instance(self):%0A # ensure wordpress was installed%0A self.assertTrue(self.WebServer.file_present%0A ('/etc/wordpress/wp-config.php'))%0A print %22Wordpress installation detected%22%0A%0A # Verify the output URL parses as expected, ie check that%0A # the wordpress installation is operational%0A stack_url = self.stack.get_stack_output(%22WebsiteURL%22)%0A print %22Got stack output WebsiteURL=%25s, verifying%22 %25 stack_url%0A ver = verify.VerifyStack()%0A self.assertTrue(ver.verify_wordpress(stack_url))%0A%0A self.stack.cleanup()%0A
fb7bc8af34f3ed375d30b43655366e6368080e76
Create Import_Libraries.py
home/INMOOV/Config/ExtraConfig/Import_Libraries.py
home/INMOOV/Config/ExtraConfig/Import_Libraries.py
Python
0
@@ -0,0 +1,389 @@ +from java.lang import String%0Afrom org.myrobotlab.net import BareBonesBrowserLaunch%0Afrom datetime import datetime%0Afrom subprocess import Popen, PIPE%0A#######################%0Aimport threading%0Aimport time%0Aimport random%0Aimport urllib, urllib2%0Aimport json%0Aimport io%0Aimport itertools%0Aimport textwrap%0Aimport codecs%0Aimport socket%0Aimport os%0Aimport shutil%0Aimport hashlib%0Aimport subprocess%0Aimport csv%0A
4de971725601ed5f630ec103ad01cf5c624ad866
Add the occupancy sensor_class (#3176)
homeassistant/components/binary_sensor/__init__.py
homeassistant/components/binary_sensor/__init__.py
""" Component to interface with binary sensors. For more details about this component, please refer to the documentation at https://home-assistant.io/components/binary_sensor/ """ import logging import voluptuous as vol from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.entity import Entity from homeassistant.const import (STATE_ON, STATE_OFF) from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa DOMAIN = 'binary_sensor' SCAN_INTERVAL = 30 ENTITY_ID_FORMAT = DOMAIN + '.{}' SENSOR_CLASSES = [ None, # Generic on/off 'cold', # On means cold (or too cold) 'connectivity', # On means connection present, Off = no connection 'gas', # CO, CO2, etc. 'heat', # On means hot (or too hot) 'light', # Lightness threshold 'moisture', # Specifically a wetness sensor 'motion', # Motion sensor 'moving', # On means moving, Off means stopped 'opening', # Door, window, etc. 'power', # Power, over-current, etc 'safety', # Generic on=unsafe, off=safe 'smoke', # Smoke detector 'sound', # On means sound detected, Off means no sound 'vibration', # On means vibration detected, Off means no vibration ] SENSOR_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(SENSOR_CLASSES)) def setup(hass, config): """Track states and offer events for binary sensors.""" component = EntityComponent( logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL) component.setup(config) return True # pylint: disable=no-self-use class BinarySensorDevice(Entity): """Represent a binary sensor.""" @property def is_on(self): """Return True if the binary sensor is on.""" return None @property def state(self): """Return the state of the binary sensor.""" return STATE_ON if self.is_on else STATE_OFF @property def sensor_class(self): """Return the class of this sensor, from SENSOR_CLASSES.""" return None @property def state_attributes(self): """Return device specific state attributes.""" attr = {} if self.sensor_class is not None: attr['sensor_class'] = self.sensor_class return attr
Python
0.000009
@@ -998,16 +998,81 @@ stopped%0A + 'occupancy', # On means occupied, Off means not occupied%0A 'ope
6f0b5a0dc44269d9e72f3698317604d90d6cecf3
add script for migrate user mailchimp
scripts/fix_user_mailchimp.py
scripts/fix_user_mailchimp.py
Python
0
@@ -0,0 +1,1501 @@ +import logging%0Aimport sys%0Afrom datetime import datetime%0A%0Afrom django.db import transaction%0Afrom django.utils import timezone%0A%0Afrom website.app import setup_django%0Asetup_django()%0Afrom osf.models import OSFUser%0Afrom scripts import utils as script_utils%0Afrom website.mailchimp_utils import subscribe_mailchimp%0Afrom website import settings%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Adef main():%0A dry = '--dry' in sys.argv%0A if not dry:%0A # If we're not running in dry mode log everything to a file%0A script_utils.add_file_logger(logger, __file__)%0A%0A with transaction.atomic():%0A start_time = datetime.strptime('2017-12-20 08:25:25', '%25Y-%25m-%25d %25H:%25M:%25S')%0A start_time = start_time.replace(tzinfo=timezone.now().tzinfo)%0A%0A end_time = datetime.strptime('2017-12-20 18:05:00', '%25Y-%25m-%25d %25H:%25M:%25S')%0A end_time = end_time.replace(tzinfo=timezone.now().tzinfo)%0A%0A users = OSFUser.objects.filter(is_registered=True, date_disabled__isnull=True, date_registered__range=%5Bstart_time, end_time%5D)%0A%0A if not dry:%0A for user in users:%0A subscribe_mailchimp(settings.MAILCHIMP_GENERAL_LIST, user._id)%0A logger.info('User %7B%7D has been subscribed to OSF general mailing list'.format(user._id))%0A%0A logger.info('%7B%7D users have been subscribed to OSF general mailing list'.format(users.count()))%0A%0A if dry:%0A raise Exception('Abort Transaction - Dry Run')%0A print('Done')%0A%0Aif __name__ == '__main__':%0A main()%0A
9571acd941cb7ecac96676ead87c43fadda3e74f
Create TimeUpload.py
TimeUpload.py
TimeUpload.py
Python
0
@@ -0,0 +1,1898 @@ +from pydrive.auth import GoogleAuth%0Afrom pydrive.drive import GoogleDrive%0Aimport time%0Aimport csv%0A%0AtimeID='0B9ffTjUEqeFEZ28zdTRhMlJlY0k'%0Afor i in range(10):%0A#get the curret time%0A date_time=time.asctime()%0A date_time_split=date_time.split(' ') #gives a list with the date and time components%0A time_only=date_time_split%5B3%5D # gives just the current time%0A date_only = str(date_time_split%5B1%5D + ' ' + date_time_split%5B2%5D+' ' +date_time_split%5B4%5D)%0A%0A #get the current csv from the GDrive and append the date and time and upload the new file to Gdrive%0A gauth = GoogleAuth()%0A # Try to load saved client credentials%0A gauth.LoadCredentialsFile(%22mycreds.txt%22)%0A if gauth.credentials is None:%0A # Authenticate if they're not there%0A gauth.LocalWebserverAuth()%0A elif gauth.access_token_expired:%0A # Refresh them if expired%0A gauth.Refresh()%0A else:%0A # Initialize the saved creds%0A gauth.Authorize()%0A # Save the current credentials to a file%0A gauth.SaveCredentialsFile(%22mycreds.txt%22)%0A%0A drive = GoogleDrive(gauth)%0A #Download the prior file that we will append the new data to%0A current=drive.CreateFile(%7B'id': timeID%7D)%0A current.GetContentFile('current.csv')%0A %0A #delete the prior data file to keep these files from accumulating on the GDrive%0A #current.DeleteFile(timeID)%0A %0A%0A with open('current.csv', 'a') as csvfile:%0A fieldnames = %5B'Time', 'Date'%5D%0A writer = csv.DictWriter(csvfile, fieldnames=fieldnames)%0A writer.writerow(%7B'Time': time_only, 'Date': date_only%7D)%0A csvfile.close()%0A %0A file1 = drive.CreateFile(%7B'title':'time.csv', 'id': timeID%7D) #open a new file on the GDrive%0A file1.SetContentFile('current.csv') #sets the file content to the CSV file created above from the working directory%0A file1.Upload() #upload the file%0A timeID=file1%5B'id'%5D%0A%0A time.sleep(30) #pause for 30seconds%0A
7c6bbe3860e7cce0f464dc0d95683de3c5ca57a5
Add test of `ResNet50FeatureProducer()`
testci/test_resnet50_feature.py
testci/test_resnet50_feature.py
Python
0
@@ -0,0 +1,1648 @@ +from PIL import Image%0Aimport collections%0Aimport datetime%0Aimport numpy as np%0Aimport pytest%0A%0Afrom pelops.features.resnet50 import ResNet50FeatureProducer%0A%0A%[email protected]%0Adef img_data():%0A DATA = %5B%5B%5B 0, 0, 0%5D,%0A %5B255, 255, 255%5D,%0A %5B 0, 0, 0%5D%5D,%0A %5B%5B255, 255, 255%5D,%0A %5B 0, 0, 0%5D,%0A %5B255, 255, 255%5D%5D,%0A %5B%5B 0, 0, 0%5D,%0A %5B255, 255, 255%5D,%0A %5B 0, 0, 0%5D%5D%5D%0A return np.array(DATA, dtype=np.uint8)%0A%0A%[email protected]%0Adef chip_producer(img_data):%0A Chip = collections.namedtuple(%22Chip%22, %5B%22filepath%22, %22car_id%22, %22cam_id%22, %22time%22, %22img_data%22, %22misc%22%5D)%0A CHIPS = (%0A # filepath, car_id, cam_id, time, img_data, misc%0A (%22car1_cam1.png%22, 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), img_data, %7B%7D),%0A )%0A%0A chip_producer = %7B%22chips%22: %7B%7D%7D%0A for filepath, car_id, cam_id, time, img_data, misc in CHIPS:%0A chip = Chip(filepath, car_id, cam_id, time, img_data, misc)%0A chip_producer%5B%22chips%22%5D%5Bfilepath%5D = chip%0A%0A return chip_producer%0A%0A%[email protected]%0Adef feature_producer(chip_producer):%0A res = ResNet50FeatureProducer(chip_producer)%0A return res%0A%0A%0Adef test_features(feature_producer, chip_producer):%0A for _, chip in chip_producer%5B%22chips%22%5D.items():%0A features = feature_producer.produce_features(chip)%0A assert features.shape == (1, 2048)%0A assert np.sum(features) != 0%0A%0A%0Adef test_preprocess_image(feature_producer, img_data):%0A img = Image.fromarray(img_data)%0A img_resized = feature_producer.preprocess_image(img, 224, 224)%0A assert img_resized.shape == (1, 224, 224, 3)%0A
d8a3f92a06971ba6fe24f71914a466ff91f00f5f
Create WikiBot3.5.py
WikiBot3.5.py
WikiBot3.5.py
Python
0
@@ -0,0 +1,2437 @@ +import discord%0Aimport wikipedia%0A%0Atoken = %22Mjg3NjU2MjM1MjU0NDE1MzYx.C-5xKQ.khJ9dPouM9783FMA0Ht-92XkS6A%22%0A%0Alanguage = %22en%22%0A%0Aclient = discord.Client()%0A%0A%[email protected]%0Aasync def on_ready():%0A print(%22Bot is ready%22)%0A print(client.user.name)%0A print(client.user.id)%0A%0A%[email protected]%0Aasync def on_server_join(server):%0A await client.send_message(server.default_channel, %22Oi, i'm the WikiBot! https://en.wikipedia.org/wiki/Main_Page%22)%0A%0A%[email protected]%0Aasync def on_message(message):%0A if message.channel.is_private and message.author.id != client.user.id:%0A await printout(message, message.content)%0A%0A else:%0A ping = %22%3C@%22 + client.user.id + %22%3E%22%0A if message.content.startswith(ping):%0A %0A print(%22I'm called!%22)%0A %0A toretract = len(ping)%0A query = message.content%5Btoretract:%5D%0A %0A if query%5B0%5D == %22 %22:%0A query = query%5B1:%5D%0A %0A print(%22Query = %22 + query)%0A %0A await printout(message, query)%0A%0A%0Aasync def printout(message, query):%0A wikipage = None%0A lookup = True%0A print(%22printout%22)%0A%0A try:%0A wikipage = wikipedia.page(query)%0A print(%22I found directly%22) %0A %0A except wikipedia.exceptions.PageError:%0A print(%22Can't access by default. Trying to search%22)%0A %0A except Exception:%0A lookup = False%0A %0A if wikipage is None and lookup:%0A wikipage = wikipedia.suggest(query)%0A %0A if wikipage is None and lookup:%0A await client.send_message(message.channel, %22Sorry, cannot find %22 + query + %22 :v%22)%0A elif not lookup:%0A await client.send_message(message.channel, %22Something went wrong. Try to be more specific in search, or maybe I can't reach Wikipedia%22)%0A else:%0A imglist = wikipage.images%0A if len(imglist) == 0:%0A em = discord.Embed(title=wikipage.title, description=wikipedia.summary(query, sentences=2), colour=0x2DAAED, url=wikipage.url)%0A else:%0A em = discord.Embed(title=wikipage.title, description=wikipedia.summary(query, sentences=2), colour=0x2DAAED, url=wikipage.url, image=imglist%5B0%5D)%0A em.set_author(name=client.user.name, icon_url=%22https://wikibot.rondier.io%22)%0A await client.send_message(message.channel, embed=em)%0A await client.send_message(message.channel, %22More at %22 + wikipage.url)%0A%0Aclient.run(token)%0A
3ef6866b39601dfafa10895a69c5d348a77ded3e
add test for eject and eject_all
mpf/tests/test_BallDevice_SmartVirtual.py
mpf/tests/test_BallDevice_SmartVirtual.py
Python
0
@@ -0,0 +1,1737 @@ +from mpf.tests.MpfTestCase import MpfTestCase%0A%0A%0Aclass TestBallDeviceSmartVirtual(MpfTestCase):%0A def getConfigFile(self):%0A return 'test_ball_device.yaml'%0A%0A def getMachinePath(self):%0A return 'tests/machine_files/ball_device/'%0A%0A def get_platform(self):%0A return 'smart_virtual'%0A%0A def test_eject(self):%0A # add initial balls to trough%0A self.hit_switch_and_run(%22s_ball_switch1%22, 1)%0A self.hit_switch_and_run(%22s_ball_switch2%22, 1)%0A self.assertEqual(2, self.machine.ball_devices.test_trough.balls)%0A self.assertEqual(2, self.machine.ball_devices.test_trough.available_balls)%0A%0A # call eject%0A self.machine.ball_devices.test_trough.eject()%0A self.assertEqual(2, self.machine.ball_devices.test_trough.balls)%0A self.assertEqual(1, self.machine.ball_devices.test_trough.available_balls)%0A%0A # one ball should be gone%0A self.advance_time_and_run(30)%0A self.assertEqual(1, self.machine.ball_devices.test_trough.balls)%0A self.assertEqual(1, self.machine.ball_devices.test_trough.available_balls)%0A%0A def test_eject_all(self):%0A # add initial balls to trough%0A self.hit_switch_and_run(%22s_ball_switch1%22, 1)%0A self.hit_switch_and_run(%22s_ball_switch2%22, 1)%0A self.assertEqual(2, self.machine.ball_devices.test_trough.balls)%0A self.assertEqual(2, self.machine.ball_devices.test_trough.available_balls)%0A%0A # call eject_all%0A self.machine.ball_devices.test_trough.eject_all()%0A self.advance_time_and_run(30)%0A%0A # all balls should be gone%0A self.assertEqual(0, self.machine.ball_devices.test_trough.balls)%0A self.assertEqual(0, self.machine.ball_devices.test_trough.available_balls)%0A
104fcfc4eed7f3233d329602283093c7f86484c3
add development server
server.py
server.py
Python
0
@@ -0,0 +1,1256 @@ +from http.server import HTTPServer, BaseHTTPRequestHandler%0A%0Aclass StaticServer(BaseHTTPRequestHandler):%0A%0A def do_GET(self):%0A root = 'html'%0A #print(self.path)%0A if self.path == '/':%0A filename = root + '/index.html'%0A else:%0A filename = root + self.path%0A%0A self.send_response(200)%0A if filename%5B-4:%5D == '.css':%0A self.send_header('Content-type', 'text/css')%0A elif filename%5B-5:%5D == '.json':%0A self.send_header('Content-type', 'application/javascript')%0A elif filename%5B-3:%5D == '.js':%0A self.send_header('Content-type', 'application/javascript')%0A elif filename%5B-4:%5D == '.ico':%0A self.send_header('Content-type', 'image/x-icon')%0A else:%0A self.send_header('Content-type', 'text/html')%0A self.end_headers()%0A with open(filename, 'rb') as fh:%0A html = fh.read()%0A #html = bytes(html, 'utf8')%0A self.wfile.write(html)%0A%0Adef run(server_class=HTTPServer, handler_class=StaticServer, port=8000):%0A server_address = ('', port)%0A httpd = server_class(server_address, handler_class)%0A print('Starting httpd on port %7B%7D'.format(port))%0A httpd.serve_forever()%0A%0Arun()%0A%0A# vim: expandtab%0A %0A
e88ba0984f3e6045b407342fa7231887142380e2
Add migration to create roles
corehq/apps/accounting/migrations/0031_create_report_builder_roles.py
corehq/apps/accounting/migrations/0031_create_report_builder_roles.py
Python
0.000001
@@ -0,0 +1,464 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Afrom corehq.apps.hqadmin.management.commands.cchq_prbac_bootstrap import cchq_prbac_bootstrap%0Afrom corehq.sql_db.operations import HqRunPython%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('accounting', '0030_remove_softwareplan_visibility_trial_internal'),%0A %5D%0A%0A operations = %5B%0A HqRunPython(cchq_prbac_bootstrap),%0A %5D%0A
7b7ec9cdd1f0ed213608a5c309702e49e44b36e2
Add simple test.
tests/integration/test_smoke.py
tests/integration/test_smoke.py
Python
0.000001
@@ -0,0 +1,239 @@ +from django.test import TestCase%0A%0AURLS_PUBLIC = %5B%0A %22/%22,%0A%5D%0A%0A%0Aclass SimpleTests(TestCase):%0A def test_urls(self):%0A for url in URLS_PUBLIC:%0A res = self.client.get(url)%0A self.assertEqual(res.status_code, 200)%0A
c44001ec697faf7552764f91e52fa927056b1538
Add solution for problem 31
euler031.py
euler031.py
Python
0.000001
@@ -0,0 +1,371 @@ +#!/usr/bin/python%0A%0ALIMIT = 200%0Acoins = %5B1, 2, 5, 10, 20, 50, 100, 200%5D%0A%0A%0Adef rec_count(total, step):%0A if total == LIMIT:%0A return 1%0A if total %3E LIMIT:%0A return 0%0A c = 0%0A for x in coins:%0A if x %3C step:%0A continue%0A c += rec_count(total + x, x)%0A return c%0A%0Acount = 0%0Afor x in coins:%0A count += rec_count(x, x)%0Aprint(count)%0A
84c5bfa0252814c5797cf7f20b04808dafa9e1fa
Create MergeIntervals_001.py
leetcode/056-Merge-Intervals/MergeIntervals_001.py
leetcode/056-Merge-Intervals/MergeIntervals_001.py
Python
0
@@ -0,0 +1,974 @@ +# Definition for an interval.%0A# class Interval:%0A# def __init__(self, s=0, e=0):%0A# self.start = s%0A# self.end = e%0A%0Aclass Solution:%0A # @param %7BInterval%5B%5D%7D intervals%0A # @return %7BInterval%5B%5D%7D%0A %0A def sortmeg(self, intervals):%0A ls = %5B%5D%0A for i in intervals:%0A ls.append(i.start)%0A idx = sorted(range(len(ls)),key=lambda x:ls%5Bx%5D)%0A%0A sortedintv = %5B%5D%0A for i in idx:%0A sortedintv.append(intervals%5Bi%5D)%0A%0A return sortedintv%0A%0A%0A def merge(self, intervals):%0A if len(intervals) %3C 2:%0A return intervals%0A %0A intervals = self.sortmeg(intervals)%0A p = 0%0A while p + 1 %3C= len(intervals) - 1:%0A if intervals%5Bp+1%5D.start %3C= intervals%5Bp%5D.end:%0A if intervals%5Bp+1%5D.end %3E intervals%5Bp%5D.end:%0A intervals%5Bp%5D.end = intervals%5Bp+1%5D.end%0A del intervals%5Bp+1%5D%0A else:%0A p += 1%0A return intervals%0A
8471516294d5b28a81cae73db591ae712f44bc01
Add failing cairo test
tests/pygobject/test_structs.py
tests/pygobject/test_structs.py
Python
0.000002
@@ -0,0 +1,830 @@ +# Copyright 2013 Christoph Reiter%0A#%0A# This library is free software; you can redistribute it and/or%0A# modify it under the terms of the GNU Lesser General Public%0A# License as published by the Free Software Foundation; either%0A# version 2.1 of the License, or (at your option) any later version.%0A%0Aimport unittest%0A%0Afrom gi.repository import Gtk%0A%0Afrom tests import is_gi%0A%0A%0Aclass StructTest(unittest.TestCase):%0A%0A @unittest.skipUnless(is_gi, %22FIXME%22)%0A def test_foreign_cairo(self):%0A window = Gtk.OffscreenWindow()%0A area = Gtk.DrawingArea()%0A window.add(area)%0A%0A def foo(area, context):%0A self.assertTrue(hasattr(context, %22set_source_rgb%22))%0A area.connect(%22draw%22, foo)%0A%0A window.show_all()%0A while Gtk.events_pending():%0A Gtk.main_iteration()%0A window.destroy()%0A
c46e6d170f4d641c3bb5045a701c7810d77f28a6
add update-version script
update-version.py
update-version.py
Python
0
@@ -0,0 +1,1092 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport argparse%0Aimport os%0Aimport xml.etree.ElementTree as et%0A%0A%0ANS = %22http://maven.apache.org/POM/4.0.0%22%0APOM_NS = %22%7Bhttp://maven.apache.org/POM/4.0.0%7D%22%0A%0A%0Adef getModuleNames(mainPom):%0A pom = et.parse(mainPom)%0A modules = pom.findall(%22./%7Bns%7Dmodules/%7Bns%7Dmodule%22.format(ns=POM_NS))%0A return map(lambda element: element.text, modules)%0A%0A%0Adef updateVersionInModule(module, newVersion):%0A pomPath = os.path.join(module, %22pom.xml%22)%0A modulePom = et.parse(pomPath)%0A parentVersion = modulePom.find(%22./%7Bns%7Dparent/%7Bns%7Dversion%22.format(ns=POM_NS))%0A parentVersion.text = newVersion%0A modulePom.write(pomPath, xml_declaration=False, encoding=%22utf-8%22, method=%22xml%22)%0A%0A%0Aif __name__ == '__main__':%0A et.register_namespace('', NS)%0A%0A parser = argparse.ArgumentParser(description='Update parent version in all submodules.')%0A parser.add_argument('version', help='the new parent version')%0A args = parser.parse_args()%0A%0A allModules = getModuleNames(%22pom.xml%22)%0A for module in allModules:%0A updateVersionInModule(module, args.version)
639106506be3f6b91a3e45cde88701625c077a28
Update battery_model.py
pySDC/projects/PinTSimE/battery_model.py
pySDC/projects/PinTSimE/battery_model.py
import numpy as np import dill from pySDC.helpers.stats_helper import get_sorted from pySDC.core import CollBase as Collocation from pySDC.implementations.problem_classes.Battery import battery from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.PinTSimE.piline_model import setup_mpl import pySDC.helpers.plot_helper as plt_helper from pySDC.core.Hooks import hooks from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator class log_data(hooks): def post_step(self, step, level_number): super(log_data, self).post_step(step, level_number) # some abbreviations L = step.levels[level_number] L.sweep.compute_end_point() self.add_to_stats( process=step.status.slot, time=L.time + L.dt, level=L.level_index, iter=0, sweep=L.status.sweep, type='current L', value=L.uend[0], ) self.add_to_stats( process=step.status.slot, time=L.time + L.dt, level=L.level_index, iter=0, sweep=L.status.sweep, type='voltage C', value=L.uend[1], ) self.increment_stats( process=step.status.slot, time=L.time, level=L.level_index, iter=0, sweep=L.status.sweep, type='restart', value=1, initialize=0, ) def main(use_switch_estimator=True): """ A simple test program to do SDC/PFASST runs for the battery drain model """ # initialize level parameters level_params = dict() level_params['restol'] = 1e-10 level_params['dt'] = 1e-3 # initialize sweeper parameters sweeper_params = dict() sweeper_params['collocation_class'] = Collocation sweeper_params['node_type'] = 'LEGENDRE' sweeper_params['quad_type'] = 'LOBATTO' sweeper_params['num_nodes'] = 5 sweeper_params['QI'] = 'LU' # For the IMEX sweeper, the LU-trick can be activated for the implicit part sweeper_params['initial_guess'] = 'zero' # initialize problem parameters problem_params = dict() problem_params['Vs'] = 5.0 problem_params['Rs'] = 0.5 problem_params['C'] = 1 problem_params['R'] = 1 problem_params['L'] = 1 problem_params['alpha'] = 10 problem_params['V_ref'] = 1 problem_params['set_switch'] = False problem_params['t_switch'] = False # initialize step parameters step_params = dict() step_params['maxiter'] = 20 # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 20 controller_params['hook_class'] = log_data # convergence controllers switch_estimator_params = {} convergence_controllers = {SwitchEstimator: switch_estimator_params} # fill description dictionary for easy step instantiation description = dict() description['problem_class'] = battery # pass problem class description['problem_params'] = problem_params # pass problem parameters description['sweeper_class'] = imex_1st_order # pass sweeper description['sweeper_params'] = sweeper_params # pass sweeper parameters description['level_params'] = level_params # pass level parameters description['step_params'] = step_params if use_switch_estimator: description['convergence_controllers'] = convergence_controllers assert problem_params['alpha'] > problem_params['V_ref'], 'Please set "alpha" greater than "V_ref"' assert problem_params['V_ref'] > 0, 'Please set "V_ref" greater than 0' assert 'errtol' not in description['step_params'].keys(), 'No exact solution known to compute error' assert 'alpha' in description['problem_params'].keys(), 'Please supply "alpha" in the problem parameters' assert 'V_ref' in description['problem_params'].keys(), 'Please supply "V_ref" in the problem parameters' # set time parameters t0 = 0.0 Tend = 2.4 # instantiate controller controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) # get initial values on finest level P = controller.MS[0].levels[0].prob uinit = P.u_exact(t0) # call main function to get things done... uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) # fname = 'data/battery.dat' fname = 'battery.dat' f = open(fname, 'wb') dill.dump(stats, f) f.close() # filter statistics by number of iterations iter_counts = get_sorted(stats, type='niter', sortby='time') # compute and print statistics min_iter = 20 max_iter = 0 f = open('battery_out.txt', 'w') niters = np.array([item[1] for item in iter_counts]) out = ' Mean number of iterations: %4.2f' % np.mean(niters) f.write(out + '\n') print(out) for item in iter_counts: out = 'Number of iterations for time %4.2f: %1i' % item f.write(out + '\n') print(out) min_iter = min(min_iter, item[1]) max_iter = max(max_iter, item[1]) assert np.mean(niters) <= 5, "Mean number of iterations is too high, got %s" % np.mean(niters) f.close() plot_voltages() return np.mean(niters) def plot_voltages(cwd='./'): """ Routine to plot the numerical solution of the model """ f = open(cwd + 'battery.dat', 'rb') stats = dill.load(f) f.close() # convert filtered statistics to list of iterations count, sorted by process cL = get_sorted(stats, type='current L', sortby='time') vC = get_sorted(stats, type='voltage C', sortby='time') times = [v[0] for v in cL] setup_mpl() fig, ax = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3)) ax.plot(times, [v[1] for v in cL], label='$i_L$') ax.plot(times, [v[1] for v in vC], label='$v_C$') ax.legend(frameon=False, fontsize=12, loc='upper right') ax.set_xlabel('Time') ax.set_ylabel('Energy') fig.savefig('data/battery_model_solution.png', dpi=300, bbox_inches='tight') def estimation_check(): use_switch_estimator = [True, False] niters_mean = [] for item in use_switch_estimator: niters_mean.append(main(use_switch_estimator=item)) for item, element in zip(use_switch_estimator, niters_mean): out = 'Switch estimation: {} -- Average number of iterations: {}'.format(item, element) print(out) if __name__ == "__main__": main()
Python
0.000001
@@ -90,16 +90,28 @@ SDC.core +.Collocation import
1d3719bcd03b92d04efae10933928f953d95c7a4
Add a simple basicmap python example
src/python/BasicMap.py
src/python/BasicMap.py
Python
0.000014
@@ -0,0 +1,584 @@ +%22%22%22%0A%3E%3E%3E from pyspark.context import SparkContext%0A%3E%3E%3E sc = SparkContext('local', 'test')%0A%3E%3E%3E b = sc.parallelize(%5B1, 2, 3, 4%5D)%0A%3E%3E%3E sorted(basicSquare(b).collect())%0A%5B1, 4, 9, 12%5D%0A%22%22%22%0A%0Aimport sys%0A%0Afrom pyspark import SparkContext%0A%0Adef basicSquare(nums):%0A %22%22%22Square the numbers%22%22%22%0A return nums.map(lambda x: x * x)%0A%0Aif __name__ == %22__main__%22:%0A master = %22local%22%0A if len(sys.argv) = 2:%0A master = sys.argv%5B1%5D%0A sc = SparkContext(master, %22BasicMap%22)%0A nums = sc.parallelize(%5B1, 2, 3, 4%5D)%0A output = countWords(nums)%0A for num in output:%0A print %22%25i %22 %25 (num)%0A%0A
41220718d0e9a32fc9e95d55acdb989b2f87563f
Add @job tasks
smsish/tasks.py
smsish/tasks.py
Python
0.000791
@@ -0,0 +1,505 @@ +import django_rq%0Afrom rq.decorators import job%0A%0ADEFAULT_QUEUE_NAME = %22default%22%0ADEFAULT_REDIS_CONNECTION = django_rq.get_connection()%0A%0A%0A@job(DEFAULT_QUEUE_NAME, connection=DEFAULT_REDIS_CONNECTION)%0Adef send_sms(*args, **kwargs):%0A%09from smsish.sms import send_sms as _send_sms%0A%09return _send_sms(*args, **kwargs)%0A%0A%0A@job(DEFAULT_QUEUE_NAME, connection=DEFAULT_REDIS_CONNECTION)%0Adef send_mass_sms(*args, **kwargs):%0A%09from smsish.sms import send_mass_sms as _send_mass_sms%0A%09return _send_mass_sms(*args, **kwargs)%0A
6ee145c7af7084f228ee48754ef2a0bfc37c5946
Add missing hooks.py module
pyqt_distutils/hooks.py
pyqt_distutils/hooks.py
Python
0.000003
@@ -0,0 +1,2465 @@ +%22%22%22%0AA pyqt-distutils hook is a python function that is called after the%0Acompilation of a ui script to let you customise its content. E.g. you%0Amight want to write a hook to change the translate function used or replace%0Athe PyQt imports by your owns if you're using a shim,...%0A%0AThe hook function is a simple python function which must take a single%0Aargument: the path to the generated python script.%0A%0AHooks are exposed as setuptools entrypoint using :attr:%60ENTRYPOINT%60 as the%0Aentrypoint key. E.g., in your setup.py::%0A%0A setup(%0A ...,%0A entry_points=%7B%0A 'pyqt_distutils_hooks': %5B%0A 'hook_name = package_name.module_name:function_name'%5D%0A %7D,%0A ...)%0A%0A%0A%0AThere is a %22hooks%22 config key where you can list the hooks%0Athat you want to run on all your ui/qrc scripts. E.g.::%0A%0A %7B%0A %22files%22: %5B%0A %5B%22forms/*.ui%22, %22foo_gui/forms/%22%5D,%0A %5B%22resources/*.qrc%22, %22foo_gui/forms/%22%5D%0A %5D,%0A %22pyrcc%22: %22pyrcc5%22,%0A %22pyrcc_options%22: %22%22,%0A %22pyuic%22: %22pyuic5%22,%0A %22pyuic_options%22: %22--from-imports%22,%0A %22hooks%22: %5B%22gettext%22, %22spam%22, %22eggs%22%5D%0A %7D%0A%0AAt the moment, we provide one builtin hook: **gettext**. This hook let you%0Ause %60%60gettext.gettext%60%60 instead of %60%60QCoreApplication.translate%60%60.%0A%0A%22%22%22%0Aimport pkg_resources%0Aimport traceback%0A%0A%0A#: Name of the entrypoint to use in setup.py%0AENTRYPOINT = 'pyqt_distutils_hooks'%0A%0A%0Adef load_hooks():%0A %22%22%22%0A Load the exposed hooks.%0A%0A Returns a dict of hooks where the keys are the name of the hook and the%0A values are the actual hook functions.%0A %22%22%22%0A hooks = %7B%7D%0A for entrypoint in pkg_resources.iter_entry_points(ENTRYPOINT):%0A name = str(entrypoint).split('=')%5B0%5D.strip()%0A try:%0A hook = entrypoint.load()%0A except Exception:%0A traceback.print_exc()%0A else:%0A hooks%5Bname%5D = hook%0A return hooks%0A%0A%0Adef hook(ui_file_path):%0A %22%22%22%0A This is the prototype of a hook function.%0A %22%22%22%0A pass%0A%0A%0AGETTEXT_REPLACEMENT = ''' import gettext%0A def _translate(_, string):%0A return gettext.gettext(string)%0A'''%0A%0A%0Adef gettext(ui_file_path):%0A %22%22%22%0A Let you use gettext instead of the Qt tools for l18n%0A %22%22%22%0A with open(ui_file_path, 'r') as fin:%0A content = fin.read()%0A with open(ui_file_path, 'w') as fout:%0A fout.write(content.replace(%0A ' _translate = QtCore.QCoreApplication.translate',%0A GETTEXT_REPLACEMENT))%0A
ff79343cb1feda5259244199b4f0d503da401f24
Create quick_sort_iterativo.py
quick_sort_iterativo.py
quick_sort_iterativo.py
Python
0.000004
@@ -0,0 +1,1632 @@ +import unittest%0A%0A%0Adef _quick_recursivo(seq, inicio, final):%0A if inicio %3E= final:%0A return seq%0A indice_pivot = final%0A pivot = seq%5Bindice_pivot%5D%0A i_esquerda = inicio%0A i_direita = final - 1%0A%0A while i_esquerda%3C=i_direita:%0A while i_esquerda%3C=i_direita and seq%5Bi_esquerda%5D%3C=pivot:%0A i_esquerda=i_esquerda+1%0A while i_esquerda%3C=i_direita and seq%5Bi_direita%5D%3E=pivot:%0A i_direita=i_direita-1%0A if i_esquerda%3Ci_direita:%0A aux=seq%5Bi_esquerda%5D%0A seq%5Bi_esquerda%5D=seq%5Bi_direita%5D%0A seq%5Bi_direita%5D=aux%0A aux=seq%5Bi_esquerda%5D%0A seq%5Bi_esquerda%5D=seq%5Bfinal%5D%0A seq%5Bfinal%5D=aux%0A _quick_recursivo(seq, inicio, i_esquerda - 1)%0A _quick_recursivo(seq, i_esquerda + 1, final)%0A %0A%0A return seq%0A%0Adef quick_sort(seq):%0A return _quick_recursivo(seq, 0, len(seq) - 1)%0A%0A%0Aclass OrdenacaoTestes(unittest.TestCase):%0A def teste_lista_vazia(self):%0A self.assertListEqual(%5B%5D, quick_sort(%5B%5D))%0A%0A def teste_lista_unitaria(self):%0A self.assertListEqual(%5B1%5D, quick_sort(%5B1%5D))%0A%0A def teste_lista_binaria(self):%0A self.assertListEqual(%5B1, 2%5D, quick_sort(%5B2, 1%5D))%0A%0A def teste_lista_desordenada(self):%0A self.assertListEqual(%5B0, 1, 2, 3, 4, 5, 6, 7, 8, 9%5D, quick_sort(%5B9, 7, 1, 8, 5, 3, 6, 4, 2, 0%5D))%0A%0A def teste_lista_com_elementos_repetidos(self):%0A self.assertListEqual(%5B0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9%5D, quick_sort(%5B9, 7, 1, 8, 5, 3, 6, 4, 2, 0, 9, 9%5D))%0A%0A def teste_lista_so_com_elementos_repetidos(self):%0A self.assertListEqual(%5B9, 9, 9%5D, quick_sort(%5B9, 9, 9%5D))%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
4324eaf427731db3943cf130e42e29509bdbd4df
Fix for Python 3
asv/config.py
asv/config.py
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import (absolute_import, division, print_function, unicode_literals) import os import sys from . import util class Config(object): """ Manages the configuration for a benchmark project. """ api_version = 1 def __init__(self): self.project = "project" self.project_url = "#" self.repo = None self.pythons = ["{0.major}.{0.minor}".format(sys.version_info)] self.matrix = {} self.env_dir = "env" self.benchmark_dir = "benchmarks" self.results_dir = "results" self.html_dir = "html" self.show_commit_url = "#" self.hash_length = 8 @classmethod def load(cls, path=None): """ Load a configuration from a file. If no file is provided, defaults to `asv.conf.json`. """ if not path: path = "asv.conf.json" if not os.path.exists(path): raise RuntimeError("Config file {0} not found.".format(path)) conf = Config() d = util.load_json(path, cls.api_version) conf.__dict__.update(d) if not getattr(conf, "repo", None): raise ValueError( "No repo specified in {0} config file.".format(path)) return conf @classmethod def update(cls, path=None): if not path: path = "asv.conf.json" if not os.path.exists(path): raise RuntimeError("Config file {0} not found.".format(path)) util.update_json(cls, path, cls.api_version)
Python
0.000054
@@ -493,24 +493,18 @@ %5B%22%7B0 -.major%7D.%7B0.minor +%5B0%5D%7D.%7B0%5B1%5D %7D%22.f
6a47c684012b98679c9274ca4087958c725a1fa7
support extensions in tests
test/unit/dockerstache_tests.py
test/unit/dockerstache_tests.py
Python
0
@@ -0,0 +1,2218 @@ +#!/usr/bin/env python%0A%22%22%22%0Adockerstache module test coverage for API calls%0A%0A%22%22%22%0Aimport os%0Aimport tempfile%0Aimport json%0Aimport unittest%0Aimport mock%0A%0Afrom dockerstache.dockerstache import run%0A%0A%0Aclass RunAPITests(unittest.TestCase):%0A %22%22%22tests for run API call%22%22%22%0A def setUp(self):%0A self.tempdir = tempfile.mkdtemp()%0A self.defaults = os.path.join(self.tempdir, 'defaults.json')%0A self.context = os.path.join(self.tempdir, 'context.json')%0A self.dotfile = os.path.join(self.tempdir, '.dockerstache')%0A%0A with open(self.defaults, 'w') as handle:%0A json.dump(%0A %7B%22defaults%22: %7B%22value1%22: 1, %22value2%22: 2%7D, %22default_value%22: 99%7D,%0A handle%0A )%0A%0A with open(self.context, 'w') as handle:%0A json.dump(%0A %7B%0A %22defaults%22: %7B%22value2%22: 100%7D,%0A %22context%22: %7B%22value3%22: 3, %22value4%22: 4%7D%0A %7D,%0A handle%0A )%0A with open(self.dotfile, 'w') as handle:%0A json.dump(%0A %7B%0A %22context%22: self.context,%0A %22defaults%22: self.defaults%0A %7D,%0A handle%0A )%0A self.opts = %7B%7D%0A self.opts%5B'input'%5D = self.tempdir%0A self.opts%5B'output'%5D = None%0A self.opts%5B'context'%5D = None%0A self.opts%5B'defaults'%5D = None%0A%0A def tearDown(self):%0A %22%22%22cleanup test data %22%22%22%0A if os.path.exists(self.tempdir):%0A os.system(%22rm -rf %7B%7D%22.format(self.tempdir))%0A%0A @mock.patch('dockerstache.dockerstache.process_templates')%0A def test_run(self, mock_process):%0A %22%22%22test run method%22%22%22%0A run(**self.opts)%0A self.failUnless(mock_process.called)%0A%0A @mock.patch('dockerstache.dockerstache.process_templates')%0A def test_run_extend_context(self, mock_process):%0A %22%22%22test run method with extras for context%22%22%22%0A extend = %7B'extensions': %7B'extras': 'values'%7D%7D%0A self.opts%5B'extend_context'%5D = extend%0A run(**self.opts)%0A self.failUnless(mock_process.called)%0A context = mock_process.call_args%5B0%5D%5B2%5D%0A self.failUnless('extensions' in context)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A