Dataset schema (one value per field in each record below):

  field         type           range
  commit        stringlengths  40–40
  subject       stringlengths  1–3.25k
  old_file      stringlengths  4–311
  new_file      stringlengths  4–311
  old_contents  stringlengths  0–26.3k
  lang          stringclasses  3 values
  proba         float64        0–1
  diff          stringlengths  0–7.82k
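In the raw dataset the `diff` column is percent-encoded so that an entire patch fits in a single string: %0A encodes a newline, %22 a double quote, %5B/%5D square brackets, %25 a literal percent sign. A minimal decoding sketch, using only Python's standard library (the sample value is the raw stored form of the mpmath setup.py record further down):

    from urllib.parse import unquote

    # Raw `diff` value as stored in the dataset; unquote() expands the
    # percent-escapes back into readable patch text.
    raw_diff = "@@ -161,9 +161,9 @@ ='0. -1 +2 ',%0D%0A"
    print(unquote(raw_diff))  # @@ -161,9 +161,9 @@ ='0. -1 +2 ',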
44e3876d76c7d7b3571c82030ff78260e4ec7e65
Add PCA.py template
ML/PCA.py
ML/PCA.py
Python
0
@@ -0,0 +1,217 @@
+"""
Exact principal component analysis (PCA)
"""

class PCA(object):
    """
    Exact principal component analysis (PCA)
    """

    def __init__(self):
        return

    def fit(self, X):
        return
cd2c959674043fcc3b6261129f57f266539a8658
Add a Python snippet.
Python.py
Python.py
Python
0.000043
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
# coding: utf-8

"""Python snippet
"""

import os
import sys

if __name__ == '__main__':
    if len (sys.argv) == 1:
        print ("Hi there!")
    else:
        print ("Hello, %s!" % sys.argv[1])
65449c60f357eeab5ddc9eb91a468ab1e3719de7
Add dismiss_recommendation example (#35)
examples/v0/recommendations/dismiss_recommendation.py
examples/v0/recommendations/dismiss_recommendation.py
Python
0
@@ -0,0 +1,2990 @@
+# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example dismisses a given recommendation.

To retrieve recommendations for text ads, run get_text_ad_recommendations.py.
"""

from __future__ import absolute_import

import argparse
import six
import sys

import google.ads.google_ads.client


def main(client, customer_id, recommendation_id):
    recommendation_service = client.get_service('RecommendationService')

    dismiss_recommendation_request = client.get_type(
        'DismissRecommendationRequest')

    dismiss_recommendation_operation = (dismiss_recommendation_request.
                                        DismissRecommendationOperation())

    dismiss_recommendation_operation.resource_name = (
        recommendation_service.recommendation_path(
            customer_id, recommendation_id))

    try:
        dismissal_response = recommendation_service.dismiss_recommendation(
            customer_id,
            [dismiss_recommendation_operation])
    except google.ads.google_ads.errors.GoogleAdsException as ex:
        print('Request with ID "%s" failed with status "%s" and includes the '
              'following errors:' % (ex.request_id, ex.error.code().name))
        for error in ex.failure.errors:
            print('\tError with message "%s".' % error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print('\t\tOn field: %s' % field_path_element.field_name)
        sys.exit(1)

    print('Dismissed recommendation with resource name: "%s".'
          % dismissal_response.results[0].resource_name)


if __name__ == '__main__':
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
                         .load_from_storage())

    parser = argparse.ArgumentParser(
        description=('Dismisses a recommendation with the given ID.'))
    # The following argument(s) should be provided to run the example.
    parser.add_argument('-c', '--customer_id', type=six.text_type,
                        required=True, help='The Google Ads customer ID.')
    parser.add_argument('-r', '--recommendation_id', type=six.text_type,
                        required=True, help='The recommendation ID.')
    args = parser.parse_args()

    main(google_ads_client, args.customer_id, args.recommendation_id)
8d6ca433d33551cc1fe5c08edcf68ec65e5447b0
Add solution to exercise 3.3.
exercises/chapter_03/exercise_03_03/exercies_03_03.py
exercises/chapter_03/exercise_03_03/exercies_03_03.py
Python
0.000054
@@ -0,0 +1,299 @@
+# 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]

print("A " + transportation[0] + " is good when exercising in the woods.\n")
print("The ultimate form of trarsportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
d82ecab372ed22da0b00512294ee6cd3f5fcb012
Add script to reindex datasets.
ckanofworms/scripts/reindex.py
ckanofworms/scripts/reindex.py
Python
0
@@ -0,0 +1,2817 @@
+#! /usr/bin/env python
# -*- coding: utf-8 -*-


# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <[email protected]>
#
# Copyright (C) 2013 Etalab
#     http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


"""Reindex objects."""


import argparse
import logging
import os
import sys

import paste.deploy

from ckanofworms import contexts, environment, model


app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)


def main():
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument('config', help = "CKAN-of-Worms configuration file")
    parser.add_argument('-a', '--all', action = 'store_true', default = False, help = "publish everything")
    parser.add_argument('-d', '--dataset', action = 'store_true', default = False, help = "publish datasets")
    parser.add_argument('-g', '--group', action = 'store_true', default = False, help = "publish groups")
    parser.add_argument('-o', '--organization', action = 'store_true', default = False, help = "publish organizations")
    parser.add_argument('-s', '--section', default = 'main',
        help = "Name of configuration section in configuration file")
    parser.add_argument('-u', '--user', action = 'store_true', default = False, help = "publish accounts")
    parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
    args = parser.parse_args()
    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
    site_conf = paste.deploy.appconfig('config:{0}#{1}'.format(os.path.abspath(args.config), args.section))
    environment.load_environment(site_conf.global_conf, site_conf.local_conf)

    ctx = contexts.null_ctx

    if args.all or args.dataset:
        for dataset in model.Dataset.find():
            dataset.compute_weight()
            dataset.compute_timestamp()
            if dataset.save(ctx, safe = False):
                log.info(u'Updated dataset: {}'.format(dataset.name))

    return 0


if __name__ == "__main__":
    sys.exit(main())
d7adec1fefae1ff64e0caab2cf1c774ed9498fd8
Use simpler form of pg_try_advisory_lock
configure-aspen.py
configure-aspen.py
from __future__ import division

from decimal import Decimal as D
import threading
import time
import traceback

import gratipay
import gratipay.wireup
from gratipay import canonize, utils
from gratipay.security import authentication, csrf, x_frame_options
from gratipay.utils import cache_static, i18n, set_cookie, timer
from gratipay.version import get_version

import aspen
from aspen import log_dammit


# Monkey patch aspen.Response
# ===========================

if hasattr(aspen.Response, 'redirect'):
    raise Warning('aspen.Response.redirect() already exists')
def _redirect(response, url):
    response.code = 302
    response.headers['Location'] = url
    raise response
aspen.Response.redirect = _redirect

if hasattr(aspen.Response, 'set_cookie'):
    raise Warning('aspen.Response.set_cookie() already exists')
def _set_cookie(response, *args, **kw):
    set_cookie(response.headers.cookie, *args, **kw)
aspen.Response.set_cookie = _set_cookie


# Wireup Algorithm
# ================

exc = None
try:
    website.version = get_version()
except Exception, e:
    exc = e
    website.version = 'x'

website.renderer_default = "jinja2"
website.renderer_factories['jinja2'].Renderer.global_context = {
    'range': range,
    'unicode': unicode,
    'enumerate': enumerate,
    'len': len,
    'float': float,
    'type': type,
    'str': str
}

env = website.env = gratipay.wireup.env()
gratipay.wireup.canonical(env)
website.db = gratipay.wireup.db(env)
website.mail = gratipay.wireup.mail(env)
gratipay.wireup.billing(env)
gratipay.wireup.username_restrictions(website)
gratipay.wireup.nanswers(env)
gratipay.wireup.other_stuff(website, env)
gratipay.wireup.accounts_elsewhere(website, env)
tell_sentry = website.tell_sentry = gratipay.wireup.make_sentry_teller(env)
if exc:
    tell_sentry(exc)


# Periodic jobs
# =============

conn = website.db.get_connection().__enter__()

def cron(period, func, exclusive=False):
    def f():
        if period <= 0:
            return
        sleep = time.sleep
        if exclusive:
            cursor = conn.cursor()
            try_lock = lambda: cursor.one("SELECT pg_try_advisory_lock(0, 0)")
        has_lock = False
        while 1:
            try:
                if exclusive and not has_lock:
                    has_lock = try_lock()
                if not exclusive or has_lock:
                    func()
            except Exception, e:
                tell_sentry(e)
                log_dammit(traceback.format_exc().strip())
            sleep(period)
    t = threading.Thread(target=f)
    t.daemon = True
    t.start()

cron(env.update_global_stats_every, lambda: utils.update_global_stats(website))
cron(env.check_db_every, website.db.self_check, True)


# Website Algorithm
# =================

def add_stuff_to_context(request):
    request.context['username'] = None

    # Helpers for global call to action to support Gratipay itself.
    user = request.context.get('user')
    p = user.participant if user else None
    if p and p.is_free_rider is None:
        usage = p.usage

        # Above $500/wk we suggest 2%.
        if usage >= 5000:
            low = D('100.00')
            high = D('1000.00')
        elif usage >= 500:
            low = D('10.00')
            high = D('100.00')

        # From $20 to $499 we suggest 5%.
        elif usage >= 100:
            low = D('5.00')
            high = D('25.00')
        elif usage >= 20:
            low = D('1.00')
            high = D('5.00')

        # Below $20 we suggest 10%.
        elif usage >= 5:
            low = D('0.50')
            high = D('2.00')
        else:
            low = D('0.10')
            high = D('1.00')

        request.context['cta_low'] = low
        request.context['cta_high'] = high


algorithm = website.algorithm
algorithm.functions = [ timer.start
                      , algorithm['parse_environ_into_request']
                      , algorithm['tack_website_onto_request']
                      , algorithm['raise_200_for_OPTIONS']
                      , canonize
                      , authentication.inbound
                      , csrf.inbound
                      , add_stuff_to_context
                      , i18n.inbound
                      , algorithm['dispatch_request_to_filesystem']
                      , algorithm['apply_typecasters_to_path']
                      , cache_static.inbound
                      , algorithm['get_resource_for_request']
                      , algorithm['get_response_for_resource']
                      , tell_sentry
                      , algorithm['get_response_for_exception']
                      , gratipay.outbound
                      , authentication.outbound
                      , csrf.outbound
                      , cache_static.outbound
                      , x_frame_options
                      , algorithm['log_traceback_for_5xx']
                      , algorithm['delegate_error_to_simplate']
                      , tell_sentry
                      , algorithm['log_traceback_for_exception']
                      , algorithm['log_result_of_request']
                      , timer.end
                      , tell_sentry
                       ]
Python
0.000901
@@ -2147,11 +2147,8 @@ ock( -0, 0)")
ab99892d974503f2e0573a8937dc8f1b085b0014
Add stringbuilder module
modules/pipestrconcat.py
modules/pipestrconcat.py
Python
0.000001
@@ -0,0 +1,592 @@
+# pipestrconcat.py #aka stringbuilder
#
#

from pipe2py import util

def pipe_strconcat(context, _INPUT, conf, **kwargs):
    """This source builds a string and yields it forever.

    Keyword arguments:
    context -- pipeline context
    _INPUT -- not used
    conf:
        part -- parts

    Yields (_OUTPUT):
    string
    """
    s = ""
    for part in conf['part']:
        if "subkey" in part:
            pass #todo get from _INPUT e.g {u'type': u'text', u'subkey': u'severity'}
        else:
            s += util.get_value(part, kwargs)

    while True:
        yield s
e7053da76c14f12bfc02992ab745aac193e7c869
Create compareLists.py
compareLists.py
compareLists.py
Python
0.000001
@@ -0,0 +1,464 @@
+def unique(a):
    """ return the list with duplicate elements removed """
    return list(set(a))

def intersect(a, b):
    """ return the intersection of two lists """
    return list(set(a) & set(b))

def union(a, b):
    """ return the union of two lists """
    return list(set(a) | set(b))

if __name__ == "__main__":
    a = [0,1,2,0,1,2,3,4,5,6,7,8,9]
    b = [5,6,7,8,9,10,11,12,13,14]
    print unique(a)
    print intersect(a, b)
    print union(a, b)
0ca69bd8c29d123702e1934863d5d8a8c0d1703b
Create parse.py
parse.py
parse.py
Python
0.00002
@@ -0,0 +1,2213 @@
+# Parse the Essential Script
def parse(source):
    parsedScript = [[]]
    word = ''
    prevChar = ''
    inArgs = False
    inList = False
    inString = False
    inQuote = False
    for char in source:
        if char == '(' and not inString and not inQuote:
            parsedScript.append([])
            parsedScript[-1].append('args')
            if word:
                parsedScript[-1].append(word)
                word = ''
        elif char in (';', '\n') and not inString and not inQuote:
            if word:
                parsedScript[-1].append(word)
                word = ''
            parsedScript.append([])
        elif char == '[':
            parsedScript.append([])
            parsedScript[-1].append('list')
            if word:
                parsedScript[-1].append(word)
                word = ''
        elif char in (')', ']') and not inString and not inQuote:
            if word:
                parsedScript[-1].append(word)
                word = ''
            temp = parsedScript.pop()
            parsedScript[-1].append(temp)
        elif char in (' ', '\t') and not inString and not inQuote:
            if word:
                parsedScript[-1].append(word)
                word = ''
        elif char == '\"' and not prevChar == '\\':
            inString = not inString
        elif char == '\'' and not prevChar == '\\':
            inQuote = not inQuote
        elif char in ('+', '-', '*', '/'):
            if word:
                parsedScript[-1].append(word)
                word = ''
            parsedScript[-1].append(char)
        else:
            word += char
        prevChar = char
    if word:
        parsedScript[-1].append(word)
        word = ''
    reparsedScript = [[]]

    # Parse multi-line code until 'end'
    for word in parsedScript:
        if word:
            if word[0] in ('subroutine', 'if', 'for', 'while'):
                reparsedScript.append([])
                reparsedScript[-1].append(word)
            elif word[0] == 'end':
                temp = reparsedScript.pop()
                reparsedScript[-1].append(temp)
            else:
                reparsedScript[-1].append(word)
    return reparsedScript[0]
c4b7bd5b74aaba210a05f946d59c98894b60b21f
Add test for pixel CLI
tests/cli/test_pixel.py
tests/cli/test_pixel.py
Python
0
@@ -0,0 +1,552 @@
+""" Test ``yatsm line``
"""
import os

from click.testing import CliRunner
import pytest

from yatsm.cli.main import cli


@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
def test_cli_pixel_pass_1(example_timeseries):
    """ Correctly run for one pixel
    """
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ['-v', 'pixel',
         '--band', '5',
         '--plot', 'TS',
         '--style', 'ggplot',
         example_timeseries['config'], '1', '1'
         ])
    assert result.exit_code == 0
26dd65a282ada1e79309c4ff35cee4e49b086b66
Create part3.py
part3.py
part3.py
Python
0.000002
@@ -0,0 +1,1054 @@
+import pygame

pygame.init()

display_width = 800
display_height = 600

black = (0,0,0)
white = (255,255,255)
red = (255,0,0)

gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()

carImg = pygame.image.load('racecar.png')

def car(x,y):
    gameDisplay.blit(carImg,(x,y))


x = (display_width * 0.45)
y = (display_height * 0.8)

x_change = 0

crashed = False

while not crashed:

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True

        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                x_change = -5
            if event.key == pygame.K_RIGHT:
                x_change = 5

        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                x_change = 0

    x += x_change

    gameDisplay.fill(white)
    car(x,y)

    pygame.display.update()
    clock.tick(60)

pygame.quit()
quit()
98663d644b90e0e4c6188555501bcbc2b42d391a
Create part4.py
part4.py
part4.py
Python
0.000001
@@ -0,0 +1,1268 @@
+import pygame

pygame.init()

display_width = 800
display_height = 600

black = (0,0,0)
white = (255,255,255)
red = (255,0,0)

car_width = 73

gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()

carImg = pygame.image.load('racecar.png')

def car(x,y):
    gameDisplay.blit(carImg,(x,y))



def game_loop():
    x = (display_width * 0.45)
    y = (display_height * 0.8)

    x_change = 0

    gameExit = False

    while not gameExit:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x_change = -5
                if event.key == pygame.K_RIGHT:
                    x_change = 5

            if event.type == pygame.KEYUP:
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    x_change = 0

        x += x_change

        gameDisplay.fill(white)
        car(x,y)

        if x > display_width - car_width or x < 0:
            gameExit = True

        pygame.display.update()
        clock.tick(60)



game_loop()
pygame.quit()
quit()
4727d86e5207dac3f53018b4ff2d1d0ade97d4e6
Add http_json external pillar (#32741)
salt/pillar/http_json.py
salt/pillar/http_json.py
Python
0
@@ -0,0 +1,1157 @@
+# -*- coding: utf-8 -*-
"""
A module that adds data to the Pillar structure retrieved by an http request


Configuring the HTTP_JSON ext_pillar
====================================

Set the following Salt config to setup Foreman as external pillar source:

.. code-block:: json

    ext_pillar:
      - http_json:
          url: http://example.com/api/minion_id
          ::TODO::
          username: username
          password: password

Module Documentation
====================
"""
from __future__ import absolute_import

# Import python libs
import logging


def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               url=None):
    """
    Read pillar data from HTTP response.

    :param url String to make request
    :returns dict with pillar data to add
    :returns empty if error
    """
    # Set up logging
    log = logging.getLogger(__name__)

    data = __salt__['http.query'](url=url, decode=True, decode_type='json')

    if 'dict' in data:
        return data['dict']

    log.error('Error caught on query to' + url + '\nMore Info:\n')

    for k, v in data.iteritems():
        log.error(k + ' : ' + v)

    return {}
d2e5c2d20cf7e07f2dc8288d303e8f4088d5877a
Update module!
Modules/Update.py
Modules/Update.py
Python
0
@@ -0,0 +1,992 @@
+from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import GlobalVars
import re
import subprocess


class Module(ModuleInterface):
    triggers = ["update"]
    help = "update - pulls the latest code from GitHub"

    def onTrigger(self, Hubbot, message):
        if message.User.Name not in GlobalVars.admins:
            return IRCResponse(ResponseType.Say, "Only my admins can update me!", message.ReplyTo)

        subprocess.call(["git", "fetch"])

        output = subprocess.check_output(["git", "whatchanged", "..origin/master"])
        changes = re.findall('\n\n\s{4}(.+?)\n\n', output)

        if len(changes) == 0:
            return IRCResponse(ResponseType.Say, "The bot is already up to date.", message.ReplyTo)

        changes = list(reversed(changes))
        response = "New Commits: {}".format(" | ".join(changes))

        subprocess.call(["git", "pull"])

        return IRCResponse(ResponseType.Say, response, message.ReplyTo)
555cfbb827532c54598cecde01ef4e6e5e07714d
Create a test for re-evaluating external tasks while a workflow is running.
test/worker_external_task_test.py
test/worker_external_task_test.py
Python
0
@@ -0,0 +1,2065 @@
+# Copyright (c) 2015
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import luigi
import unittest
from mock import Mock, patch
from helpers import with_config


mock_external_task = Mock(spec=luigi.ExternalTask)
mock_external_task.complete.side_effect = [False, False, True]

class TestTask(luigi.Task):
    """
    Requires a single file dependency
    """
    def __init__(self):
        super(TestTask, self).__init__()
        self.has_run = False

    def requires(self):
        return mock_external_task

    def output(self):
        mock_target = Mock(spec=luigi.Target)
        # the return is False so that this task will be scheduled
        mock_target.exists.return_value = False

    def run(self):
        self.has_run = True


class WorkerExternalTaskTest(unittest.TestCase):

    @with_config({'core': {'retry-external-tasks': 'true'}})
    def test_external_dependency_satisified_later(self):
        """
        Test that an external dependency that is not `complete` when luigi is invoked, but \
        becomes `complete` while the workflow is executing is re-evaluated.
        """
        assert luigi.configuration.get_config().getboolean('core',
                                                           'retry-external-tasks',
                                                           False) == True

        test_task = TestTask()
        luigi.build([test_task], local_scheduler=True)

        assert test_task.has_run == True
        assert mock_external_task.complete.call_count == 3


if __name__ == '__main__':
    unittest.main()
7edcf7e1aa4824dc18584b88f21b2dc4ff9cab98
Use the requests.session() helper to get a Session() object.
rightscale/httpclient.py
rightscale/httpclient.py
from functools import partial

import requests


DEFAULT_ROOT_RES_PATH = '/'


class HTTPResponse(object):
    """
    Wrapper around :class:`requests.Response`.

    Parses ``Content-Type`` header and makes it available as a list of
    fields in the :attr:`content_type` member.
    """
    def __init__(self, raw_response):
        self.raw_response = raw_response

        content_type = raw_response.headers.get('content-type', '')
        ct_fields = [f.strip() for f in content_type.split(';')]
        self.content_type = ct_fields

    def __getattr__(self, name):
        return getattr(self.raw_response, name)


class HTTPClient(object):
    """
    Convenience wrapper around Requests.

    :param str endpoint: URL for the API endpoint. E.g. ``https://blah.org``.

    :param dict extra_headers: When specified, these key-value pairs are
        added to the default HTTP headers passed in with each request.
    """
    def __init__(
            self,
            endpoint='',
            extra_headers=None,
            ):
        self.endpoint = endpoint
        s = requests.Session()
        s.headers['Accept'] = 'application/json'
        if extra_headers:
            s.headers.update(extra_headers)
        self.s = s

        # convenience methods
        self.delete = partial(self.request, 'delete')
        self.get = partial(self.request, 'get')
        self.head = partial(self.request, 'head')
        self.post = partial(self.request, 'post')
        self.put = partial(self.request, 'put')

    def request(self, method, path='/', url=None, ignore_codes=[], **kwargs):
        """
        Performs HTTP request.

        :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...)

        :param str path: A path component of the target URL. This will be
            appended to the value of ``self.endpoint``. If both :attr:`path`
            and :attr:`url` are specified, the value in :attr:`url` is used
            and the :attr:`path` is ignored.

        :param str url: The target URL (e.g. ``http://server.tld/somepath/``).
            If both :attr:`path` and :attr:`url` are specified, the value in
            :attr:`url` is used and the :attr:`path` is ignored.

        :param ignore_codes: List of HTTP error codes (e.g. 404, 500) that
            should be ignored. If an HTTP error occurs and it is *not* in
            :attr:`ignore_codes`, then an exception is raised.
        :type ignore_codes: list of int

        :param kwargs: Any other kwargs to pass to :meth:`requests.request()`.

        Returns a :class:`requests.Response` object.
        """
        _url = url if url else (self.endpoint + path)
        r = self.s.request(method, _url, **kwargs)
        if not r.ok and r.status_code not in ignore_codes:
            r.raise_for_status()
        return HTTPResponse(r)
Python
0
@@ -1088,17 +1088,17 @@ equests. -S +s ession()
d10505678fd5624e5e88f72ac7852109f149b264
Add new kcov package (#14574)
var/spack/repos/builtin/packages/kcov/package.py
var/spack/repos/builtin/packages/kcov/package.py
Python
0
@@ -0,0 +1,1184 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Kcov(CMakePackage):
    """Code coverage tool for compiled programs, Python and Bash which uses
    debugging information to collect and report data without special
    compilation options"""

    homepage = "http://simonkagstrom.github.io/kcov/index.html"
    url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz"

    version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')

    depends_on('[email protected]:', type='build')
    depends_on('zlib')
    depends_on('curl')

    def cmake_args(self):
        # Necessary at least on macOS, fixes linking error to LLDB
        # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb
        return ['-DSPECIFY_RPATH=ON']

    @run_after('install')
    @on_package_attributes(run_tests=True)
    def test(self):
        # The help message exits with an exit code of 1
        kcov = Executable(self.prefix.bin.kcov)
        kcov('-h', ignore_errors=1)
a6e65ac7378b12cc6889199cac602a8fbee4b6e8
add nagios check on autoplot metrics
nagios/check_autoplot.py
nagios/check_autoplot.py
Python
0
@@ -0,0 +1,656 @@
+"""Check autoplot stats"""
from __future__ import print_function
import sys

import psycopg2


def main():
    """Go Main Go"""
    pgconn = psycopg2.connect(database='mesosite', host='iemdb',
                              user='nobody')
    cursor = pgconn.cursor()
    cursor.execute("""
        select count(*), avg(timing) from autoplot_timing
        where valid > now() - '4 hours'::interval
    """)
    (count, speed) = cursor.fetchone()
    speed = 0 if speed is None else speed

    print(("Autoplot cnt:%s speed:%.2f | COUNT=%s;; SPEED=%.3f;;"
           ) % (count, speed, count, speed))
    sys.exit(0)


if __name__ == '__main__':
    main()
81b713d69408f6b5712f67d7707bbb17f9588ef6
Update __init__.py
tendrl/node_agent/manager/__init__.py
tendrl/node_agent/manager/__init__.py
import signal
import threading

from tendrl.commons.event import Event
from tendrl.commons import manager as commons_manager
from tendrl.commons.message import Message
from tendrl.commons import TendrlNS
from tendrl.node_agent.provisioner.gluster.manager import \
    ProvisioningManager as GlusterProvisioningManager

from tendrl import node_agent
from tendrl.node_agent.message.handler import MessageHandler
from tendrl.node_agent import node_sync
from tendrl.integrations.gluster import GlusterIntegrationNS


class NodeAgentManager(commons_manager.Manager):
    def __init__(self):
        # Initialize the state sync thread which gets the underlying
        # node details and pushes the same to etcd
        super(NodeAgentManager, self).__init__(
            NS.state_sync_thread,
            message_handler_thread=NS.message_handler_thread
        )
        node_sync.platform_detect.sync()
        node_sync.sds_detect.sync()


def main():
    # NS.node_agent contains the config object,
    # hence initialize it before any other NS
    node_agent.NodeAgentNS()
    # Init NS.tendrl
    TendrlNS()

    # Init NS.provisioning
    # TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
    # provisioning.ProvisioningNS()

    # Init NS.integrations.ceph
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    # ceph.CephIntegrationNS()

    # Init NS.integrations.gluster
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    GlusterIntegrationNS()

    # Compile all definitions
    NS.compiled_definitions = \
        NS.node_agent.objects.CompiledDefinitions()
    NS.compiled_definitions.merge_definitions([
        NS.tendrl.definitions, NS.node_agent.definitions,
        NS.integrations.gluster.definitions])
    NS.node_agent.compiled_definitions = NS.compiled_definitions

    # Every process needs to set a NS.type
    # Allowed types are "node", "integration", "monitoring"
    NS.type = "node"

    NS.first_node_inventory_sync = True
    NS.state_sync_thread = node_sync.NodeAgentSyncThread()

    NS.compiled_definitions.save()
    NS.node_context.save()
    NS.tendrl_context.save()
    NS.node_agent.definitions.save()
    # NS.integrations.ceph.definitions.save()
    NS.node_agent.config.save()
    NS.publisher_id = "node_agent"
    NS.message_handler_thread = MessageHandler()

    NS.gluster_provisioner = GlusterProvisioningManager(
        NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"][
            'gluster_provisioner']
    )

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    NS.gluster_sds_sync_running = False

    m = NodeAgentManager()
    m.start()

    complete = threading.Event()

    def shutdown(signum, frame):
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: stopping"}
            )
        )
        complete.set()
        m.stop()
        if NS.gluster_sds_sync_running:
            NS.gluster_integrations_sync_thread.stop()

    def reload_config(signum, frame):
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: SIGHUP"}
            )
        )
        NS.config = NS.config.__class__()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)


if __name__ == "__main__":
    main()
Python
0.000072
@@ -3504,16 +3504,41 @@ lass__()
+
        NS.config.save()


    si
b39eeea0b25e1e5bcec1d762a041e5ecf465885c
add solution for Reorder List
src/reorderList.py
src/reorderList.py
Python
0
@@ -0,0 +1,988 @@
+# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None


class Solution:
    # @param head, a ListNode
    # @return nothing

    def reorderList(self, head):
        if head is None or head.next is None:
            return
        slow = fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        fast, slow.next = slow.next, None
        fast = self.reverseList(fast)
        self.merge2Lists(head, fast)

    def reverseList(self, head):
        if head is None or head.next is None:
            return head
        pre, cur = head, head.next
        while cur:
            nxt, cur.next = cur.next, pre
            cur, pre = nxt, cur
        head.next = None
        return pre

    def merge2Lists(self, l1, l2):
        while l2:
            n1, n2 = l1.next, l2.next
            l1.next, l2.next = l2, n1
            l1, l2 = n1, n2
68e056459dd3818ebb0c5dbdc8b4f1089bec9f07
Add a few behavior tests for selection
tests/selection_test.py
tests/selection_test.py
Python
0
@@ -0,0 +1,1453 @@
+import os
import pytest
import yaml

from photoshell.selection import Selection


@pytest.fixture
def sidecar(tmpdir):
    tmpdir.join("test.sidecar").write(yaml.dump({
        'developed_path': os.path.join(tmpdir.strpath, "test.jpeg"),
        'datetime': '2014-10-10 00:00'
    }, default_flow_style=False))
    return os.path.join(tmpdir.strpath, "test.sidecar")


@pytest.fixture
def empty_selection():
    s = Selection('', '')
    return s


@pytest.fixture
def selection(empty_selection):
    empty_selection.images.append('image')
    empty_selection.photos.append('image')
    return empty_selection


def test_current_default_selection(selection):
    assert selection.current()


def test_current_is_none_if_selection_empty(empty_selection):
    assert empty_selection.current() is None


def test_current_photo_default_selection(selection):
    assert selection.current_photo()


def test_current_photo_is_none_if_selection_empty(empty_selection):
    assert empty_selection.current_photo() is None


def test_next_prev_does_nothing_single_photo(selection):
    assert selection.current() == selection.next()
    assert selection.current() == selection.prev()


def test_next_prev_wrap_around(selection):
    selection.photos.append('photo2')
    selection.images.append('image2')

    assert selection.next() == 'image2'
    assert selection.next() == 'image'
    assert selection.prev() == 'image2'
    assert selection.prev() == 'image'
63a34000402f4253f16221b11d620e65e1786447
add solution for Reverse Bits
src/reverseBits.py
src/reverseBits.py
Python
0.000001
@@ -0,0 +1,149 @@
+class Solution:
    # @param n, an integer
    # @return an integer

    def reverseBits(self, n):
        return int(bin(n)[2:].zfill(32)[::-1], 2)
f81a612eabf5972d15a5b3f11d12897530cbf155
Add dump-tree command (wip)
cvsgit/command/dump-tree.py
cvsgit/command/dump-tree.py
Python
0.000003
@@ -0,0 +1,875 @@
+"""Command to dump the full state of the source tree at a certain
point in time."""

import re
import subprocess
from subprocess import PIPE
import sys

from cvsgit.cvs import split_cvs_source
from cvsgit.i18n import _
from cvsgit.main import Command, Conduit
from cvsgit.utils import Tempdir, stripnl

class dump_tree(Command):
    __doc__ = _(
    """Dump the source tree state at a certain date

    Usage: %prog <date>

    Computes and dumps the state of the source tree as it was at the
    given <date>.
    """)

    def initialize_options(self):
        pass

    def finalize_options(self):
        if len(self.args) > 0:
            self.usage_error(_('too many arguments'))

    def run(self):
        conduit = Conduit()
        cvs = conduit.cvs
        for changeset in cvs.changesets():
            print changeset

if __name__ == '__main__':
    dump_tree()
ff2c4b68a5eace4451eeef4fd6ca84d37435c556
Add fields to privatemessage for network invitations.
project/editorial/migrations/0087_auto_20180226_1409.py
project/editorial/migrations/0087_auto_20180226_1409.py
Python
0
@@ -0,0 +1,756 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-26 22:09
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('editorial', '0086_auto_20180102_2145'),
    ]

    operations = [
        migrations.AddField(
            model_name='privatemessage',
            name='network_invitation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='editorial.Network'),
        ),
        migrations.AddField(
            model_name='privatemessage',
            name='network_invitation_response',
            field=models.NullBooleanField(),
        ),
    ]
73084b964f964c05cb948be3acaa6ba68d62dc30
test plotting particles
ws/CSUIBotClass2014/test/test_plot_particles.py
ws/CSUIBotClass2014/test/test_plot_particles.py
Python
0
@@ -0,0 +1,1997 @@
+#!/usr/bin/python

# @author: vektor dewanto
# @obj: demonstrate how to plot particles in an occupancy grid map, _although_, for now, all positions are valid

import matplotlib.pyplot as plt
import numpy as np
import math
import matplotlib.cm as cmx
from matplotlib import colors

# Construct the occupancy grid map
grid_map = {'size': (10,10), 'res': 1.0}

grid = [1,1,1,1,1,1,1,1,1,1,\
        1,0,0,1,0,1,0,0,0,1,\
        1,0,0,1,0,1,0,0,0,1,\
        1,0,0,0,0,1,0,1,1,1,\
        1,1,1,1,0,0,0,0,0,1,\
        1,0,0,1,0,0,0,0,0,1,\
        1,0,0,0,0,0,0,0,0,1,\
        1,0,0,1,0,0,0,0,0,1,\
        1,0,0,1,0,0,0,0,1,1,\
        1,1,1,1,1,1,1,1,1,1]
assert len(grid)==grid_map['size'][0]*grid_map['size'][1], 'grid size is mismatched'
grid = np.asarray(grid)
grid = grid.reshape(grid_map['size'][0], grid_map['size'][1])
grid_map['grid'] = grid

# Plot the map
plt.subplot(1,1,1)
plt.pcolormesh(grid_map['grid'], edgecolors='k', linewidths=0.1, cmap=colors.ListedColormap(['w','b']))
plt.title('The occupancy grid map with particles')

# At t=0, initiate X with n_particle particles drawn from a uniform distribution (since this is a global loc. problem)
# For now, we donot check whether the particle is on an occupied grid
n_particle = 100;
X_tmp = np.random.uniform(0.0, 10.0, n_particle)
Y_tmp = np.random.uniform(0.0, 10.0, n_particle)
THETA_tmp = np.random.uniform(0.0, math.pi*2.0, n_particle)
XYTHETA_tmp = zip(X_tmp, Y_tmp, THETA_tmp)
W = [1.0/n_particle] * n_particle# uniform
X = zip(XYTHETA_tmp, W)

# Plot positions, the color corresponds to the weight
ax = plt.axes()
ax.scatter([e[0][0] for e in X], [e[0][1] for e in X], c=[e[1] for e in X], marker='o', s=20, cmap=cmx.jet)

# Plot bearings
for e in X:
    x = e[0][0]
    y = e[0][1]
    theta = e[0][2]

    # convert polar to cartesian coord
    r = 0.1
    dx = r * math.cos(theta)
    dy = r * math.sin(theta)

    ax.arrow(x, y, dx, dy, head_width=0.05, head_length=0.1, fc='k', ec='k')

plt.show()
68330d9b991b090c0ea0cd0da48a7d2a1fc04a29
Update builds status
infra/gcb/builds_status.py
infra/gcb/builds_status.py
#!/usr/bin/env python2

import datetime
import os
import sys
import jinja2
import json
import tempfile

import dateutil.parser

from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build as gcb_build
from google.cloud import logging
from google.cloud import storage
from jinja2 import Environment, FileSystemLoader

STATUS_BUCKET = 'oss-fuzz-build-logs'
LOGS_BUCKET = 'oss-fuzz-gcb-logs'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))


def usage():
  sys.stderr.write(
    "Usage: " + sys.argv[0] + " <projects_dir>\n")
  exit(1)


def scan_project_names(projects_dir):
  projects = []
  for root, dirs, files in os.walk(projects_dir):
    for f in files:
      if f == "Dockerfile":
        projects.append(os.path.basename(root))
  return sorted(projects)


def upload_status(successes, failures, unstable):
  """Upload main status page."""
  env = Environment(loader=FileSystemLoader(os.path.join(SCRIPT_DIR, 'templates')))

  data = {
      'projects': failures + successes + unstable,
      'failures': failures,
      'successes': successes,
      'unstable': unstable,
      'last_updated': datetime.datetime.utcnow().ctime()
  }

  storage_client = storage.Client()
  bucket = storage_client.get_bucket(STATUS_BUCKET)

  blob = bucket.blob('status.html')
  blob.cache_control = 'no-cache'
  blob.upload_from_string(
      env.get_template('status_template.html').render(data),
      content_type='text/html')

  blob = bucket.blob('status.json')
  blob.cache_control = 'no-cache'
  blob.upload_from_string(
      json.dumps(data),
      content_type='text/html')


def is_build_successful(build):
  if build['status'] == 'SUCCESS':
    return True

  build_id = build['id']
  logging_client = logging.Client(project='oss-fuzz')
  entries = logging_client.list_entries(
      order_by=logging.DESCENDING,
      page_size=1,
      filter_=(
          'resource.type="build" AND '
          'resource.labels.build_id="{0}"'.format(build_id)))
  entry = next(entries.pages)
  entry = list(entry)[0]
  return entry.payload == 'DONE'


def find_last_build(builds):
  DELAY_MINUTES = 40

  for build in builds:
    finish_time = dateutil.parser.parse(build['finishTime'], ignoretz=True)
    if (datetime.datetime.utcnow() - finish_time >=
        datetime.timedelta(minutes=DELAY_MINUTES)):
      storage_client = storage.Client()
      status_bucket = storage_client.get_bucket(STATUS_BUCKET)
      gcb_bucket = storage_client.get_bucket(LOGS_BUCKET)
      log_name = 'log-{0}.txt'.format(build['id'])
      log = gcb_bucket.blob(log_name)
      dest_log = status_bucket.blob(log_name)

      with tempfile.NamedTemporaryFile() as f:
        log.download_to_filename(f.name)
        dest_log.upload_from_filename(f.name, content_type='text/plain')

      return build

  return None


def main():
  if len(sys.argv) != 2:
    usage()

  projects_dir = sys.argv[1]

  credentials = GoogleCredentials.get_application_default()
  cloudbuild = gcb_build('cloudbuild', 'v1', credentials=credentials)

  successes = []
  failures = []
  for project in scan_project_names(projects_dir):
    print project
    query_filter = ('(status="SUCCESS" OR status="FAILURE") AND ' +
                    'images="gcr.io/oss-fuzz/{0}"'.format(project))
    response = cloudbuild.projects().builds().list(
        projectId='oss-fuzz',
        filter=query_filter).execute()

    if not 'builds' in response:
      continue

    builds = response['builds']
    last_build = find_last_build(builds)
    if not last_build:
      print >>sys.stderr, 'Failed to get build for', project
      continue

    print last_build['startTime'], last_build['status'], last_build['id']
    if is_build_successful(last_build):
      successes.append({
          'name': project,
          'build_id': last_build['id'],
      })
    else:
      failures.append({
          'name': project,
          'build_id': last_build['id'],
      })

  upload_status(successes, failures, [])


if __name__ == "__main__":
  main()
Python
0
@@ -839,18 +839,8 @@ ures -, unstable ):
@@ -1065,19 +1065,8 @@ sses - + unstable ,
@@ -3856,32 +3856,83 @@ st_build['id'],
+          'finish_time': last_build['finishTime'],
      })
      els
@@ -4021,24 +4021,75 @@ uild['id'],
+          'finish_time': last_build['finishTime'],
      })

@@ -4125,12 +4125,8 @@ ures -, [] )
6342c6cab9b5dd0b34ca5de575ef82592474e1d5
add mvnsite.py to build site without javadocs or test run
bin/mvnsite.py
bin/mvnsite.py
Python
0
@@ -0,0 +1,1004 @@
+#!/usr/bin/env python

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from subprocess import call
import sys

args = sys.argv[1 :]
# mvn site:site -Dmaven.javadoc.skip=true -DskipTests
call(["mvn.bat", "site:site", "-Dmaven.javadoc.skip=true", "-DskipTests"] + args)
598d937f3f180e22a1b4793644ffdb1b9a26f261
update crawler_main.py
crawler_main.py
crawler_main.py
""" crawler study code Author: [email protected] """ import urllib.request import os import re from collections import deque from filecmp import cmp ERROR_RETURN = "ERROR:" def retrun_is_error(return_str): return return_str[0 : len(ERROR_RETURN)] == ERROR_RETURN def python_cnt(str): return str.count("python") def get_one_page(url): try: urlfd = urllib.request.urlopen(url, timeout = 2) except Exception as ex: return ERROR_RETURN + ("URL " + "\"" + url + "\"" + " open failed. " + str(ex)) if "html" not in urlfd.getheader("Content-Type"): return ERROR_RETURN + ("URL " + "\"" + url + "\"" + "is not html page.") try: html_str = urlfd.read().decode("utf-8") except: return ERROR_RETURN + ("Fail to decode URL " + "\"" + url + "\"" + ".") return html_str if __name__ == "__main__": start_url = "http://news.dbanotes.net/" to_be_visited = deque() visited = set() cnt = 0 py_str_cnt = 0 to_be_visited.append(start_url) while to_be_visited: url = to_be_visited.popleft() print(str(cnt) + "page(s) has been grabbed." + "URL " + "\"" + url + "\"" + " is being grabbed.") html_str = get_one_page(url) if retrun_is_error(html_str): print(html_str) continue cnt += 1 visited |= {url} py_cnt_tmp = python_cnt(html_str) if py_cnt_tmp != 0: py_str_cnt += py_cnt_tmp print("Find %d \"python\" , total count %d" % (py_cnt_tmp, py_str_cnt)) #todo: parse the html_str link_pattern = re.compile('href=\"(.+?)\"') #links' regular expression for tmp_url in link_pattern.findall(html_str): if "http" in tmp_url and tmp_url not in visited: to_be_visited.append(tmp_url)
Python
0.000001
@@ -130,32 +130,8 @@ eque
-from filecmp import cmp

ER
@@ -150,16 +150,17 @@ "ERROR: + "
def re
@@ -1425,32 +1425,40 @@ = {url}
+
    py_cnt_
@@ -1734,15 +1734,8 @@ ') # -links' regu
e904341eb7b426ea583e345689249d7f13451dc9
Add biome types.
biome_types.py
biome_types.py
Python
0
@@ -0,0 +1,524 @@
+biome_types = {
    -1: "Will be computed",
    0: "Ocean",
    1: "Plains",
    2: "Desert",
    3: "Extreme Hills",
    4: "Forest",
    5: "Taiga",
    6: "Swampland",
    7: "River",
    8: "Hell",
    9: "Sky",
    10: "FrozenOcean",
    11: "FrozenRiver",
    12: "Ice Plains",
    13: "Ice Mountains",
    14: "MushroomIsland",
    15: "MushroomIslandShore",
    16: "Beach",
    17: "DesertHills",
    18: "ForestHills",
    19: "TaigaHills",
    20: "Extreme Hills Edge",
    21: "Jungle",
    22: "JungleHills",
}
0a0b322ca7d42d28ba495b7786cd2bd92c0bfd34
Add test_register.py
tests/test_assembler/test_register.py
tests/test_assembler/test_register.py
Python
0.000003
@@ -0,0 +1,535 @@
+'Test of videocore.Register'

from nose.tools import raises

from videocore.assembler import Register, AssembleError, REGISTERS

def test_register_names():
    for name in REGISTERS:
        assert name == REGISTERS[name].name
        assert name == str(REGISTERS[name])

@raises(AssembleError)
def test_pack_of_accumulator():
    REGISTERS['r0'].pack('nop')

@raises(AssembleError)
def test_pack_of_regfileB():
    REGISTERS['rb0'].pack('nop')

@raises(AssembleError)
def test_unpack_of_regfileB():
    REGISTERS['rb0'].unpack('nop')
12266ffcb7fcb809ec0e0a3102077581e64eb9e0
Update migrations
server/adventures/migrations/0002_auto_20160909_1901.py
server/adventures/migrations/0002_auto_20160909_1901.py
Python
0.000001
@@ -0,0 +1,1324 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-09 19:01
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('adventures', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Setting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
            ],
        ),
        migrations.AddField(
            model_name='adventure',
            name='publisher',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='adventures.Publisher'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='adventure',
            name='edition',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='adventures.Edition'),
        ),
        migrations.AddField(
            model_name='adventure',
            name='setting',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='adventures.Setting'),
            preserve_default=False,
        ),
    ]
df9b7cd8d1b34f8c29c372589ad9efd3a5435d0f
Implement TwitchWordsCounterBot class.
twitchbot/twitch_words_counter_bot.py
twitchbot/twitch_words_counter_bot.py
Python
0
@@ -0,0 +1,1211 @@
+import irc.bot
import irc.strings

from .words_counter import WordsCounter


class TwitchWordsCounterBot(irc.bot.SingleServerIRCBot):
    def __init__(self, channel, nickname, password, server, port=6667):
        irc.bot.SingleServerIRCBot.__init__(self, [(server, port, password)], nickname, nickname)
        self.server = server
        self.channel = channel
        self.words_counter = WordsCounter()

    def start(self):
        print("Connecting to the server '%s'..." % self.server)
        super(TwitchWordsCounterBot, self).start()

    def on_welcome(self, c, e):
        print("Connected to the server '%s'." % self.server)
        print("Joining to the channel '%s'..." % self.channel)
        c.join(self.channel)

    def _on_join(self, c, e):
        super(TwitchWordsCounterBot, self)._on_join(c, e)
        print("Joined to the channel '%s'!" % self.channel)

    def _on_disconnect(self, c, e):
        super(TwitchWordsCounterBot, self)._on_disconnect(c, e)
        print("Disconnected from the server '%s'." % self.server)
        print(e)

    def on_pubmsg(self, c, e):
        message = e.arguments[0]
        self.words_counter.count_words(message)
        print(self.words_counter)
6136eef341f1ac5ce0be278c3ab78192192d0efa
check if OS is UNIX-y
posix.py
posix.py
Python
0.999519
@@ -0,0 +1,155 @@
+#!/bin/py

from sys import platform

def osCheck():
	# Check if OS is UNIX-y
	if "darwin" or "linux" in platform.lower():
		print platform

osCheck()
2a0724922bde4cdd5219c721cdfd5460a2e5f3ed
Create Timely_Tweeter.py
Timely_Tweeter.py
Timely_Tweeter.py
Python
0.000001
@@ -0,0 +1,774 @@
+#-=- Coding: Python UTF-8 -=-

import tweepy, time, sys

argfile = str(sys.argv[1])

#Twitter Account info
    #Place Keys and Tokens bewteen the quotes
CONSUMER_KEY = ''  #The Consumer Key (API Key)
CONSUMER_SECRET = ''  #The Consumer Secret (API Secret)
ACCESS_KEY = ''  #The Access Token
ACCESS_SECRET = ''  #The Access Token Secret
SLEEPY_TIME =   #Time to wait in seconds between tweets

#Now it checks in with Twitter and gets authenticated
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)

filename=open(argfile, 'r')  #Opens file
f=filename.readlines()  #Pulls data from file
filename.close()  #Closes file

for line in f:
    api.update_status(line)
    time.sleep(SLEEPY_TIME)  #Time to wait
9c0750ef401870e0187e3b7f0e4e39cf3d7e3944
Make sure the profile data is unmarshallable as profile data.
test/test_benchmarks.py
test/test_benchmarks.py
# -*- coding: utf-8 -*-

# Licensed under a 3-clause BSD style license - see LICENSE.rst

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import os

import pytest
import six

from asv import benchmarks
from asv import config
from asv import environment

BENCHMARK_DIR = os.path.join(os.path.dirname(__file__), 'benchmark')

INVALID_BENCHMARK_DIR = os.path.join(
    os.path.dirname(__file__), 'benchmark.invalid')

ASV_CONF_JSON = {
    'benchmark_dir': BENCHMARK_DIR,
    'repo': 'https://github.com/spacetelescope/asv.git',
    'project': 'asv'
}


def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = os.path.join(tmpdir, "env")
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='example')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
    assert len(b) == 1

    b = benchmarks.Benchmarks(conf)
    assert len(b) == 7

    envs = list(environment.get_environments(
        conf.env_dir, conf.pythons, conf.matrix))
    b = benchmarks.Benchmarks(conf)
    times = b.run_benchmarks(envs[0], profile=True)

    assert len(times) == 7
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times[
        'subdir.time_subdir.time_foo']['result'] is not None
    assert times[
        'mem_examples.mem_list']['result'] > 2000
    assert times[
        'time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times[
        'time_secondary.track_value']


def test_invalid_benchmark_tree(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['benchmark_dir'] = INVALID_BENCHMARK_DIR
    d['env_dir'] = os.path.join(tmpdir, "env")
    conf = config.Config.from_json(d)

    with pytest.raises(ValueError):
        b = benchmarks.Benchmarks(conf)
Python
0.000002
@@ -203,16 +203,30 @@ ort os

+import pstats
 import p
@@ -1911,16 +1911,210 @@ alue']

+    profile_path = os.path.join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)


def tes
d1ecc996269a801c65d3b88791f7f5546c8af1b8
add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,321 @@
+from setuptools import setup

setup(
    name='daria',
    version='0.0.1',
    description='pytorch trainer',
    author='odanado',
    author_email='[email protected]',
    url='https://github.com/odanado/daria',
    license='MIT License',
    packages=['daria'],
    tests_require=['mock'],
    test_suite='tests',
)
38bf3ce6db844999fe5903dad91e991c6fea57c7
Add setup
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,686 @@
+#!/usr/bin/env python

from setuptools import setup, find_packages


setupconf = dict(
    name = 'contract',
    version = '0.3',
    license = 'BSD',
    url = 'https://github.com/Deepwalker/contract/',
    author = 'Barbuza, Deepwalker',
    author_email = '[email protected]',
    description = ('Validation and parsing library'),
    long_description = "Place README here",

    packages = find_packages(),

    classifiers = [
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
    ],
    )

if __name__ == '__main__':
    setup(**setupconf)
ea7d55fa309d592669e86dae826b7cc08323de16
update setup.py version to 0.2
setup.py
setup.py
from distutils.core import setup

setup(name='mpmath',
      description = 'Python library for arbitrary-precision floating-point arithmetic',
      version='0.1',
      url='http://mpmath.googlecode.com',
      author='Fredrik Johansson',
      author_email='[email protected]',
      license = 'BSD',
      packages=['mpmath'],
)
Python
0
@@ -161,9 +161,9 @@ ='0. -1 +2 ',
12ece36bf0355ad619635675b419d9d0e7163cf4
Add setup.py file
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,341 @@
+#!/usr/bin/env python

from setuptools import setup, find_packages

setup(
    name='django-cache-relation',
    description="Non-magical object caching for Django.",
    version='0.1',
    url='http://code.playfire.com/',

    author='Playfire.com',
    author_email='[email protected]',
    license='BSD',

    packages=find_packages(),
)
30d3f42b4910b84b2a3419e43ea6e5e6da2ab7a0
Add setup
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,362 @@
+from setuptools import setup

setup(name = 'enzynet',
      description = 'EnzyNet: enzyme classification using 3D convolutional neural networks on spatial representation',
      author = 'Afshine Amidi and Shervine Amidi',
      author_email = '<author1-lastname>@mit.edu, <author2-firstname>@stanford.edu',
      license = 'MIT',
      packages = ['enzynet'])
45e624fe5176dd59b8f42636b777a1b6a6106dca
Add initial setuptools integration, required by click
setup.py
setup.py
Python
0
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4

from setuptools import setup

setup(
    name='loafer',
    version='0.0.1',
    entry_points='''
        [console_scripts]
        loafer=loafer.cli:cli
    ''',
)
81e7e9ed4b3b0f6840e11adc5c73648471f606ef
Add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,907 @@
+# coding: utf-8

from __future__ import print_function, unicode_literals

import sys

from setuptools import setup

install_requires = []
if sys.version_info[0] == 2:
    install_requires.append('statistics')

setup(
    name='scrapy-slotstats',
    version='0.1',
    license='MIT License',
    description='Scrapy extension to show statistics of downloader slots',
    author='orangain',
    author_email='[email protected]',
    url='https://github.com/orangain/scrapy-slotstats',
    keywords="scrapy downloader slot stats",
    py_modules=['scrapy_slotstats'],
    platforms=['Any'],
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Framework :: Scrapy',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
    ]
)
21380bcf76a8144d182166c3441d308af2eda417
Add first pass at setup.py
setup.py
setup.py
Python
0
@@ -0,0 +1,449 @@ +#!/usr/bin/python%0Aimport os%0Afrom distutils.core import setup, Extension%0A%0Aext_modules = %5B%5D%0A%0Apackages = %5B'bayesdb', 'bayesdb.tests'%5D%0Asetup(%0A name='BayesDB',%0A version='0.1',%0A author='MIT.PCP',%0A author_email = '[email protected]',%0A url='probcomp.csail.mit.edu/bayesdb',%0A long_description='BayesDB',%0A packages=packages,%0A package_dir=%7B'bayesdb':'bayesdb/'%7D,%0A ext_modules=ext_modules,%0A )%0A
374e27087d6d432ba01a0ef65c4109be84e50dcf
Add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,1752 @@ +import os%0Aimport sys%0A%0Atry:%0A from setuptools import setup%0Aexcept ImportError:%0A from distutils.core import setup%0A%0Atry:%0A from distutils.command.build_py import build_py_2to3 as build_py%0Aexcept ImportError:%0A from distutils.command.build_py import build_py%0A%0Apath, script = os.path.split(sys.argv%5B0%5D)%0Aos.chdir(os.path.abspath(path))%0A%0A# Don't import rjmetrics module here, since deps may not be installed%0Asys.path.insert(0, os.path.join(os.path.dirname(__file__), 'rjmetrics'))%0Afrom version import VERSION%0A%0Ainstall_requires = %5B'requests %3E= 0.8.8'%5D%0A%0A# Get simplejson if we don't already have json%0Aif sys.version_info %3C (3, 0):%0A try:%0A from util import json%0A except ImportError:%0A install_requires.append('simplejson')%0A%0Asetup(%0A name='rjmetrics',%0A cmdclass=%7B'build_py': build_py%7D,%0A version=VERSION,%0A description='Python client for RJMetrics APIs',%0A author='RJMetrics',%0A author_email='[email protected]',%0A url='https://rjmetrics.com/',%0A packages=%5B'rjmetrics', 'rjmetrics.test'%5D,%0A package_data=%7B'rjmetrics': %5B'../VERSION'%5D%7D,%0A install_requires=install_requires,%0A test_suite='rjmetrics.test.all',%0A use_2to3=True,%0A classifiers=%5B%0A %22Development Status :: 4 - Beta%22,%0A %22Intended Audience :: Developers%22,%0A %22License :: OSI Approved :: Apache License%22,%0A %22Operating System :: OS Independent%22,%0A %22Programming Language :: Python%22,%0A %22Programming Language :: Python :: 2%22,%0A %22Programming Language :: Python :: 2.7%22,%0A %22Programming Language :: Python :: 3%22,%0A %22Programming Language :: Python :: 3.4%22,%0A %22Programming Language :: Python :: Implementation :: PyPy%22,%0A %22Topic :: Software Development :: Libraries :: Python Modules%22,%0A %5D)%0A
cde4cf347080d927e9e7d3aad2146300ddf5ebbd
version 0.1.0
setup.py
setup.py
Python
0.000003
@@ -0,0 +1,1693 @@ +# python setup.py register bdist_wheel upload -r https://www.python.org/pypi%0A%0Afrom setuptools import setup, find_packages%0A# To use a consistent encoding%0Afrom codecs import open%0Afrom os import path%0A%0Ahere = path.abspath(path.dirname(__file__))%0A%0A# Get the long description from the README file%0Awith open(path.join(here, 'README.rst'), encoding='utf-8') as f:%0A long_description = f.read()%0A%0Asetup(%0A name='uchord',%0A version='0.1.0',%0A description='Creating Ukulele Chord Diagrams in SVG with Python',%0A long_description=long_description,%0A url='https://github.com/gkvoelkl/python-ukulele-chord-to-svg',%0A author='gkvoelkl',%0A author_email='[email protected]',%0A%0A license='MIT',%0A%0A classifiers=%5B%0A 'Development Status :: 4 - Beta',%0A 'Intended Audience :: Developers',%0A 'Topic :: Multimedia :: Sound/Audio',%0A 'License :: OSI Approved :: MIT License',%0A 'Programming Language :: Python :: 3',%0A 'Programming Language :: Python :: 3.3',%0A 'Programming Language :: Python :: 3.4',%0A 'Programming Language :: Python :: 3.5',%0A 'Programming Language :: Python :: 3.6',%0A %5D,%0A%0A keywords= %5B%0A 'music',%0A 'ukulele',%0A 'chord',%0A 'audio',%0A 'svg'%0A %5D,%0A%0A #packages=find_packages(),%0A py_modules=%5B'uchord'%5D,%0A #install_requires=%5B'python-osc'%5D,%0A%0A # To provide executable scripts, use entry points in preference to the%0A # %22scripts%22 keyword. Entry points provide cross-platform support and allow%0A # pip to create the appropriate form of executable for the target platform.%0A #entry_points=%7B%0A # 'console_scripts': %5B%0A # 'sample=sample:main',%0A # %5D,%0A #%7D,%0A)%0A%0A
9c05031446d0d17bdc207b00ebf47d9769f96d33
Add a setup.py for owebunit to be able to obtain ocookie via pip
setup.py
setup.py
Python
0
@@ -0,0 +1,278 @@ +#!/usr/bin/env python%0A%0Afrom distutils.core import setup%0A%0Asetup(name='ocookie',%0A version='0.1',%0A description='Comprehensive cookie library',%0A author='Oleg Pudeyev',%0A author_email='[email protected]',%0A url='http://github.com/p/ocookie',%0A packages=%5B'ocookie'%5D,%0A)%0A
431acaabf7a3e77b416a57998bfadcb2d3864555
Add a setup.py
setup.py
setup.py
Python
0
@@ -0,0 +1,861 @@ +from setuptools import setup, find_packages%0Aimport codecs%0Aimport os%0Aimport re%0A%0Asetup(%0A name=%22httpbin%22,%0A version=%220.1.0%22,%0A description=%22HTTP Request and Response Service%22,%0A%0A # The project URL.%0A url='https://github.com/kennethreitz/httpbin',%0A%0A # Author details%0A author='Kenneth Reitz',%0A author_email='[email protected]',%0A%0A # Choose your license%0A license='MIT',%0A%0A classifiers=%5B%0A 'Development Status :: 5 - Production/Stable',%0A 'Intended Audience :: Developers',%0A 'Natural Language :: English',%0A 'License :: OSI Approved :: MIT License',%0A 'Programming Language :: Python',%0A 'Programming Language :: Python :: 2.7',%0A 'Programming Language :: Python :: 3.4',%0A %5D,%0A packages=find_packages(),%0A install_requires=%5B'Flask','MarkupSafe','decorator','itsdangerous','six'%5D,%0A)%0A
82b8651c9eed0c19224c8a7b53a0bedae81337a3
Add a setup.py.
setup.py
setup.py
Python
0
@@ -0,0 +1,196 @@ +%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A%0A name = %22WebStar%22,%0A version = %220.1b%22,%0A %0A author=%22Mike Boers%22,%0A author_email=%[email protected]%22,%0A license=%22BSD-3%22%0A)%0A
d157b4e1f4709b0205d5de31df65a5308f926d49
Add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,608 @@ +#!/usr/bin/env python%0A# coding: utf-8%0A%0Aimport re%0A%0Atry:%0A from setuptools import setup%0Aexcept ImportError:%0A from distutils.core import setup%0A%0A%0Aversion = %22%22%0A%0Awith open(%22autumn.py%22, %22r%22) as f:%0A version = re.search(r'%5E__version__%5Cs*=%5Cs*%5B%5C'%22%5D(%5B%5E%5C'%22%5D*)%5B%5C'%22%5D',%0A f.read(), re.MULTILINE).group(1)%0A%0Aif not version:%0A raise RuntimeError(%22No version information%22)%0A%0A%0Asetup(name=%22autumn%22,%0A version=version,%0A description=%22A simple Pythonic MySQL ORM.%22,%0A author=%22ushuz%22,%0A url=%22https://github.com/ushuz/autumn%22,%0A py_modules=%5B%22autumn%22%5D,%0A license=%22MIT License%22,%0A)%0A
a2bfe07ba67e902870dd366626b23dbb5e6e2696
Create messageMode.py
messageMode.py
messageMode.py
Python
0.000001
@@ -0,0 +1,1828 @@ +%0A#!/usr/bin/python%0A#coding=utf-8%0A#filename: messageMode.py%0A %0Aimport telnetlib%0Aimport os,sys,commands,multiprocessing%0Aimport smtplib%0Aimport time%0Afrom email.mime.multipart import MIMEMultipart%0Afrom email.mime.text import MIMEText%0Afrom email.mime.image import MIMEImage%0Aimport urllib2%0A%0A%0A#---init---%0Abegintime = time.strftime('%25Y-%25m-%25d %25H:%25M:%25S',time.localtime(time.time()))%0Amuti_phone='13521161000'%0Amuti_mail='[email protected]'%0Apythonlog ='/home/sms_mail.log'%0A%0Asender = '[email protected]'%0Asmtpserver = 'hxx.163.com'%0Ausername = '[email protected]'%0Apassword = 'password'%0A %0A#----------%0A %0A%0Adef sendtxtmail(_subject,_mail_off,_msg,_fuc_mail,_begintime):%0A for mail_index in range(0, len(_fuc_mail.split(';'))):%0A if _mail_off == 1:%0A break%0A _receiver = _fuc_mail.split(';')%5Bmail_index%5D%0A if _receiver.find('null') == -1:%0A try:%0A msg = MIMEText('%3Chtml%3E'+_msg+'%3C/html%3E','html','utf-8')%0A msg%5B'Subject'%5D = _subject%0A msg%5B'to'%5D = _receiver%0A smtp = smtplib.SMTP()%0A smtp.connect(smtpserver)%0A smtp.login(username, password)%0A smtp.sendmail(sender,_receiver, msg.as_string())%0A smtp.quit()%0A os.system(%22echo %22+_begintime+' '+_subject+' '+_receiver+%22 mail send successful %3E%3E %22+pythonlog)%0A print %22mail send successful%22%0A except Exception,e:%0A print %22mail send fail%22%0A print e%5B1%5D%0A os.system(%22echo %22+_begintime+' '+_subject+' '+_receiver+%22 mail send fail ,Code: %22+str(e%5B0%5D)+' '+e%5B1%5D.split()%5B0%5D+'- -! %3E%3E'+pythonlog)%0A return 'mail func over'%0A%0A%0A%0A%0Adef main(arg_msg):%0A sendtxtmail('test_subject',0,arg_msg,muti_mail,begintime)%0A return 'main func over'%0A%0A %0A%0Aif __name__ == %22__main__%22:%0A print main(sys.argv%5B1%5D)%0A
3d020f09332093807f70a1bca5360e1418633bb4
Add setup.py.
setup.py
setup.py
Python
0
@@ -0,0 +1,100 @@ +from setuptools import setup, find_packages%0Asetup(name='Anytask',%0A packages=find_packages(),%0A)%0A
b38eb4f8a7b8e3400ea09c600e241d8c4a9d0846
Add setup so sgfs can install this to test with
setup.py
setup.py
Python
0
@@ -0,0 +1,635 @@ +from distutils.core import setup%0A%0Asetup(%0A name='sgsession',%0A version='0.1-dev',%0A description='Shotgun ORM/Session.',%0A url='http://github.com/westernx/sgsession',%0A %0A packages=%5B'sgsession'%5D,%0A %0A author='Mike Boers',%0A author_email='[email protected]',%0A license='BSD-3',%0A %0A classifiers=%5B%0A 'Intended Audience :: Developers',%0A 'License :: OSI Approved :: BSD License',%0A 'Natural Language :: English',%0A 'Operating System :: OS Independent',%0A 'Programming Language :: Python :: 2',%0A 'Topic :: Software Development :: Libraries :: Python Modules',%0A %5D,%0A %0A)
5263a684d4bd111b903456a8da2c92ddb25e7811
Add migration
seriesly/series/migrations/0002_auto_20180127_0718.py
seriesly/series/migrations/0002_auto_20180127_0718.py
Python
0.000002
@@ -0,0 +1,918 @@ +# Generated by Django 2.0 on 2018-01-27 13:18%0A%0Afrom django.db import migrations, models%0Aimport django.utils.timezone%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('series', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='show',%0A name='added',%0A field=models.DateTimeField(default=django.utils.timezone.now),%0A ),%0A migrations.AlterField(%0A model_name='show',%0A name='country',%0A field=models.CharField(blank=True, max_length=255),%0A ),%0A migrations.AlterField(%0A model_name='show',%0A name='network',%0A field=models.CharField(blank=True, max_length=255),%0A ),%0A migrations.AlterField(%0A model_name='show',%0A name='timezone',%0A field=models.CharField(blank=True, max_length=255),%0A ),%0A %5D%0A
874fbb6749d60ea3fcf078d25d7911d7ac314ab1
Add a setup.py file for use with python install tools.
setup.py
setup.py
Python
0
@@ -0,0 +1,430 @@ +try:%0A from setuptools import setup%0Aexcept ImportError:%0A from distutils.core import setup%0A%0Aconfig = %7B%0A 'description': 'File validator',%0A 'author': 'Iestyn Pryce',%0A 'url': '',%0A 'download_url': '',%0A 'author_email': '[email protected]',%0A 'version': '0.1',%0A 'install_requires': %5B'nose'%5D,%0A 'packages': %5B'validator'%5D,%0A 'scripts': %5B'bin/validate_file.py'%5D,%0A 'name': 'validator'%0A%7D%0A%0Asetup(**config)%0A
952bbd2ba7b58856487ce96a3c8bdd4bd35b7d77
version bump
setup.py
setup.py
from __future__ import unicode_literals, print_function import os from setuptools import setup, find_packages def read(fname): return open(os.path.join(os.path.dirname(__file__), fname), 'rb') \ .read().decode('utf-8') setup( name='xunitmerge', version='1.0.2', author='Miroslav Shubernetskiy', author_email='[email protected]', description='Utility for merging multiple XUnit xml reports ' 'into a single xml report.', long_description=read('README.rst') + read('LICENSE.rst'), url='https://github.com/miki725/xunitmerge', packages=find_packages(exclude=['test', 'test.*']), scripts=['bin/xunitmerge'], install_requires=[ 'six', ], keywords=' '.join([ 'xunit', 'reports', ]), classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3", "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Software Development :: Testing", ], license='MIT', )
Python
0.000001
@@ -275,17 +275,17 @@ on='1.0. -2 +3 ',%0A a
3258a5ba8c748ce079082c34d13b231f157b1463
Add experimental top-level copy of setup.py
setup.py
setup.py
Python
0
@@ -0,0 +1,2520 @@ +#!/usr/bin/env python%0A%0A# Original libphonenumber Java code:%0A# Copyright (C) 2009-2011 The Libphonenumber Authors%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport distutils.core%0Aimport sys%0A# Importing setuptools adds some features like %22setup.py test%22, but%0A# it's optional so swallow the error if it's not there.%0Atry:%0A import setuptools%0Aexcept ImportError:%0A pass%0A%0Amajor, minor = sys.version_info%5B:2%5D%0Apython_25 = (major %3E 2 or (major == 2 and minor %3E= 5))%0Aif not python_25:%0A raise RuntimeError(%22Python 2.5 or newer is required%22)%0Apython_3x = (major %3E= 3)%0Aif python_3x:%0A package_name = 'phonenumbers3k'%0A dev_status = 'Development Status :: 3 - Alpha'%0Aelse:%0A package_name = 'phonenumbers'%0A dev_status = 'Development Status :: 4 - Beta'%0A%0A# Add ./python/ subdirectory to path%0Asys.path.append('python')%0A%0A# Discover version of phonenumbers package%0Afrom phonenumbers import __version__%0A%0Adistutils.core.setup(name=package_name,%0A version=__version__,%0A description=%22Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.%22,%0A author='David Drysdale',%0A author_email='[email protected]',%0A url='https://github.com/daviddrysdale/python-phonenumbers',%0A license='Apache License 2.0',%0A packages=%5B'phonenumbers', 'phonenumbers.data', 'phonenumbers.geodata'%5D,%0A package_dir=%7B'': 'python'%7D,%0A test_suite=%22tests%22,%0A platforms='Posix; MacOS X; Windows',%0A classifiers=%5Bdev_status,%0A 'Intended Audience :: Developers',%0A 'License :: OSI Approved :: Apache Software License',%0A 'Operating System :: OS Independent',%0A 'Topic :: Communications :: Telephony',%0A %5D,%0A )%0A
58dd2d188aab1fbf30ff843307eecf5ca685527c
Add setup
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,716 @@ +from setuptools import find_packages, setup%0A%0Asetup(%0A name='ngx-task',%0A version='0.1',%0A description='Testimonial for candidates to show up their code-foo',%0A author='Dmitry Shulyak',%0A author_email='[email protected]',%0A url='https://github.com/shudmi/ngx-task',%0A classifiers=%5B%0A 'License :: Apache License 2.0',%0A 'Programming Language :: Python',%0A 'Programming Language :: Python 3',%0A 'Programming Language :: Python 3.4',%0A %5D,%0A packages=find_packages(exclude=%5B'tests', 'tests.*'%5D),%0A install_requires=%5B%5D,%0A entry_points=%22%22%22%0A %5Bconsole_scripts%5D%0A ngx_generate=ngx_task.cli.generate_data%0A ngx_process=ngx_task.cli.process_data%0A %22%22%22%0A)%0A
90746eba08c67c4f62462ed74d08566cafa18724
Add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,765 @@ +#!/usr/bin/env python%0A%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name='wrenet',%0A version='0.1',%0A description='Network configurations viewer in the Windows Registry',%0A author='graypawn',%0A author_email='choi.pawn' '@gmail.com',%0A url='https://github.com/graypawn/wrenet',%0A license='Apache License (2.0)',%0A packages=find_packages(),%0A install_requires = %7B'python-registry %3E= 1.0.0'%7D,%0A classifiers = %5B%22Programming Language :: Python%22,%0A %22Programming Language :: Python :: 3%22,%0A %22Operating System :: POSIX :: Linux%22,%0A %22License :: OSI Approved :: Apache Software License%22%5D,%0A entry_points=%7B%0A 'console_scripts': %5B%0A 'wrenet=wrenet.wrenet:main'%0A %5D%0A %7D%0A)%0A
50742b6e629e6f54a9f3784a3c1495eb9d82c238
Add start of processed package
brightway_projects/processing/processed_package.py
brightway_projects/processing/processed_package.py
Python
0
@@ -0,0 +1,3294 @@ +from ..errors import InconsistentFields, NonUnique%0A%0A%0Adef greedy_set_cover(data, exclude=None):%0A %22%22%22Find unique set of attributes that uniquely identifies each element in %60%60data%60%60.%0A%0A Feature selection is a well known problem, and is analogous to the %60set cover problem %3Chttps://en.wikipedia.org/wiki/Set_cover_problem%3E%60__, for which there is a %60well known heuristic %3Chttps://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm%3E%60__.%0A%0A Args:%0A data (iterable): List of dictionaries with the same fields.%0A exclude (iterable): Fields to exclude during search for uniqueness. %60%60id%60%60 is Always excluded.%0A%0A Returns:%0A Set of attributes (strings)%0A%0A Raises:%0A NonUnique: The given fields are not enough to ensure uniqueness%0A %22%22%22%0A if exclude is None:%0A exclude = %7B%22id%22%7D%0A else:%0A exclude = set(exclude)%0A exclude.add(%22id%22)%0A%0A%0Adef as_unique_attributes(data, exclude=None, include=None):%0A %22%22%22Format %60%60data%60%60 as unique set of attributes and values for use in %60%60create_processed_datapackage%60%60.%0A%0A Note: Each element in %60%60data%60%60 must have the attributes %60%60id%60%60.%0A%0A data = %5B%0A %7B%7D,%0A %5D%0A%0A Args:%0A data (iterable): List of dictionaries with the same fields.%0A exclude (iterable): Fields to exclude during search for uniqueness. %60%60id%60%60 is Always excluded.%0A include (iterable): Fields to include when returning, even if not unique%0A%0A Returns:%0A (list of field names as strings, dictionary of data ids to values for given field names)%0A%0A Raises:%0A InconsistentFields: Not all features provides all fields.%0A %22%22%22%0A include = set(%5B%5D) if include is None else set(include)%0A fields = greedy_set_cover(data, exclude)%0A%0A if len(%7Bset(obj.keys()) for obj in data%7D) %3E 1:%0A raise InconsistentFields%0A%0A def formatter(obj, fields, include):%0A return %7B%0A key: value%0A for key, value in obj.items()%0A if (key in fields or key in include or key == %22id%22)%0A %7D%0A%0A return (fields, %5Bformatter(obj, fields, include) for obj in data%5D)%0A%0A%0Adef create_processed_datapackage(%0A array,%0A rows,%0A cols,%0A filepath=None,%0A id_=None,%0A metadata=None,%0A replace=True,%0A compress=True,%0A in_memory=False,%0A):%0A %22%22%22Create a datapackage with numpy structured arrays and metadata.%0A%0A Exchanging large, dense datasets like MRIO tables is not efficient if each exchange must be listed separately. Instead, we would prefer to exchange the processed arrays used to build the matrices directly. However, these arrays use integer indices which are not consistent across computers or even Brightway projects. This function includes additional metadata to solve this problem, mapping these integer ids to enough attributes to uniquely identify each feature. Separate metadata files are included for each column in the array (i.e. the row and column indices).%0A%0A Args:%0A array (numpy structured array): The numeric data. Usually generated via %60%60create_numpy_structured_array%60%60.%0A rows (dict): Dictionary mapping integer indices in %60%60row_value%60%60 to a dictionary of attributes.%0A cols (dict): Dictionary mapping integer indices in %60%60col_value%60%60 to a dictionary of attributes.%0A%0A Returns:%0A Something :)%0A %22%22%22%0A pass%0A
c68cda0549bb9c47be0580ecd43f55966e614b31
Add Pascal's Triangle/nCr Table
mathematics/combinatorics/ncr_table/kevin.py
mathematics/combinatorics/ncr_table/kevin.py
Python
0.000004
@@ -0,0 +1,740 @@ +#!/usr/bin/env python%0A%0A# https://www.hackerrank.com/challenges/ncr-table%0A%0A%0Adef get_number():%0A return int(input().strip())%0A%0A%0Adef nCr(row_number):%0A rows = %5B%5B1%5D, %5B1, 1%5D, %5B1, 2, 1%5D%5D%0A while row_number %3E= len(rows):%0A # 1%0A # 1 1%0A # 1 2 1%0A # 1 4 4 1%0A # .......%0A row = %5B(rows%5B-1%5D%5Bindex%5D + rows%5B-1%5D%5Bindex + 1%5D)%0A for index in range(len(rows) - 1)%5D%0A rows.append(%5B1%5D + row + %5B1%5D)%0A%0A # Spew elements with * to show the proper output%0A print(*rows%5Brow_number%5D)%0A%0A%0A# Generate this row from the nCr table%0Ainputs = %5B%5D%0Anumber_of_items = get_number()%0Afor i in range(number_of_items):%0A pascals_row = get_number()%0A inputs.append(pascals_row)%0Aprint()%0A%5BnCr(item) for item in inputs%5D%0A
7e04f5012c44fac086fc0f693dc653884d7377a3
remove errant print
beaver/utils.py
beaver/utils.py
# -*- coding: utf-8 -*- import argparse import glob2 import itertools import logging import platform import re import sys import beaver logging.basicConfig() MAGIC_BRACKETS = re.compile("({([^}]+)})") IS_GZIPPED_FILE = re.compile(".gz$") REOPEN_FILES = 'linux' not in platform.platform().lower() cached_regices = {} def parse_args(): epilog_example = """ Beaver provides an lightweight method for shipping local log files to Logstash. It does this using either redis, stdin, zeromq as the transport. This means you'll need a redis, stdin, zeromq input somewhere down the road to get the events. Events are sent in logstash's json_event format. Options can also be set as environment variables. Please see the readme for complete examples. """ parser = argparse.ArgumentParser(description='Beaver logfile shipper', epilog=epilog_example, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-c', '--configfile', help='ini config file path', dest='config', default='/dev/null') parser.add_argument('-d', '--debug', help='enable debug mode', dest='debug', default=False, action='store_true') parser.add_argument('-D', '--daemonize', help='daemonize in the background', dest='daemonize', default=False, action='store_true') parser.add_argument('-f', '--files', help='space-separated filelist to watch, can include globs (*.log). Overrides --path argument', dest='files', default=None, nargs='+') parser.add_argument('-F', '--format', help='format to use when sending to transport', default=None, dest='format', choices=['json', 'msgpack', 'raw', 'rawjson', 'string']) parser.add_argument('-H', '--hostname', help='manual hostname override for source_host', default=None, dest='hostname') parser.add_argument('-m', '--mode', help='bind or connect mode', dest='mode', default=None, choices=['bind', 'connect']) parser.add_argument('-l', '--logfile', '-o', '--output', help='file to pipe output to (in addition to stdout)', default=None, dest='output') parser.add_argument('-p', '--path', help='path to log files', default=None, dest='path') parser.add_argument('-P', '--pid', help='path to pid file', default=None, dest='pid') parser.add_argument('-t', '--transport', help='log transport method', dest='transport', default=None, choices=['rabbitmq', 'redis', 'sqs', 'stdout', 'udp', 'zmq']) parser.add_argument('-v', '--version', help='output version and quit', dest='version', default=False, action='store_true') parser.add_argument('--fqdn', help="use the machine's FQDN for source_host", dest="fqdn", default=False, action='store_true') return parser.parse_args() def setup_custom_logger(name, args=None, output=None, formatter=None): logger = logging.getLogger(name) logger.propagate = False if logger.handlers: logger.handlers = [] has_args = args is not None and type(args) == argparse.Namespace is_debug = has_args and args.debug is True if not logger.handlers: if formatter is None: formatter = logging.Formatter('[%(asctime)s] %(levelname)-7s %(message)s') handler = logging.StreamHandler() if output is None and has_args and args.daemonize: output = args.output if output is not None: handler = logging.FileHandler(output) if formatter is not False: handler.setFormatter(formatter) logger.addHandler(handler) if is_debug: logger.setLevel(logging.DEBUG) if hasattr(logging, 'captureWarnings'): logging.captureWarnings(True) else: logger.setLevel(logging.INFO) if hasattr(logging, 'captureWarnings'): logging.captureWarnings(False) logger.debug('Logger level is {0}'.format(logging.getLevelName(logger.level))) return logger def version(args): if args.version: formatter = logging.Formatter('%(message)s') logger = setup_custom_logger('beaver', args=args, formatter=formatter) logger.info('Beaver {0}'.format(beaver.__version__)) sys.exit(0) def eglob(path, exclude=None): """Like glob.glob, but supports "/path/**/{a,b,c}.txt" lookup""" fi = itertools.chain.from_iterable paths = list(fi(glob2.iglob(d) for d in expand_paths(path))) print paths if exclude: cached_regex = cached_regices.get(exclude, None) if not cached_regex: cached_regex = cached_regices[exclude] = re.compile(exclude) paths = [x for x in paths if not cached_regex.search(x)] return paths def expand_paths(path): """When given a path with brackets, expands it to return all permutations of the path with expanded brackets, similar to ant. >>> expand_paths("../{a,b}/{c,d}") ['../a/c', '../a/d', '../b/c', '../b/d'] >>> expand_paths("../{a,b}/{a,b}.py") ['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py'] >>> expand_paths("../{a,b,c}/{a,b,c}") ['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c'] >>> expand_paths("test") ['test'] >>> expand_paths("") """ pr = itertools.product parts = MAGIC_BRACKETS.findall(path) if path == "": return elif not parts: return [path] permutations = [[(p[0], i, 1) for i in p[1].split(",")] for p in parts] return [_replace_all(path, i) for i in pr(*permutations)] def _replace_all(path, replacements): for j in replacements: path = path.replace(*j) return path
Python
0.001167
@@ -4323,24 +4323,8 @@ )))%0A - print paths%0A
842869063ead9b2e6a1e22d11c9901072f2319aa
Add script to self generate docs for recurring data types
docs/generate_spec.py
docs/generate_spec.py
Python
0
@@ -0,0 +1,900 @@ +# -*- encoding: utf-8 -*-%0A#%0A# This script is to be used to automagically generate the recurring data types%0A# documentation based on the API specification.%0A#%0A# to run it just do:%0A#%0A# $ python generate_spec.py %3E outputfile.md%0A#%0A# :authors: Arturo Filast%C3%B2%0A# :licence: see LICENSE%0A%0A%0Aimport inspect%0Afrom globaleaks.rest.messages import base%0A%0Adef create_spec(spec):%0A doc = %22%22%0A for k, v in spec.items():%0A doc += %22 %25s: %25s%5Cn%22 %25 (k, v)%0A return doc%0A%0Adef create_class_doc(klass):%0A doc = %22## %25s%5Cn%22 %25 klass.__name__%0A if klass.__doc__:%0A docstring = %5Bline.strip() for line in klass.__doc__.split(%22%5Cn%22)%5D%0A doc += '%5Cn'.join(docstring)%0A doc += %22%5Cn%22%0A doc += create_spec(klass.specification)%0A return doc%0A%0Afor name, klass in inspect.getmembers(base, inspect.isclass):%0A if issubclass(klass, base.GLTypes) and klass != base.GLTypes:%0A print create_class_doc(klass)%0A%0A
7d23ad49da0044d83f781105cb01addb1a4aa41c
Add catalog.wsgi file
catalog.wsgi
catalog.wsgi
Python
0
@@ -0,0 +1,157 @@ +#!/usr/bin/python%0Aimport sys%0Asys.path.insert(0,%22/var/www/html/catalog/%22)%0A%0Afrom catalog import app as application%0Aapplication.secret_key = 'super_secret_key'%0A
dd5ae6788b4bb3630c16ce0996b206ae5e26228f
Extract env seq
scripts/extract_paths.py
scripts/extract_paths.py
import glob import sys import avidaspatial num = sys.argv[1] env = sys.argv[2] filenames = glob.glob("*"+env+"*/lineage_locs_"+num+".dat") env = avidaspatial.parse_environment_file("../config/env"+env+".cfg", (60, 60)) outfile = open("paths_"+num+"_"+env+".dat", "w") outfile_env = open("env_seq_"+num+"_"+env+".dat", "w") for name in filenames: infile = open(name) path = infile.readline().split()[1:-1] infile.close() path = [int(i) for i in path] path = [[i % 60, i // 60] for i in path] outfile.write(str(path) + "\n") env_seq = [] for loc in path: env_seq.append(sorted(list(env[loc[1]][loc[0]]))) outfile_env.write(",".join([str(i) for i in env_seq]) + "\n") outfile.close() outfile_env.close()
Python
0.999944
@@ -58,16 +58,19 @@ v%5B1%5D%0Aenv +_id = sys.a @@ -106,16 +106,19 @@ (%22*%22+env +_id +%22*/line @@ -202,16 +202,19 @@ env%22+env +_id +%22.cfg%22, @@ -252,32 +252,35 @@ hs_%22+num+%22_%22+env +_id +%22.dat%22, %22w%22)%0Aou @@ -318,16 +318,19 @@ +%22_%22+env +_id +%22.dat%22,
c16fae0519068e40d7b1ed988f49460198f6fd43
Create decode_diameter.py
decode_diameter.py
decode_diameter.py
Python
0.000327
@@ -0,0 +1,2203 @@ +#-------------------------------------------------------------------------------%0A# Name: Decode Diameter%0A# Purpose:%0A#%0A# Author: XIAO Zhen%0A#%0A# Created: 08/10/2014%0A# Copyright: (c) XIAO Zhen 2014%0A# Licence: MIT License%0A#-------------------------------------------------------------------------------%0A#!/usr/bin/env python%0A%0Aimport os%0Aimport sys%0A%0Adef logerr(msg):%0A print %22Error: %22 + msg%0Adef loginfo(msg):%0A print %22Info : %22 + msg%0Adef output(msg):%0A print msg%0A%0A%0Adef loadAvpDefineFile(filename):%0A d = dict()%0A%0A try:%0A file = open(filename,'r')%0A except:%0A logerr(%22Cannot open file:%22 + filename)%0A return d%0A%0A cur_avp = '-1'%0A detail = %5B%5D%0A for line in file.readlines():%0A if(line%5B:4%5D == 'avp '):%0A if(cur_avp != '-1'):%0A d%5Bcur_avp%5D = detail%0A detail = %5B%5D%0A%0A cur_avp = line.split()%5B1%5D%0A if(cur_avp in d):%0A cur_avp = '-1'%0A%0A elif(line.find(%22VENDOR_ID%22) != -1 and cur_avp != '-1'):%0A cur_avp += ':' + line.split()%5B2%5D%5B:-1%5D%0A%0A elif(line.find('DATA_TYPE') != -1):%0A detail.append(line.split()%5B2%5D%5B:-1%5D)%0A%0A elif(line.find('AVP_NAME') != -1):%0A detail.append(line.split()%5B2%5D%5B1:-2%5D)%0A%0A file.close()%0A return d%0A%0Adef decode(avps,hex):%0A '''%0A 0. Grouped%0A 1. OctetString%0A 2. OctetString%0A 3. Int32%0A 4. Int64%0A 5. UInt32%0A 6. UInt64%0A 9. Address%0A 10.Time%0A 11.Diameter-Identify%0A 12.DiameterURI%0A 13.Enum%0A 459:0%0A %5B'13', 'User-Equipment-Info-Type'%5D%0A '''%0A i = 0%0A if(hex%5Bi:i + 2%5D != '01'):%0A logerr(%22This is not a diameter message!%22)%0A return%0A i += 2%0A%0A offset = %5B%5D%0A offset.append(eval('0x' + hex%5Bi:i+6%5D) - 8)%0A %0A%0A%0A%0A%0Adef main():%0A #use the the directory where the script located as current work dir%0A os.chdir(os.path.dirname(sys.argv%5B0%5D))%0A%0A #load the avp define file%0A file_name_avp_define = %22Avpdefine.avp%22%0A avps = loadAvpDefineFile(file_name_avp_define)%0A%0A i = 0%0A for avp in avps:%0A print avp%0A print avps%5Bavp%5D%0A i += 1%0A if(i == 10):%0A break%0A%0A hex = '-'%0A decode(avps,hex)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
8968251b7e1b89171b285e377d17dae299019cd0
Test that '--checks' accepts notebooks either before or after the check command (#887)
tests/test_cli_check.py
tests/test_cli_check.py
Python
0
@@ -0,0 +1,1066 @@ +import pytest%0Afrom nbformat.v4.nbbase import new_code_cell, new_notebook%0A%0Afrom jupytext import write%0Afrom jupytext.cli import jupytext%0A%0Afrom .utils import requires_black%0A%0A%[email protected]%0Adef non_black_notebook(python_notebook):%0A return new_notebook(metadata=python_notebook.metadata, cells=%5Bnew_code_cell(%221+1%22)%5D)%0A%0A%0A@requires_black%0Adef test_check_notebooks_left_or_right_black(python_notebook, tmpdir, cwd_tmpdir):%0A write(python_notebook, str(tmpdir / %22nb1.ipynb%22))%0A write(python_notebook, str(tmpdir / %22nb2.ipynb%22))%0A%0A jupytext(%5B%22*.ipynb%22, %22--check%22, %22black --check %7B%7D%22%5D)%0A jupytext(%5B%22--check%22, %22black --check %7B%7D%22, %22*.ipynb%22%5D)%0A%0A%0A@requires_black%0Adef test_check_notebooks_left_or_right_not_black(%0A non_black_notebook, tmpdir, cwd_tmpdir%0A):%0A write(non_black_notebook, str(tmpdir / %22nb1.ipynb%22))%0A write(non_black_notebook, str(tmpdir / %22nb2.ipynb%22))%0A%0A with pytest.raises(SystemExit):%0A jupytext(%5B%22*.ipynb%22, %22--check%22, %22black --check %7B%7D%22%5D)%0A%0A with pytest.raises(SystemExit):%0A jupytext(%5B%22--check%22, %22black --check %7B%7D%22, %22*.ipynb%22%5D)%0A
4694f6bf2405d0aae5e6c3fc393f8a839e8aac07
Add tests for converter.Line and converter.Generator.
tests/test_converter.py
tests/test_converter.py
Python
0
@@ -0,0 +1,2135 @@ +# coding: utf-8%0A# Copyright (c) 2010-2012 Rapha%C3%ABl Barrois%0A%0Aimport unittest%0A%0Afrom confmgr import converter%0A%0A%0Aclass LineTestCase(unittest.TestCase):%0A def test_repr(self):%0A self.assertEqual(%22Line('foo', 'bar')%22,%0A repr(converter.Line('foo', 'bar')))%0A %0A def test_equality(self):%0A self.assertEqual(%0A converter.Line('foo', 'bar'),%0A converter.Line('foo', 'bar'))%0A%0A self.assertNotEqual(%0A converter.Line('foo', 'bar'),%0A converter.Line('foo', 'baz'))%0A%0A self.assertNotEqual(%0A converter.Line('foo', 'bar'),%0A converter.Line('fo', 'bar'))%0A%0A def test_compare_to_other(self):%0A self.assertNotEqual('foo', converter.Line('foo', 'bar'))%0A self.assertNotEqual(converter.Line('foo', 'bar'), 'foo')%0A%0A def test_hash(self):%0A s = set()%0A for _i in range(5):%0A s.add(converter.Line('foo', 'bar'))%0A%0A self.assertEqual(1, len(s))%0A self.assertEqual(set(%5Bconverter.Line('foo', 'bar')%5D), s)%0A%0A def test_fill_original_normal(self):%0A l = converter.Line('foo', None)%0A self.assertEqual(None, l.original)%0A l.fill_original()%0A self.assertEqual('foo', l.original)%0A%0A def test_fill_original_comment(self):%0A l = converter.Line('#@foo', None)%0A self.assertEqual(None, l.original)%0A l.fill_original()%0A self.assertEqual('#@@foo', l.original)%0A%0A l = converter.Line('%22@foo', None)%0A self.assertEqual(None, l.original)%0A l.fill_original()%0A self.assertEqual('%22@@foo', l.original)%0A%0A l = converter.Line('!@foo', None)%0A self.assertEqual(None, l.original)%0A l.fill_original()%0A self.assertEqual('!@@foo', l.original)%0A%0A%0Aclass GeneratorTestCase(unittest.TestCase):%0A def test_no_special(self):%0A txt = %5B%0A 'foo',%0A 'bar',%0A 'baz',%0A %5D%0A%0A g = converter.Generator(txt, categories=%5B%5D, fs=None)%0A expected = %5Bconverter.Line(s, s) for s in txt%5D%0A out = list(g)%0A self.assertItemsEqual(expected, out)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
a37640d107d1dd58ba4f9db3e043020ad76cd25d
Create cam_control.py
cam_control.py
cam_control.py
Python
0.000001
@@ -0,0 +1,1788 @@ +#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0Afrom cv2 import *%0Aimport MySQLdb as ms%0Aimport time%0Aimport _mysql_exceptions as M%0Aimport os%0A%0Adef get_image():%0A    cam1 = VideoCapture(0)%0A    cam2 = VideoCapture(1)%0A    s1, img1 = cam1.read()%0A    s2, img2 = cam2.read()%0A    if s1:%0A        imwrite(%22test1.jpg%22,img1)%0A    if s2:%0A        imwrite(%22test2.jpg%22,img2)%0A%0Adef read_image():%0A    fin1 = open(%22test1.jpg%22)%0A    fin2 = open(%22test2.jpg%22)%0A    img1 = fin1.read()%0A    img2 = fin2.read()%0A    return img1,img2%0A%0Adef query() :%0A%0A    try :%0A        db = ms.connect(host=%22your_host_name%22,user=%22your_user_name%22,%5C%0A                        passwd=%22your_password%22,db=%22your_database_name%22)%0A    except(M.OperationalError):%0A        print '%5Cn', %22########ISSUE_%25s_Mysqldatabase_########%22 %25 (%22your_host_name%22)%0A        print %22########RPi_CANT_REACH_DATABASE########%22%0A        print %22########CHECK_WIRES_FROM_RPI_TO_INTERNETPROVIDER'S_ROOTER(BOX)##%22%0A        os.system(%22sudo reboot%22)%0A%0A    data1 = read_image()%5B0%5D%0A    data2 = read_image()%5B1%5D%0A%0A    try :%0A        #set up of a cursor to be able to execute a query in database.%0A        c = db.cursor()%0A        date = time.strftime(%22%25a, %25d, %25b %25Y %25H:%25M:%25S%22, time.gmtime())%0A        c.execute(%22INSERT INTO images(date,cam1,cam2) VALUES (%25s,%25s,%25s)%22, (date,data1,data2))%0A        print %22%3C--- Send image ---%3E%22,%22--- / date / --- : %22,date%0A    except(NameError) :%0A        #os.system(%22sudo reboot%22)%0A        print %22NameError: %22, NameError%0A%0Aif __name__ == %22__main__%22 :%0A%0A    while True :%0A        get_image()%0A        try :%0A            query()%0A            #print %22Ok test.jpg image found%22%0A        except :%0A            print %22No test.jpg image found%22%0A        #cam get .jpg file and send an image %5C%0A        #every 30 minutes=1800 seconds%0A        #every 5minutes = 300 seconds%0A        time.sleep(300)%0A
d2a283856a9e2559a131c5aaa2407477be993af0
add file to help gather all the data we need
collate.py
collate.py
Python
0
@@ -0,0 +1,792 @@ +import csv%0Aimport os%0Afrom glob import glob%0A%0A%0Adef collate_from_breath_meta(cohort):%0A    %22%22%22%0A    Gets all breath_meta.csv files in our specific cohort and then gets all%0A    the data from these files and returns it as a list of rows.%0A    %22%22%22%0A    if cohort not in %5B%22ardscohort%22, %22controlcohort%22%5D:%0A        raise Exception(%22Input must either be ardscohort or controlcohort%22)%0A    dirs = os.listdir(cohort)%0A    cohort_files = %5B%5D%0A    for dir in dirs:%0A        files = glob(%22%7B%7D/%7B%7D/0*_breath_meta.csv%22.format(cohort, dir))%0A        for f in files:%0A            cohort_files.append(f)%0A%0A    data = %5B%5D%0A    for f in cohort_files:%0A        with open(f) as meta:%0A            reader = csv.reader(meta)%0A            for line in reader:%0A                data.append(line)%0A    return data%0A%0A%0A%0Aif __name__ == %22__main__%22:%0A    main()%0A
d2667faded6dfdd1fb2992ec188b8fed12bb2723
Add ncurses 5.9
packages/ncurses.py
packages/ncurses.py
Python
0.000015
@@ -0,0 +1,972 @@ +class NcursesPackage (GnuPackage):%0A%09def __init__ (self):%0A%09%09GnuPackage.__init__ (self, 'ncurses', '5.9')%0A%09%09%0A%0A%09%09self.sources.extend (%5B%0A%09%09%09%09'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/hex.diff',%0A%09%09%09%09'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/ungetch_guard.diff',%0A%09%09%09%09'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/configure.diff',%0A%09%09%09%09'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/constructor_types.diff',%0A%09%09%09%09'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/pkg_config_libdir.diff'%0A%09%09%5D)%0A%0A%09def prep (self):%0A%09%09Package.prep (self)%0A%09%09if Package.profile.name == 'darwin':%0A%09%09%09for p in range (1, len (self.sources)):%0A%09%09%09%09self.sh ('patch -p0 --ignore-whitespace %3C %22%25%7Bsources%5B' + str (p) + '%5D%7D%22')%0A%0A%0A%09def make (self):%0A%09%09self.local_make_flags.extend (%5B'-DPKG_CONFIG_LIBDIR=%25s' %25 self.PKG_CONFIG_PATH%5D) %0A%09%09Package.make (self)%0A%0ANcursesPackage ()%0A
00bfd02f921a42d4f288254d1accb7546d8df2c5
Add hbase consistency check through the hbase hbck command; more checks, like backup servers or region servers, can easily be added
check_hbase.py
check_hbase.py
Python
0
@@ -0,0 +1,1832 @@ +#!/usr/bin/env python%0A# vim: ts=4:sw=4:et:sts=4:ai:tw=80%0Afrom utils import krb_wrapper,StringContext%0Aimport os%0Aimport argparse%0Aimport nagiosplugin%0Aimport re%0Aimport subprocess%0A%0Ahtml_auth = None%0A%0Adef parser():%0A    version=%220.1%22%0A    parser = argparse.ArgumentParser(description=%22Checks datanode%22)%0A    parser.add_argument('-p', '--principal', action='store', dest='principal')%0A    parser.add_argument('-s', '--secure',action='store_true')%0A    parser.add_argument('-k', '--keytab',action='store')%0A    parser.add_argument('--cache_file',action='store', default='/tmp/nagios.krb')%0A    parser.add_argument('-v','--version', action='version', version='%25(prog)s ' + version)%0A    args = parser.parse_args()%0A    if args.secure and (args.principal is None or args.keytab is None):%0A        parser.error(%22if secure cluster, both of --principal and --keytab required%22)%0A    return args%0A%0Aclass Hbase(nagiosplugin.Resource):%0A%0A    def __init__(self):%0A        p = subprocess.Popen(%5B'hbase','hbck'%5D,stdout=subprocess.PIPE,stderr=None)%0A        output,err = p.communicate()%0A        self.status=None%0A        if err is None:%0A            for line in output.splitlines():%0A                m = re.match('%5E%5Cs*Status%5Cs*:%5Cs*(?P%3CSTATUS%3E%5Cw+)%5Cs*',line)%0A                if m:%0A                    self.status=m.group('STATUS')%0A        else:%0A            # __init__ must not return a value, so signal the failure instead%0A            raise RuntimeError(%22Critical: %22+err)%0A%0A    def probe(self):%0A        yield nagiosplugin.Metric('status',self.status,context=%22status%22)%0A%[email protected]%0Adef main():%0A    args = parser()%0A    auth_token = None%0A    if args.secure:%0A        auth_token = krb_wrapper(args.principal,args.keytab,args.cache_file)%0A        os.environ%5B'KRB5CCNAME'%5D = args.cache_file%0A    check = nagiosplugin.Check(Hbase(),%0A            StringContext('status',%0A                'OK'))%0A    check.main()%0A    if auth_token: auth_token.destroy()%0A%0Aif __name__ == '__main__':%0A    main()%0A
f865bf2d7365ccecec07be7e51e8d81676f3aae2
Add check_cycles tests module
tests/plantcv/morphology/test_check_cycles.py
tests/plantcv/morphology/test_check_cycles.py
Python
0
@@ -0,0 +1,350 @@ +import cv2%0Afrom plantcv.plantcv import outputs%0Afrom plantcv.plantcv.morphology import check_cycles%0A%0A%0Adef test_check_cycles(morphology_test_data):%0A # Clear previous outputs%0A outputs.clear()%0A mask = cv2.imread(morphology_test_data.ps_mask, -1)%0A _ = check_cycles(mask)%0A assert outputs.observations%5B'default'%5D%5B'num_cycles'%5D%5B'value'%5D == 16%0A
1ab296398aaa796a9a5b620c4281d9376ada8b3e
Add short script which prints the entire CMIP6 MIP experiment list #197.
ece2cmor3/scripts/mip-experiment-list.py
ece2cmor3/scripts/mip-experiment-list.py
Python
0
@@ -0,0 +1,739 @@ +#!/usr/bin/env python%0A# Thomas Reerink%0A#%0A# Run example:%0A# python mip-experiment-list.py%0A#%0A# Looping over all MIPs and within each MIP over all its MIP experiments.%0A# Printing the MIP experiment list with some additional info.%0A#%0A%0Afrom dreqPy import dreq%0Adq = dreq.loadDreq()%0A%0Amip_list_file= open( 'mip-experiment-list.txt', 'w' )%0A%0A# Loop over the MIPs:%0Afor mip in dq.coll%5B'mip'%5D.items:%0A # Loop over the MIP experiments:%0A for u in dq.inx.iref_by_sect%5Bmip.uid%5D.a%5B'experiment'%5D:%0A ex = dq.inx.uid%5Bu%5D%0A mip_list_file.write( '%7B:20%7D %7B:20%7D %7B:30%7D %7B:3%7D %7B%7D'.format(mip.label, ex.mip, ex.label, ex.tier%5B0%5D, ex.title) + '%5Cn')%0A #print '%7B:20%7D %7B:20%7D %7B:30%7D %7B:3%7D %7B%7D'.format(mip.label, ex.mip, ex.label, ex.tier%5B0%5D, ex.title)%0A%0Amip_list_file.close()%0A
97eabd4e33086c66372b0e15dd1eeda12e99f427
Create createfile.py
createfile.py
createfile.py
Python
0.000004
@@ -0,0 +1,497 @@ +import os%0A#creates files on the fly from the entries of a list%0A%0Aports=%5B20,21,23,25,43,49,53,69,70,79,80,109,110,115,137,139,143,161,194,389,443,444,458,546,547,1080%5D%0A%0Apath=raw_input('Enter the path you want to create the files: ')%0A%0Atry:%0A    os.chdir(path)%0Aexcept:%0A    print %22Invalid Path%22%0A%0Atry:%0A    for i in ports:%0A        file = open('./'+str(i),'w')%0A        file.close()%0Aexcept:%0A    print %22Could not create files, please check if you have the appropriate read/write permissions%22%0A
6d50dc3c266f4a1b7f517935b961cfb20602011b
add benchmark.py
suite/benchmark.py
suite/benchmark.py
Python
0.000003
@@ -0,0 +1,802 @@ +#!/usr/bin/python%0A%0A# Simple benchmark for Capstone by disassembling random code. By Nguyen Anh Quynh, 2014%0A%0Afrom capstone import *%0A%0Afrom time import time%0Afrom random import randint%0A%0A%0Adef random_str(size):%0A lst = %5Bstr(randint(0, 255)) for _ in xrange(size)%5D%0A return %22%22.join(lst)%0A%0Adef cs(md, data):%0A insns = md.disasm(data, 0)%0A # uncomment below line to speed up this function 200 times!%0A # return%0A for i in insns:%0A if i.address == 0x100000:%0A print i%0A%0Amd = Cs(CS_ARCH_X86, CS_MODE_32)%0Amd.detail = False%0A%0A# warm up few times%0Afor i in xrange(3):%0A data = random_str(128)%0A cs(md, data)%0A%0A# start real benchmark%0Ac_t = 0%0Afor i in xrange(10000):%0A code = random_str(128)%0A %0A t1 = time()%0A cs(md, code)%0A c_t += time() - t1%0A%0A%0Aprint %22Capstone:%22, c_t, %22seconds%22%0A
95c0d34be2699ee85d23a32384d408ac25561978
Normalize to str to work around Unicode for now
base32_crockford.py
base32_crockford.py
""" base32-crockford ================ A Python module implementing the alternate base32 encoding as described by Douglas Crockford at: http://www.crockford.com/wrmg/base32.html. According to his description, the encoding is designed to: * Be human and machine readable * Be compact * Be error resistant * Be pronounceable It uses a symbol set of 10 digits and 22 letters, excluding I, L O and U. Decoding is not case sensitive, and 'i' and 'l' are converted to '1' and 'o' is converted to '0'. Encoding uses only upper-case characters. Hyphens can be present in symbol strings to improve readability, and are removed when decoding. A check symbol can be appended to a symbol string to detect errors within the string. """ import string __all__ = ["encode", "decode", "normalize"] # The encoded symbol space does not include I, L, O or U; # the last five symbols are exclusively for checksum values SYMBOLS = "0123456789ABCDEFGHJKMNPQRSTVWXYZ*~$=U" ENCODE_SYMBOLS = {i: ch for (i, ch) in enumerate(SYMBOLS)} DECODE_SYMBOLS = {ch: i for (i, ch) in enumerate(SYMBOLS)} NORMALIZE_SYMBOLS = string.maketrans("IiLlOo", "111100") BASE = 32 CHECK_BASE = 37 def encode(number, checksum=False): """ Encodes a base 10 positive integer into a symbol string. Raises a ValueError on invalid input. If checksum is set to True, a check symbol will also be calculated and appended to the string. """ number = int(number) if number < 0: raise ValueError("Number '%d' is not a positive integer" % number) check_symbol = '' if checksum: check_symbol = ENCODE_SYMBOLS[number % CHECK_BASE] if number == 0: return '0' + check_symbol symbol_string = '' while number > 0: remainder = number % BASE number //= BASE symbol_string = ENCODE_SYMBOLS[remainder] + symbol_string return symbol_string + check_symbol def decode(symbol_string, checksum=False, strict=False): """ Decodes a given symbol string into a base 10 number. Raises a ValueError on invalid input. If checksum is set to True, the string is assumed to have a trailing check symbol which will be validated. If the checksum validation fails, a ValueError is raised. If strict is set to True, a ValueError is raised if the normalization step requires changes to the string. """ symbol_string = normalize(symbol_string, strict=strict) if checksum: symbol_string, check_symbol = symbol_string[:-1], symbol_string[-1] # The letter 'U' is only valid as a check symbol if 'U' in symbol_string: raise ValueError("String '%s' contains invalid characters" % symbol_string) number = 0 for symbol in symbol_string: number = number * BASE + DECODE_SYMBOLS[symbol] if checksum: check_value = DECODE_SYMBOLS[check_symbol] modulo = number % CHECK_BASE if check_value != modulo: raise ValueError("Invalid check symbol '%s' for string '%s'" % (check_symbol, symbol_string)) return number def normalize(symbol_string, strict=False): """ Normalizes a given symbol string to account for error resistance and prepare it for decoding. These transformations are applied: 1. Hyphens are removed 2. 'I', 'i', 'L' or 'l' are converted to '1' 3. 'O' or 'o' are converted to '0' 4. All characters are converted to uppercase If the strict parameter is set to True, a ValueError is raised if any of the above transformations are applied. """ string = symbol_string.translate(NORMALIZE_SYMBOLS, '-').upper() if strict and string != symbol_string: raise ValueError("Normalization required for string '%s'" % symbol_string) return string
Python
0
@@ -3640,16 +3640,20 @@ tring = +str( symbol_s @@ -3657,16 +3657,17 @@ l_string +) .transla
78aea51f508a14bb1b03b49933576c84b56a7459
Add an example for the new dropdowns
examples/views/dropdown.py
examples/views/dropdown.py
Python
0.000003
@@ -0,0 +1,2241 @@ +import typing%0A%0Aimport discord%0Afrom discord.ext import commands%0A%0A# Defines a custom Select containing colour options%0A# that the user can choose. The callback function%0A# of this class is called when the user changes their choice%0Aclass Dropdown(discord.ui.Select):%0A def __init__(self):%0A%0A # Set the options that will be presented inside the dropdown%0A options = %5B%0A discord.SelectOption(label='Red', description='Your favourite colour is red', emoji='%F0%9F%9F%A5'),%0A discord.SelectOption(label='Green', description='Your favourite colour is green', emoji='%F0%9F%9F%A9'),%0A discord.SelectOption(label='Blue', description='Your favourite colour is blue', emoji='%F0%9F%9F%A6')%0A %5D%0A%0A # The placeholder is what will be shown when no option is chosen%0A # The min and max values indicate we can only pick one of the three options%0A # The options parameter defines the dropdown options. We defined this above%0A super().__init__(placeholder='Choose your favourite colour...', min_values=1, max_values=1, options=options)%0A%0A async def callback(self, interaction: discord.Interaction):%0A # Use the interaction object to send a response message containing%0A # the user's favourite colour or choice. The self object refers to the%0A # Select object, and the values attribute gets a list of the user's %0A # selected options. We only want the first one.%0A await interaction.response.send_message(f'Your favourite colour is %7Bself.values%5B0%5D%7D')%0A%0A%0Aclass DropdownView(discord.ui.View):%0A def __init__(self):%0A super().__init__()%0A%0A # Adds the dropdown to our view object.%0A self.add_item(Dropdown())%0A%0A%0Aclass Bot(commands.Bot):%0A def __init__(self):%0A super().__init__(command_prefix=commands.when_mentioned_or('$'))%0A%0A async def on_ready(self):%0A print(f'Logged in as %7Bself.user%7D (ID: %7Bself.user.id%7D)')%0A print('------')%0A %0A %0Abot = Bot()%0A%0A%[email protected]()%0Aasync def colour(ctx):%0A %22%22%22Sends a message with our dropdown containing colours%22%22%22%0A%0A # Create the view containing our dropdown%0A view = DropdownView()%0A%0A # Sending a message containing our view%0A await ctx.send('Pick your favourite colour:', view=view)%0A%0A%0Abot.run('token')%0A
bcb6c0780aacf77069a08f8d5b44d295881d9b9d
Create solution to swap odd even characters
swapOddEvenChar.py
swapOddEvenChar.py
Python
0.000001
@@ -0,0 +1,159 @@ +#Python3%0Aword = list(input().strip())%0A%0Afor i in range(0,len(word),2):%0A%09if(i+1%3E=len(word)):%0A%09%09break%0A%09word%5Bi%5D,word%5Bi+1%5D = word%5Bi+1%5D,word%5Bi%5D%0A%0Aprint(''.join(word))
7bde47d48f4e80b4449049a8b05767b30eb2c516
Add stupid CSV export example
utilities/export-csv.py
utilities/export-csv.py
Python
0
@@ -0,0 +1,2143 @@ +#!/usr/bin/python%0A%0Aimport os%0Aimport csv%0A%0Aimport sys%0Asys.path.append('../pynipap')%0Aimport pynipap%0A%0Aclass Export:%0A%09def __init__(self, xmlrpc_uri):%0A%09%09self.xmlrpc_uri = xmlrpc_uri%0A%0A%0A%09def write(self, output_file, schema_name):%0A%09%09%22%22%22%0A%09%09%22%22%22%0A%09%09f = open(output_file, %22w+%22)%0A%09%09writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)%0A%0A%09%09pynipap.xmlrpc_uri = xmlrpc_uri%0A%09%09ao = pynipap.AuthOptions(%7B 'authoritative_source': 'nipap' %7D)%0A%0A%09%09import socket,xmlrpclib%0A%09%09try:%0A%09%09%09schema = pynipap.Schema.list(%7B 'name': schema_name %7D)%5B0%5D%0A%09%09except socket.error:%0A%09%09%09print %3E%3E sys.stderr, %22Connection refused, please check hostname & port%22%0A%09%09%09sys.exit(1)%0A%09%09except xmlrpclib.ProtocolError:%0A%09%09%09print %3E%3E sys.stderr, %22Authentication failed, please check your username / password%22%0A%09%09%09sys.exit(1)%0A%09%09except IndexError:%0A%09%09%09print %3E%3E sys.stderr, %22Non existing schema (%22, schema_name, %22)%22%0A%09%09%09sys.exit(1)%0A%0A%09%09res = pynipap.Prefix.smart_search(schema, ' ', %7B 'include_all_parents': True %7D)%0A%09%09for p in res%5B'result'%5D:%0A%09%09%09writer.writerow(%5Bp.display_prefix, p.type, p.node, p.order_id, p.description%5D)%0A%0A%0Aif __name__ == '__main__':%0A%09import optparse%0A%09parser = optparse.OptionParser()%0A%09parser.add_option('--username', default='', help=%22Username%22)%0A%09parser.add_option('--password', default='', help=%22Password%22)%0A%09parser.add_option('--host', help=%22NIPAP backend host%22)%0A%09parser.add_option('--port', default=1337, help=%22NIPAP backend port%22)%0A%09parser.add_option('--schema', help=%22Schema name%22)%0A%09parser.add_option('--file', help=%22Output file%22)%0A%0A%09(options, args) = parser.parse_args()%0A%0A%09if options.host is None:%0A%09%09print %3E%3E sys.stderr, %22Please specify the NIPAP backend host to work with%22%0A%09%09sys.exit(1)%0A%0A%09if options.schema is None:%0A%09%09print %3E%3E sys.stderr, %22Please specify a schema to export%22%0A%09%09sys.exit(1)%0A%0A%09if options.file is None:%0A%09%09print %3E%3E sys.stderr, %22Please specify an output file%22%0A%09%09sys.exit(1)%0A%0A%09auth_uri = ''%0A%09if options.username:%0A%09%09auth_uri = %22%25s:%25s@%22 %25 (options.username, options.password)%0A%0A%09xmlrpc_uri = %22http://%25(auth_uri)s%25(host)s:%25(port)s%22 %25 %7B%0A%09%09%09'auth_uri'%09: auth_uri,%0A%09%09%09'host'%09%09: options.host,%0A%09%09%09'port'%09%09: options.port%0A%09%09%09%7D%0A%0A%09wr = Export(xmlrpc_uri)%0A%09wr.write(options.file, options.schema)%0A
9b6eddb88f5de1b7c44d42e1d4a3dc1c90180862
Implement deck.
onirim/deck.py
onirim/deck.py
Python
0
@@ -0,0 +1,877 @@ +import random%0A%0Aclass Deck:%0A%0A def __init__(self, cards):%0A self._undrawn = list(cards)%0A self._discarded = %5B%5D%0A self._limbo = %5B%5D%0A%0A def draw(self, n=1):%0A %22%22%22Draw n cards.%22%22%22%0A if n %3E len(self._undrawn) or n %3C 0:%0A raise ValueError()%0A drawn, self._undrawn = self._undrawn%5B:n%5D, self._undrawn%5Bn:%5D%0A return drawn%0A%0A def put_discard(self, card):%0A %22%22%22Put a card to discard pile.%22%22%22%0A self._discarded.append(card)%0A%0A def put_limbo(self, card):%0A %22%22%22Put a card to Limbo pile.%22%22%22%0A self._limbo.append(card)%0A%0A def shuffle(self):%0A %22%22%22Shuffle the undrawn pile.%22%22%22%0A random.shuffle(self._undrawn)%0A%0A def shuffle_with_limbo(self):%0A %22%22%22Shuffle limbo pile back to undrawn pile.%22%22%22%0A self._undrawn += self._limbo%0A self._limbo = %5B%5D%0A random.shuffle(self._undrawn)%0A
f5711401b79433f5b52e675cec67b63f6511836a
add tests file
tests.py
tests.py
Python
0.000001
@@ -0,0 +1,408 @@ +#!flask/bin/python%0Aimport unittest%0A%0Afrom server import app%0A%0Adef add(a, b):%0A return a+b%0A%0A%0Aclass TestCase(unittest.TestCase):%0A def setUp(self):%0A app.config%5B'TESTING'%5D = True%0A self.app = app.test_client()%0A%0A def tearDown(self):%0A pass%0A%0A def test_add(self):%0A self.assertEqual(add(1, 2), 3)%0A self.assertEqual(add(3, 4), 7)%0A%0Aif __name__ == '__main__':%0A unittest.main()
e9d87a087a0f0102157d7c718a048c72f655c54a
Store registered refs as plugin metadata
smore/ext/marshmallow.py
smore/ext/marshmallow.py
# -*- coding: utf-8 -*- from __future__ import absolute_import from marshmallow.compat import iteritems from marshmallow import class_registry from smore import swagger from smore.apispec.core import Path from smore.apispec.utils import load_operations_from_docstring def schema_definition_helper(name, schema, **kwargs): """Definition helper that allows using a marshmallow :class:`Schema <marshmallow.Schema>` to provide Swagger metadata. :param type schema: A marshmallow Schema class. """ return swagger.schema2jsonschema(schema) def schema_path_helper(view, **kwargs): doc_operations = load_operations_from_docstring(view.__doc__) if not doc_operations: return operations = doc_operations.copy() for method, config in iteritems(doc_operations): if 'schema' in config: schema_cls = class_registry.get_class(config['schema']) if not operations[method].get('responses'): operations[method]['responses'] = {} operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls) return Path(operations=operations) def setup(spec): spec.register_definition_helper(schema_definition_helper) spec.register_path_helper(schema_path_helper)
Python
0
@@ -293,16 +293,22 @@ _helper( +spec, name, sc @@ -516,16 +516,201 @@ %22%22%22%0A + # Store registered refs, keyed by Schema class%0A plug = spec.plugins%5B'smore.ext.marshmallow'%5D%0A if 'refs' not in plug:%0A plug%5B'refs'%5D = %7B%7D%0A plug%5B'refs'%5D%5Bschema%5D = name%0A retu
3654817845e1d22a5b0e648a79d0bf6db12c2704
add run_sql shell command
treeherder/model/management/commands/run_sql.py
treeherder/model/management/commands/run_sql.py
Python
0.000002
@@ -0,0 +1,2638 @@ +import MySQLdb%0Afrom optparse import make_option%0A%0Afrom django.core.management.base import BaseCommand%0A%0Afrom treeherder.model.models import Datasource%0Afrom django.conf import settings%0A%0A%0Aclass Command(BaseCommand):%0A help = (%22Runs an arbitrary sql statement or file%22%0A %22 on a number of databases.%22)%0A%0A option_list = BaseCommand.option_list + (%0A%0A make_option(%0A '--datasources',%0A action='store',%0A dest='datasources',%0A default='all',%0A help='A comma separated list of datasources to execute the sql code on'),%0A%0A make_option(%0A '--data-type',%0A action='store',%0A dest='data_type',%0A default='jobs',%0A choices=%5B'jobs', 'objectstore'%5D,%0A help='The target data-type of the sql code'),%0A%0A make_option(%0A '-f', '--file',%0A dest='sql_file',%0A help='Sql source file',%0A metavar='FILE',%0A default=%22%22)%0A%0A )%0A%0A def handle(self, *args, **options):%0A%0A if not options%5B%22sql_file%22%5D:%0A self.stderr.write(%22No sql file provided!%22)%0A return%0A%0A datasources = Datasource.objects.filter(contenttype=options%5B'data_type'%5D)%0A if options%5B'datasources'%5D != 'all':%0A if ',' in options%5B'datasources'%5D:%0A datasources = datasources.filter(%0A project__in=options%5B'datasources'%5D.split(','))%0A else:%0A datasources = datasources.filter(%0A project=options%5B'datasources'%5D)%0A%0A with open(options%5B%22sql_file%22%5D) as sql_file:%0A sql_code = sql_file.read()%0A%0A self.stdout.write(%22%7B0%7D datasource found%22.format(%0A len(datasources)%0A ))%0A for datasource in datasources:%0A self.stdout.write(%22--------------------------%22)%0A db = MySQLdb.connect(%0A host=datasource.host,%0A db=datasource.name,%0A user=settings.TREEHERDER_DATABASE_USER,%0A passwd=settings.TREEHERDER_DATABASE_PASSWORD)%0A try:%0A cursor = db.cursor()%0A cursor.execute(sql_code)%0A self.stdout.write(%22Sql code executed on %7B0%7D%22.format(datasource))%0A except Exception as e:%0A error_string = %22!!! Sql code execution failed on %7B0%7D !!!%22%0A self.stderr.write(error_string.format(datasource))%0A self.stderr.write(%22%7B0%7D%22.format(e))%0A finally:%0A if cursor:%0A cursor.close()%0A
5b20a487afa90c0d91a43d4d29526d352511316f
add utils.py with utilities
utils.py
utils.py
Python
0.000001
@@ -0,0 +1,526 @@ +from csv import DictReader%0Aimport re%0A%0Adef read_csv(filename):%0A with open(filename) as csvfile:%0A return list(DictReader(csvfile, dialect='excel'))%0A%0Adef split_name(string):%0A surname, name = re.search(r'%5E(%5BA-Z%5C'%5C.%5Cs%5D+)%5Cs(.+)$', string).groups()%0A return name, surname%0A%0Adef iterate_names(name, surname):%0A yield name, surname%0A while ' ' in name:%0A name = name.rsplit(' ', 1)%5B0%5D%0A yield name, surname%0A while ' ' in surname:%0A surname = surname.rsplit(' ', 1)%5B0%5D%0A yield name, surname%0A
89d8e6a8a422bade352d3bf94f2c59c1d0dc601b
Create dictionary.py
dictionary.py
dictionary.py
Python
0.000096
@@ -0,0 +1,312 @@ +x = %7B'job': 'teacher', 'color': 'blue'%7D // Create a dictionary, list with defination%0A%0Aprint(x%5B'job'%5D) // You will see 'teacher'%0A%0Ay = %7B'emotion': 'happy', 'reason': %7B'action': 'playing game', 'platform': 'PC'%7D%7D%0A%0Aprint(y%5B'reason'%5D%5B'action'%5D) // You will see 'playing game'%0A
b08341d2822ad266e07d4104a45604ad9d5b504a
add unit test for text_analyzer
src/text_analyzer.py
src/text_analyzer.py
Python
0.000002
@@ -0,0 +1,1242 @@ +import os%0Aimport unittest%0A%0Adef analyze_text(filename):%0A%0A lines = 0%0A chars = 0%0A with open(filename, 'r') as f:%0A for line in f:%0A lines += 1%0A chars += len(line)%0A return (lines, chars)%0A%0Aclass TextAnalysisTests(unittest.TestCase):%0A %22%22%22Test for the %60%60analyze_test()%60%60 function%22%22%22%0A%0A def setUp(self):%0A self.filename = 'funfile.txt'%0A with open(self.filename, 'w') as f:%0A f.write('Spring is here. %5Cn'%0A 'As the birds sing. %5Cn'%0A 'And the flowers and bees. %5Cn'%0A 'In such a joy.')%0A%0A def tearDown(self):%0A try:%0A os.remove(self.filename)%0A except:%0A pass%0A%0A def test_function_runs(self):%0A analyze_text(self.filename)%0A%0A def test_line_count(self):%0A self.assertEqual(analyze_text(self.filename)%5B0%5D, 4)%0A%0A def test_charactor_count(self):%0A self.assertEqual(analyze_text(self.filename)%5B1%5D, 78)%0A%0A def test_no_such_file(self):%0A with self.assertRaises(IOError):%0A analyze_text(%22foo%22)%0A%0A def test_no_deletion(self):%0A analyze_text(self.filename)%0A self.assertTrue(os.path.exists(self.filename))%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A%0A%0A
e1aa02badee2951f4f4aeeb09f37be030466e711
Add pyupgrades.py
bin/pyupgrades.py
bin/pyupgrades.py
Python
0.000003
@@ -0,0 +1,1885 @@ +#!/usr/bin/env python%0A%0Aimport xmlrpclib%0Aimport pip%0Aimport argparse%0Aimport re%0Afrom pkg_resources import parse_version%0A%0Adef version_number_compare(version1, version2):%0A return cmp(parse_version(version1), parse_version(version2))%0A %0A def normalize(v):%0A return %5Bint(x) for x in re.sub(r'(%5C.0+)*$','', v).split(%22.%22)%5D%0A return cmp(normalize(version1), normalize(version2))%0A%0Apackage_format = '%7Bdist.project_name%7D %7Bdist.version%7D'%0Adisplay_format = '%7Bpackage:40%7D %7Bmessage%7D'%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser(description='Process some integers.')%0A parser.add_argument('-a', '--all', dest='all', action='store_true', default=False)%0A parser.add_argument('-m', '--mirror', dest='mirror', default='http://pypi.python.org/pypi')%0A %0A args = parser.parse_args()%0A %0A if not args:%0A exit(-1)%0A %0A pypi = xmlrpclib.ServerProxy(args.mirror)%0A for dist in pip.get_installed_distributions():%0A package_str = package_format.format(dist=dist)%0A %0A available = pypi.package_releases(dist.project_name)%0A if not available:%0A # Try the capitalized package name%0A available = pypi.package_releases(dist.project_name.capitalize())%0A %0A upgrade_available = True%0A if not available:%0A print display_format.format(package=package_str, message='no releases at pypi')%0A continue%0A %0A comparison = version_number_compare(available%5B0%5D, dist.version)%0A if comparison == 0:%0A if not args.all:%0A continue%0A print display_format.format(package=package_str, message='up to date')%0A elif comparison %3C 0:%0A print display_format.format(package=package_str, message='older version on pypi')%0A else:%0A print display_format.format(package=package_str, message='%25s available' %25 available%5B0%5D)%0A
7b09a44c7df8b2aa28e45c5382626c2f8c4bf61b
Add a script to convert from rst style files to markdown
bin/run_redpen.py
bin/run_redpen.py
Python
0
@@ -0,0 +1,1368 @@ +#!/usr/bin/python%0A%0Aimport os%0Aimport re%0Aimport shutil%0Afrom optparse import OptionParser%0A%0Adef main():%0A parser = OptionParser(usage=%22usage: %25prog %5Boptions%5D%22,%0A version=%22%25prog 1.0%22)%0A parser.add_option(%22-i%22, %22--inputdir%22,%0A action=%22store%22,%0A dest=%22indir%22,%0A default=%22source%22,%0A help=%22specify the input directory containing rst files.%22)%0A parser.add_option(%22-o%22, %22--outdir%22,%0A action=%22store%22,%0A dest=%22outdir%22,%0A default=%22build/mdfiles%22,%0A help=%22specify the output directory of markdownized files.%22)%0A (options, args) = parser.parse_args()%0A%0A indir = options.indir%0A outdir = options.outdir%0A%0A if os.path.exists(outdir) == True:%0A shutil.rmtree(outdir)%0A os.makedirs(outdir)%0A%0A for root, dirs, files in os.walk(indir):%0A for file in files:%0A mdfile_pat = re.compile(%22.*%5C.rst%22)%0A if not mdfile_pat.search(file):%0A continue%0A fileroot, ext = os.path.splitext(file)%0A cmdline = %22pandoc -r markdown -w rst %25s -o %25s%22 %25 (os.path.join(root, file),%0A outdir + %22/%22 + fileroot + %22.md%22)%0A os.system(cmdline)%0A%0Aif __name__ == '__main__':%0A main()%0A
30c368f1794f7bbc4121f732143ac07e7148a3ca
Create KevinAndExpectation.py
Probability/KevinAndExpectation.py
Probability/KevinAndExpectation.py
Python
0
@@ -0,0 +1,1211 @@ +# Importing standard libraries%0Aimport sys%0Afrom math import sqrt%0A%0A# Parsing functions%0Adef parseInt(stream):%0A return int(stream.readline().rstrip())%0A%0A'''%0A %0A Dynamically precomputing the summation series for N %3C 10%5E6 so that each test case%0A is solved in constnat time for any N less than 10%5E6. There fore for Task 1, this%0A solution takes O(1) time%0A %0A'''%0A# Computing the summation series%0Adef getL(N):%0A L = %5B0%5D*(N + 1)%0A L%5B1%5D = 1.0%0A for i in range(2, N + 1):%0A L%5Bi%5D = L%5Bi - 1%5D + sqrt(i * 4.0 - 3.0)%0A return L%0A%0A'''%0A%0A For N greater than 10%5E6 we take an approximation of the series since we have not%0A precomputed it already. This approximation was obtained from Wolfram alpha%0A %0A'''%0Adef getAns(N):%0A return (4.0/3.0) * (N ** 1.5)%0A %0A# Main function for the program%0Aif __name__ == %22__main__%22:%0A stream = sys.stdin%0A T = parseInt(stream)%0A L = getL(1000000)%0A for i in range(T):%0A N = parseInt(stream)%0A if(N %3C 1000000):%0A summationN = L%5BN%5D %0A ans = 0.5 - 1.0/N + (0.5/N) * (summationN)%0A print ans%0A else:%0A summationN = getAns(N)%0A ans = 0.5 - 1.0/N + (0.5/N) * (summationN)%0A print ans%0A
53b0d93a7a29121e9d24058bfe4b7ee3bd33f7ca
Add info for version 2.16 (#3601)
var/spack/repos/builtin/packages/ack/package.py
var/spack/repos/builtin/packages/ack/package.py
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Ack(Package):
    """ack 2.14 is a tool like grep, optimized for programmers.

    Designed for programmers with large heterogeneous trees of
    source code, ack is written purely in portable Perl 5 and takes
    advantage of the power of Perl's regular expressions.
    """

    homepage = "http://beyondgrep.com/"
    url = "http://beyondgrep.com/ack-2.14-single-file"

    version('2.14', 'e74150a1609d28a70b450ef9cc2ed56b', expand=False)

    depends_on('perl')

    def install(self, spec, prefix):
        mkdirp(prefix.bin)
        ack = 'ack-{0}-single-file'.format(self.version)

        # rewrite the script's #! line to call the perl dependency
        shbang = '#!' + join_path(spec['perl'].prefix.bin, 'perl')
        filter_file(r'^#!/usr/bin/env perl', shbang, ack)

        install(ack, join_path(prefix.bin, "ack"))
        set_executable(join_path(prefix.bin, "ack"))
Python
0
@@ -1625,16 +1625,86 @@ -file%22%0A%0A + version('2.16', '7085b5a5c76fda43ff049410870c8535', expand=False)%0A vers
3b30e8a66ca5a3c68055696c339a44fffc98afb3
compute jaccard with numpy broadcasting
exercise/broadcast.py
exercise/broadcast.py
Python
0.999999
@@ -0,0 +1,1512 @@ +import numpy as np%0A%0A# a = np.array(%5B1.0, 2.0, 3.0%5D)%0A# b = 2.0%0A%0A# print(a * b)%0A%0A# x = np.arange(4)%0A# xx = x.reshape(4,1)%0A# %0A# y = np.ones(5)%0A%0A# x = np.array(%5B1,2%5D).reshape((2,1))%0A# y =np.arange(4).reshape((1,4))%0A# %0A# print(x-y)%0A%0A# from numpy import array, argmin, sqrt, sum%0A# %0A# observation = array(%5B111.0,188.0%5D)%0A# %0A# codes = array(%5B%5B102.0, 203.0%5D,%0A# %5B132.0, 193.0%5D,%0A# %5B45.0, 155.0%5D,%0A# %5B57.0, 173.0%5D%5D)%0A# %0A# # observation = observation.reshape((1,-1))%0A# # distance = np.sqrt((observation%5B:,0%5D - codes%5B:,0%5D) ** 2 + (observation%5B:,1%5D - codes%5B:,1%5D) ** 2)%0A# %0A# diff = codes - observation%0A# distance = (diff **2).sum(axis=-1) %0A# %0A# min_ind = np.argmin(np.sqrt(distance))%0A# print(codes%5Bmin_ind%5D)%0A%0A%0Agt_bboxes = np.array(%5B%5B0,0,1,2%5D,%5B1,0,3,4%5D%5D).reshape((-1,1,4))%0A%0Aanchors = np.array(%5B%5B100,100,105,105%5D,%5B2,1,3,3.5%5D,%5B0,0,10,10%5D%5D).reshape((1,-1,4))%0A%0A%0Ainter_ymin = np.maximum(gt_bboxes%5B:,:,0%5D, anchors%5B:,:,0%5D)%0Ainter_xmin = np.maximum(gt_bboxes%5B:,:,1%5D, anchors%5B:,:,1%5D)%0Ainter_ymax = np.minimum(gt_bboxes%5B:,:,2%5D, anchors%5B:,:,2%5D)%0Ainter_xmax = np.minimum(gt_bboxes%5B:,:,3%5D, anchors%5B:,:,3%5D)%0A%0Ah = np.maximum(inter_ymax - inter_ymin, 0.)%0Aw = np.maximum(inter_xmax - inter_xmin, 0.)%0A%0Ainter_area = h * w%0Aanchors_area = (anchors%5B:,:,3%5D - anchors%5B:,:,1%5D) * (anchors%5B:,:,2%5D - anchors%5B:,:,0%5D)%0Agt_bboxes_area = (gt_bboxes%5B:,:,3%5D - gt_bboxes%5B:,:,1%5D) * (gt_bboxes%5B:,:,2%5D - gt_bboxes%5B:,:,0%5D)%0Aunion_area = anchors_area - inter_area + gt_bboxes_area%0Ajaccard = inter_area/union_area%0Aprint(jaccard)%0A%0A%0A%0A%0A%0A
d0d182605389ec73773df35b9e06455b9f9a2923
add get_posts
facebook/get_posts.py
facebook/get_posts.py
Python
0.000005
@@ -0,0 +1,1608 @@ +%22%22%22%0AA simple example script to get all posts on a user's timeline.%0AOriginally created by Mitchell Stewart.%0A%3Chttps://gist.github.com/mylsb/10294040%3E%0A%22%22%22%0Aimport facebook%0Aimport requests%0A%0A%0Adef some_action(post):%0A %22%22%22 Here you might want to do something with each post. E.g. grab the%0A post's message (post%5B'message'%5D) or the post's picture (post%5B'picture'%5D).%0A In this implementation we just print the post's created time.%0A %22%22%22%0A print(post%5B'created_time'%5D)%0A%0A%0A# You'll need an access token here to do anything. You can get a temporary one%0A# here: https://developers.facebook.com/tools/explorer/%0Aaccess_token = 'CAAHPNmH9dEUBAJ53c9925baOfzbjsCmaAujxZBSEBBpIKqxBwyqBTDMsQSZCsfxReqDlAIsyAWC6ZCtLMibt5G6AcHy2nDb2IC4pvFz0SMJWpnMJol3Rzvt80PKNz9IYGDHfNZBQTF3VhI36yDE8qiI2EzTK7LKuNLBEq3AugsSgXdFGtKcbP2UOtoZCZBaRSZBxHzph5yOmV5yflsJ5258'%0A# Look at Bill Gates's profile for this example by using his Facebook id.%0Auser = 'BillGates'%0A%0Agraph = facebook.GraphAPI(access_token)%0Aprofile = graph.get_object(user)%0Aposts = graph.get_connections(profile%5B'id'%5D, 'posts')%0A%0A# Wrap this block in a while loop so we can keep paginating requests until%0A# finished.%0Awhile True:%0A try:%0A # Perform some action on each post in the collection we receive from%0A # Facebook.%0A %5Bsome_action(post=post) for post in posts%5B'data'%5D%5D%0A # Attempt to make a request to the next page of data, if it exists.%0A posts = requests.get(posts%5B'paging'%5D%5B'next'%5D).json()%0A except KeyError:%0A # When there are no more pages (%5B'paging'%5D%5B'next'%5D), break from the%0A # loop and end the script.%0A break%0A
419ca7099bf47ed00ede73d9de14690a643a3943
Add data for integration testing of basic csv and crosstab formats
test/test_integration.py
test/test_integration.py
Python
0
@@ -0,0 +1,745 @@ +%22%22%22Integrations tests for EcoData Retriever%22%22%22%0A%0Aimport os%0Aimport shutil%0Afrom retriever import HOME_DIR%0A%0Asimple_csv = %7B'name': 'simple_csv',%0A 'raw_data': %22a,b,c%5Cn1,2,3%5Cn4,5,6%22,%0A 'script': %22shortname: simple_csv%5Cntable: simple_csv, http://example.com/simple_csv.txt%22,%0A 'expect_out': %22a,b,c%5Cn1,2,3%5Cn4,5,6%22%7D%0A%0Acrosstab = %7B'name': 'crosstab',%0A 'raw_data': %22a,b,c1,c2%5Cn1,1,1.1,1.2%5Cn1,2,2.1,2.2%22,%0A 'script': %22shortname: crosstab%5Cntable: crosstab, http://example.com/crosstab.txt%5Cn*column: a, int%5Cn*column: b, int%5Cn*ct_column: c%5Cn*column: val, ct-double%5Cn*ct_names: c1,c2%22,%0A 'expect_out': %22a,b,c,val%5Cn1,1,c1,1.1%5Cn1,1,c2,1.2%5Cn1,2,c1,2.1%5Cn1,2,c2,2.2%22%7D%0A%0Atests = %5Bsimple_csv, crosstab%5D%0A
465fbc1657e90134323fd05ee4216da5af110ee4
add tools
pycrawler/utils/tools.py
pycrawler/utils/tools.py
Python
0.000001
@@ -0,0 +1,368 @@ +__author__ = 'mengpeng'%0Aimport time%0A%0A%0Adef gethash(string, cap=0xffffffff):%0A return hash(string) & cap%0A%0A%0Adef timestamp():%0A return time.strftime(%22%25H:%25M:%25S%22, time.localtime(time.time()))%0A%0A%0Adef datastamp():%0A return time.strftime(%22%25Y-%25m-%25d%22, time.localtime(time.time()))%0A%0A%0Adef fullstamp():%0A return time.strftime(%22%25Y-%25m-%25d %25H:%25M:%25S%22, time.localtime(time.time()))
d8fc66417860e634bbb2a6d860628b645811d62c
Add WIP for Python example
examples/python/curieimu.py
examples/python/curieimu.py
Python
0
@@ -0,0 +1,1816 @@ +#!/usr/bin/python%0A%0A# Author: Ron Evans (@deadprogram)%0A# Copyright (c) 2016 Intel Corporation.%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining%0A# a copy of this software and associated documentation files (the%0A# %22Software%22), to deal in the Software without restriction, including%0A# without limitation the rights to use, copy, modify, merge, publish,%0A# distribute, sublicense, and/or sell copies of the Software, and to%0A# permit persons to whom the Software is furnished to do so, subject to%0A# the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be%0A# included in all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND,%0A# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF%0A# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND%0A# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE%0A# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION%0A# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION%0A# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE%0A%0Aimport time, sys, signal, atexit%0Aimport pyupm_curieimu as curieimu%0A%0A## Exit handlers ##%0A# This stops python from printing a stacktrace when you hit control-C%0Adef SIGINTHandler(signum, frame):%0A%09raise SystemExit%0A%0A# This lets you run code on exit,%0A# including functions from myAccelrCompass%0Adef exitHandler():%0A%09print %22Exiting%22%0A%09sys.exit(0)%0A%0A# Register exit handlers%0Aatexit.register(exitHandler)%0Asignal.signal(signal.SIGINT, SIGINTHandler)%0A%0A%0Awhile(1):%0A%09# Get the acceleration%0A%09curieimu.updateAccel();%0A%0A%09outputStr = %22acc: gX %7B0%7D - gY %7B1%7D - gZ %7B2%7D%22.format(%0A%09curieimu.getAccelX(), curieimu.getAccelY(),%0A%09curieimu.getAccelZ())%0A%09print outputStr%0A%0A%09print %22 %22%0A%09time.sleep(1)%0A
2f7aa680b79b60d707d7b09818e3ec55748448b2
fix anonymous token extraction (closes #23650)
youtube_dl/extractor/discovery.py
youtube_dl/extractor/discovery.py
from __future__ import unicode_literals

import random
import re
import string

from .discoverygo import DiscoveryGoBaseIE
from ..compat import compat_urllib_parse_unquote
from ..utils import ExtractorError
from ..compat import compat_HTTPError


class DiscoveryIE(DiscoveryGoBaseIE):
    _VALID_URL = r'''(?x)https?://
        (?P<site>
            (?:(?:www|go)\.)?discovery|
            (?:www\.)?
                (?:
                    investigationdiscovery|
                    discoverylife|
                    animalplanet|
                    ahctv|
                    destinationamerica|
                    sciencechannel|
                    tlc|
                    velocity
                )|
            watch\.
                (?:
                    hgtv|
                    foodnetwork|
                    travelchannel|
                    diynetwork|
                    cookingchanneltv|
                    motortrend
                )
        )\.com/tv-shows/(?P<show_slug>[^/]+)/(?:video|full-episode)s/(?P<id>[^./?#]+)'''
    _TESTS = [{
        'url': 'https://go.discovery.com/tv-shows/cash-cab/videos/riding-with-matthew-perry',
        'info_dict': {
            'id': '5a2f35ce6b66d17a5026e29e',
            'ext': 'mp4',
            'title': 'Riding with Matthew Perry',
            'description': 'md5:a34333153e79bc4526019a5129e7f878',
            'duration': 84,
        },
        'params': {
            'skip_download': True,  # requires ffmpeg
        }
    }, {
        'url': 'https://www.investigationdiscovery.com/tv-shows/final-vision/full-episodes/final-vision',
        'only_matching': True,
    }, {
        'url': 'https://go.discovery.com/tv-shows/alaskan-bush-people/videos/follow-your-own-road',
        'only_matching': True,
    }, {
        # using `show_slug` is important to get the correct video data
        'url': 'https://www.sciencechannel.com/tv-shows/mythbusters-on-science/full-episodes/christmas-special',
        'only_matching': True,
    }]

    _GEO_COUNTRIES = ['US']
    _GEO_BYPASS = False
    _API_BASE_URL = 'https://api.discovery.com/v1/'

    def _real_extract(self, url):
        site, show_slug, display_id = re.match(self._VALID_URL, url).groups()

        access_token = None
        cookies = self._get_cookies(url)

        # prefer Affiliate Auth Token over Anonymous Auth Token
        auth_storage_cookie = cookies.get('eosAf') or cookies.get('eosAn')
        if auth_storage_cookie and auth_storage_cookie.value:
            auth_storage = self._parse_json(compat_urllib_parse_unquote(
                compat_urllib_parse_unquote(auth_storage_cookie.value)),
                display_id, fatal=False) or {}
            access_token = auth_storage.get('a') or auth_storage.get('access_token')

        if not access_token:
            access_token = self._download_json(
                'https://%s.com/anonymous' % site, display_id,
                'Downloading token JSON metadata', query={
                    'authRel': 'authorization',
                    'client_id': '3020a40c2356a645b4b4',
                    'nonce': ''.join([random.choice(string.ascii_letters) for _ in range(32)]),
                    'redirectUri': 'https://fusion.ddmcdn.com/app/mercury-sdk/180/redirectHandler.html?https://www.%s.com' % site,
                })['access_token']

        headers = self.geo_verification_headers()
        headers['Authorization'] = 'Bearer ' + access_token

        try:
            video = self._download_json(
                self._API_BASE_URL + 'content/videos',
                display_id, 'Downloading content JSON metadata',
                headers=headers, query={
                    'embed': 'show.name',
                    'fields': 'authenticated,description.detailed,duration,episodeNumber,id,name,parental.rating,season.number,show,tags',
                    'slug': display_id,
                    'show_slug': show_slug,
                })[0]
            video_id = video['id']
            stream = self._download_json(
                self._API_BASE_URL + 'streaming/video/' + video_id,
                display_id, 'Downloading streaming JSON metadata',
                headers=headers)
        except ExtractorError as e:
            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 403):
                e_description = self._parse_json(
                    e.cause.read().decode(), display_id)['description']
                if 'resource not available for country' in e_description:
                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                if 'Authorized Networks' in e_description:
                    raise ExtractorError(
                        'This video is only available via cable service provider subscription that'
                        ' is not currently supported. You may want to use --cookies.', expected=True)
                raise ExtractorError(e_description)
            raise

        return self._extract_video_info(video, stream, display_id)
Python
0
@@ -347,25 +347,12 @@ -(?:(?:www%7C go -) %5C. -)? disc @@ -374,18 +374,13 @@ -(?: www%5C. -)? %0A @@ -639,38 +639,8 @@ tlc -%7C%0A velocity %0A @@ -3175,93 +3175,27 @@ s:// -fusion.ddmcdn.com/app/mercury-sdk/180/redirectHandler.html?https://www.%25s.com' %25 site +www.discovery.com/' ,%0A
b78fb81cba34992bb84ed3814aae04ce05ef913f
Add del-uri.py example script
examples/scripts/del-uri.py
examples/scripts/del-uri.py
Python
0.000001
@@ -0,0 +1,3460 @@ +#!/usr/bin/env python3%0A###%0A# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A###%0Aimport sys%0Aif sys.version_info %3C (3, 4):%0A raise Exception(%22Must use Python 3.4 or later%22)%0A%0Aimport hpOneView as hpov%0Afrom pprint import pprint%0A%0A%0Adef acceptEULA(con):%0A # See if we need to accept the EULA before we try to log in%0A con.get_eula_status()%0A try:%0A if con.get_eula_status() is True:%0A print(%22EULA display needed%22)%0A con.set_eula('no')%0A except Exception as e:%0A print('EXCEPTION:')%0A print(e)%0A%0A%0Adef login(con, credential):%0A # Login with givin credentials%0A try:%0A con.login(credential)%0A except:%0A print('Login failed')%0A%0A%0Adef deluri(con, uri):%0A resource = con.delete(uri)%0A pprint(resource)%0A%0A%0Adef main():%0A parser = argparse.ArgumentParser(add_help=True,%0A formatter_class=argparse.RawTextHelpFormatter,%0A description='''%0A Delete resource by URI%0A%0A Usage: ''')%0A parser.add_argument('-a', dest='host', required=True,%0A help='''%0A HP OneView Appliance hostname or IP address''')%0A parser.add_argument('-u', dest='user', required=False,%0A default='Administrator',%0A help='''%0A HP OneView Username''')%0A parser.add_argument('-p', dest='passwd', required=True,%0A help='''%0A HP OneView Password''')%0A parser.add_argument('-c', dest='cert', required=False,%0A help='''%0A Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')%0A parser.add_argument('-y', dest='proxy', required=False,%0A help='''%0A Proxy (host:port format''')%0A parser.add_argument('-i', dest='uri', required=False,%0A help='''%0A URI of the resource to delete''')%0A%0A args = parser.parse_args()%0A credential = %7B'userName': args.user, 'password': args.passwd%7D%0A%0A con = hpov.connection(args.host)%0A sec = hpov.security(con)%0A%0A if args.proxy:%0A con.set_proxy(args.proxy.split(':')%5B0%5D, args.proxy.split(':')%5B1%5D)%0A if args.cert:%0A con.set_trusted_ssl_bundle(args.cert)%0A%0A login(con, credential)%0A acceptEULA(con)%0A%0A deluri(con, args.uri)%0A%0Aif __name__ == '__main__':%0A import sys%0A import argparse%0A sys.exit(main())%0A%0A# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:%0A
5cf3ff125226ddbf2edfad9d3c0d6ea2d59618ce
add missing file
pygraphviz/tests/test.py
pygraphviz/tests/test.py
Python
0.000003
@@ -0,0 +1,1105 @@ +#!/usr/bin/env python%0Aimport sys%0Afrom os import path,getcwd%0A%0Adef run(verbosity=1,doctest=False,numpy=True):%0A %22%22%22Run PyGraphviz tests.%0A%0A Parameters%0A ----------%0A verbosity: integer, optional%0A Level of detail in test reports. Higher numbers provide more detail.%0A%0A doctest: bool, optional%0A True to run doctests in code modules%0A %22%22%22%0A try:%0A import nose%0A except ImportError:%0A raise ImportError(%5C%0A %22The nose package is needed to run the tests.%22)%0A%0A sys.stderr.write(%22Running PyGraphiz tests:%22)%0A nx_install_dir=path.join(path.dirname(__file__), path.pardir)%0A # stop if running from source directory%0A if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)):%0A raise RuntimeError(%22Can't run tests from source directory.%5Cn%22%0A %22Run 'nosetests' from the command line.%22)%0A%0A argv=%5B' ','--verbosity=%25d'%25verbosity,%0A '-w',nx_install_dir,%0A '-exe'%5D%0A if doctest:%0A argv.extend(%5B'--with-doctest','--doctest-extension=txt'%5D)%0A nose.run(argv=argv)%0A%0Aif __name__==%22__main__%22:%0A run()%0A%0A
2af53a39096c0eab9d95c304c802281fe3c580ae
Make JAX CompiledFunction objects pickle-able.
tests/pickle_test.py
tests/pickle_test.py
Python
0.000032
@@ -0,0 +1,1496 @@ +# Copyright 2021 Google LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# https://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%22%22%22Tests for interoperability between JAX and pickling libraries.%22%22%22%0A%0Aimport pickle%0Aimport unittest%0A%0Afrom absl.testing import absltest%0A%0Atry:%0A import cloudpickle%0Aexcept ImportError:%0A cloudpickle = None%0A%0Aimport jax%0Afrom jax.config import config%0Afrom jax import test_util as jtu%0A%0Aconfig.parse_flags_with_absl()%0A%0A%0Aclass CloudpickleTest(jtu.JaxTestCase):%0A%0A @unittest.skipIf(cloudpickle is None, %22Requires cloudpickle%22)%0A @unittest.skipIf(jax.lib._xla_extension_version %3C 31,%0A %22Requires jaxlib 0.1.71%22)%0A def testPickleOfJittedFunctions(self):%0A%0A @jax.jit%0A def f(x, y):%0A return x * y%0A%0A @jax.jit%0A def g(z):%0A return f(z, z + 77) # noqa: F821%0A%0A expected = g(32)%0A s = cloudpickle.dumps(g)%0A del f, g%0A%0A g_unpickled = pickle.loads(s)%0A actual = g_unpickled(32)%0A self.assertEqual(expected, actual)%0A%0A%0Aif __name__ == %22__main__%22:%0A absltest.main(testLoader=jtu.JaxTestLoader())%0A
c4ee6bb374e07a07bac8b8f52cf94d7d474e0e33
Fix typo in test comment
tests/test_config.py
tests/test_config.py
import os
from pathlib import Path

from rhizo.config import load_config


def check_config(config):
    assert config.output_path == '/foo/bar'
    assert config.sub_config.a == 'test'
    assert config.sub_config.b == 2
    assert round(config.sub_config.c - 3.14, 4) == 0


def _load_test_config(filename, use_environ=False):
    """Load a config file from the test_data subdirectory."""
    path = Path(__file__).parent / 'test_data' / filename
    return load_config(str(path), use_environ)


def test_text_config():
    config = _load_test_config('sample_config.txt')
    check_config(config)


def test_environment_config():
    os.environ['RHIZO_SUB_CONFIG'] = 'a: override\nb: 3'
    os.environ['RHIZO_OTHER_SETTING'] = 'from_env'
    config = _load_test_config('sample_config.json', True)

    # Not overridden in environmene
    assert config.output_path == '/foo/bar'

    # Overridden in environment; dict value in environment
    assert config.sub_config == { "a": "override", "b": 3 }

    # Only specified in environment
    assert config.other_setting == 'from_env'


def test_json_config():
    # Make sure environment override only happens if requested
    os.environ['RHIZO_OUTPUT_PATH'] = 'overridden'
    config = _load_test_config('sample_config.json')
    check_config(config)


def test_hjson_config():
    config = _load_test_config('sample_config.hjson')
    check_config(config)


def test_config_update():
    config = _load_test_config('sample_config.hjson')
    config.update(_load_test_config('update.hjson'))
    assert config.output_path == '/foo/test'
    assert config.sub_config.a == 'test'
    assert config.sub_config.b == 3
Python
0.000092
@@ -827,17 +827,17 @@ vironmen -e +t %0A ass