column        type            range
commit        stringlengths   40 to 40
subject       stringlengths   1 to 3.25k
old_file      stringlengths   4 to 311
new_file      stringlengths   4 to 311
old_contents  stringlengths   0 to 26.3k
lang          stringclasses   3 values
proba         float64         0 to 1
diff          stringlengths   0 to 7.82k
a67a25b80c70450c3a68a3f8f6c2925a64a735b6
version bump and ug regen
src/robot/version.py
src/robot/version.py
# Automatically generated by 'package.py' script. import sys VERSION = 'trunk' RELEASE = '20131203' TIMESTAMP = '20131203-143027' def get_version(sep=' '): if RELEASE == 'final': return VERSION return VERSION + sep + RELEASE def get_full_version(who=''): sys_version = sys.version.split()[0] version = '%s %s (%s %s on %s)' \ % (who, get_version(), _get_interpreter(), sys_version, sys.platform) return version.strip() def _get_interpreter(): if sys.platform.startswith('java'): return 'Jython' if sys.platform == 'cli': return 'IronPython' if 'PyPy' in sys.version: return 'PyPy' return 'Python'
Python
0
@@ -88,21 +88,21 @@ E = '201 -31203 +40126 '%0ATIMEST @@ -115,20 +115,20 @@ '201 -31203-143027 +40126-184803 '%0A%0Ad
fa1f80f2cb99cf44c7584eed85a04fb8dee56a38
Move the encoding and shebang to the first two lines
testing-game.py
testing-game.py
''' * Copyright (c) 2015 Spotify AB. * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. ''' # !/usr/bin/python # -*- coding: utf-8 -*- import argparse import os import subprocess def find_xctest_tests(blame_lines, names, source, xctestsuperclasses): xctest_identifiers = ['XCTestCase'] xctest_identifiers.extend(xctestsuperclasses) contains_test_case = False for xctest_identifier in xctest_identifiers: contains_test_case |= source.find(xctest_identifier) != -1 if contains_test_case: break if contains_test_case: for blame_line in blame_lines: if blame_line.replace(' ', '').find('-(void)test') != -1: blame_info = blame_line[blame_line.find('(')+1:] blame_info = blame_info[:blame_info.find(')')] blame_components = blame_info.split() name_components = blame_components[:len(blame_components)-4] name = ' '.join(name_components) name_count = names.get(name, 0) names[name] = name_count + 1 return names def find_java_tests(blame_lines, names, source): next_is_test = False for blame_line in blame_lines: separator = blame_line.find(')') blame_code_nospaces = blame_line[separator+1:] blame_code_nospaces = blame_code_nospaces.replace(' ', '') blame_code_nospaces = blame_code_nospaces.replace('\t', '') if next_is_test or blame_code_nospaces.startswith('publicvoidtest'): blame_info = blame_line[:separator] name = blame_info[blame_info.find('<')+1:blame_info.find('@')] name_count = names.get(name, 0) names[name] = name_count + 1 next_is_test = False else: next_is_test = blame_code_nospaces.startswith('@Test') return names def find_boost_tests(blame_lines, names, source): test_cases = ['BOOST_AUTO_TEST_CASE', 'BOOST_FIXTURE_TEST_CASE'] for blame_line in blame_lines: contains_test_case = False for test_case in test_cases: contains_test_case |= blame_line.find(test_case) != -1 if contains_test_case: break if contains_test_case: blame_info = blame_line[blame_line.find('(')+1:] blame_info = blame_info[:blame_info.find(')')] blame_components = blame_info.split() name_components = blame_components[:len(blame_components)-4] name = ' '.join(name_components) name_count = names.get(name, 0) names[name] = name_count + 1 return names def find_nose_tests(blame_lines, names, source): for blame_line in blame_lines: separator = blame_line.find(')') blame_code_nospaces = blame_line[separator+1:] blame_code_nospaces = blame_code_nospaces.replace(' ', '') blame_code_nospaces = blame_code_nospaces.replace('\t', '') if blame_code_nospaces.startswith('deftest_'): blame_info = blame_line[:separator] name = blame_info[blame_info.find('<')+1:blame_info.find('@')] name_count = names.get(name, 0) names[name] = name_count + 1 return names def find_git_status(directory, xctestsuperclasses): names = {} objc_extensions = ['.m', '.mm'] java_extensions = ['.java'] cpp_extensions = ['.cpp', '.mm'] python_extensions = ['.py'] 
valid_extensions = objc_extensions valid_extensions.extend(java_extensions) valid_extensions.extend(cpp_extensions) valid_extensions.extend(python_extensions) for root, dirs, files in os.walk(directory): for name in files: filename, fileextension = os.path.splitext(name) absfile = os.path.join(root, name) if fileextension in valid_extensions: try: with open(absfile) as sourcefile: source = sourcefile.read() p = subprocess.Popen(['git', 'blame', absfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() blame_lines = out.splitlines() if fileextension in objc_extensions: names = find_xctest_tests(blame_lines, names, source, xctestsuperclasses) if fileextension in java_extensions: names = find_java_tests(blame_lines, names, source) if fileextension in cpp_extensions: names = find_boost_tests(blame_lines, names, source) if fileextension in python_extensions: names = find_nose_tests(blame_lines, names, source) except: 'Could not open file: ' + absfile return names if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-d', '--directory', help='The directory to search for files in', required=False, default=os.getcwd()) parser.add_argument('-x', '--xctestsuperclasses', help='A comma separated list of XCTest super classes', required=False, default='') args = parser.parse_args() xctest_superclasses = args.xctestsuperclasses.replace(' ', '').split(',') names = find_git_status(args.directory, xctest_superclasses) total_tests = 0 for name in names: total_tests += names[name] print "Total Tests: %(t)d" % {'t': total_tests} print "-------------------------------------------" sorted_list = sorted(names.items(), key=lambda x: x[1], reverse=True) for t in sorted_list: percentage = (float(t[1]) / float(total_tests)) * 100.0 t_index = sorted_list.index(t) + 1 print "%(i)d. %(n)s, %(t)d (%(p).2f%%)" % {'i': t_index, 'n': t[0], 't': t[1], 'p': percentage}
Python
0.000304
@@ -1,12 +1,55 @@ +# !/usr/bin/python%0A# -*- coding: utf-8 -*-%0A '''%0A * Copyr @@ -886,53 +886,8 @@ '''%0A -%0A# !/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0A impo
04aa968a70b8065c9c9cd013d1266f8988c4220a
remove accidentally committed maxDiff change
tests/__init__.py
tests/__init__.py
import os import unittest import pytest class ScraperTest(unittest.TestCase): maxDiff = None online = False test_file_name = None def setUp(self): os.environ[ "RECIPE_SCRAPERS_SETTINGS" ] = "tests.test_data.test_settings_module.test_settings" test_file_name = ( self.test_file_name if self.test_file_name else self.scraper_class.__name__.lower() ) with open( "tests/test_data/{}.testhtml".format(test_file_name), encoding="utf-8" ) as testfile: self.harvester_class = self.scraper_class(testfile) canonical_url = self.harvester_class.canonical_url() if self.online: if not canonical_url: pytest.skip( f"could not find canonical url for online test of scraper '{self.scraper_class.__name__}'" ) self.harvester_class = self.scraper_class(url=canonical_url)
Python
0
@@ -79,27 +79,8 @@ ):%0A%0A - maxDiff = None%0A
cc0521c2f72c534e2fa94573f90e9ec2bb169405
use utc time for timestamps
database.py
database.py
import os.path from datetime import datetime from collections import defaultdict from flask import json from flaskext.sqlalchemy import SQLAlchemy import logging log = logging.getLogger(__name__) log.setLevel(logging.INFO) db = SQLAlchemy() class User(db.Model): id = db.Column(db.Integer, primary_key=True) openid_url = db.Column(db.Text()) name = db.Column(db.Text()) email = db.Column(db.Text()) class Person(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Text()) def get_content(self): version = self.versions.order_by(ContentVersion.time.desc()).first() return {} if version is None else json.loads(version.content) def save_content_version(self, new_content, user): now = datetime.now() version = ContentVersion(person=self, user=user, time=now) version.content = json.dumps(new_content) db.session.add(version) db.session.commit() log.info("Content update for person id=%d version_id=%d", self.id, version.id) class ContentVersion(db.Model): id = db.Column(db.Integer, primary_key=True) person_id = db.Column(db.Integer, db.ForeignKey('person.id')) person = db.relationship('Person', backref=db.backref('versions', lazy='dynamic')) content = db.Column(db.LargeBinary) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User') time = db.Column(db.DateTime) def get_persons(): results = {} for person in Person.query.all(): results[person.id] = dict(person.get_content(), name=person.name) return results def import_json(json_path): now = datetime.now() count = defaultdict(int) with open(json_path, 'rb') as f: people_data = json.load(f) for person_data in people_data: found_persons = Person.query.filter_by(name=person_data['name']).all() if found_persons: assert len(found_persons) == 1 person = found_persons[0] else: person = Person(name=person_data['name']) db.session.add(person) log.info('New person %r, id=%d', person_data['name'], person.id) count['new-person'] += 1 emails = person_data['emails'] if emails: content = {'email': emails} if content != person.get_content(): version = ContentVersion(person=person, time=now) version.content = json.dumps(content) db.session.add(version) log.info('Content update for person id=%d', person.id) count['new-version'] += 1 db.session.commit() if count: log.info("JSON import from %r completed; %r", json_path, dict(count)) def get_user(openid_url): return User.query.filter_by(openid_url=openid_url).first() def get_update_user(openid_url, name, email): user = get_user(openid_url) if user is None: user = User(openid_url=openid_url) log.info("New user, openid_url=%r", openid_url) if (name, email) != (user.name, user.email): user.name = name user.email = email db.session.add(user) db.session.commit() log.info("User data modified for openid_url=%r", openid_url) return user
Python
0.000105
@@ -754,24 +754,27 @@ r):%0A +utc now = dateti @@ -768,32 +768,35 @@ cnow = datetime. +utc now()%0A ve @@ -843,24 +843,27 @@ =user, time= +utc now)%0A @@ -1689,16 +1689,19 @@ h):%0A +utc now = da @@ -1707,16 +1707,19 @@ atetime. +utc now()%0A @@ -2402,24 +2402,24 @@ _content():%0A - @@ -2467,16 +2467,19 @@ n, time= +utc now)%0A
c72b28ece7fe5313c7eff5f26d9ef0baaad1bad2
Update denormalization command
project/apps/api/management/commands/denormalize.py
project/apps/api/management/commands/denormalize.py
from django.core.management.base import ( BaseCommand, ) from apps.api.models import ( Convention, Contest, Contestant, Performance, Song, Group, Singer, Director, Panelist, ) class Command(BaseCommand): help = "Command to denormailze data." def handle(self, *args, **options): vs = Convention.objects.all() for v in vs: v.save() ts = Contest.objects.all() for t in ts: t.save() cs = Contestant.objects.all() for c in cs: c.save() as_ = Performance.objects.all() for a in as_: a.save() ps = Song.objects.all() for p in ps: p.save() ss = Singer.objects.all() for s in ss: s.save() js = Panelist.objects.all() for j in js: j.save() ds = Director.objects.all() for d in ds: d.save() return "Done"
Python
0.000004
@@ -118,26 +118,63 @@ st,%0A -Contestant +Award,%0A Contestant,%0A Entrant,%0A Session ,%0A Pe @@ -198,19 +198,8 @@ ng,%0A - Group,%0A @@ -518,72 +518,379 @@ -cs = Contestant.objects.all()%0A for c in cs:%0A c +ps = Panelist.objects.all()%0A for p in ps:%0A p.save()%0A ws = Award.objects.all()%0A for w in ws:%0A w.save()%0A es = Entrant.objects.all()%0A for e in es:%0A e.save()%0A cs = Contestant.objects.all()%0A for c in cs:%0A c.save()%0A ss = Session.objects.all()%0A for s in ss:%0A s .sav
74c4c832b5f99643ac23ad3885f22f7a493016f7
Update denormalization command
project/apps/api/management/commands/denormalize.py
project/apps/api/management/commands/denormalize.py
from django.core.management.base import ( BaseCommand, ) from apps.api.models import ( Convention, Contest, Contestant, Performance, Song, Group, Singer, Director, Panelist, ) class Command(BaseCommand): help = "Command to denormailze data." def handle(self, *args, **options): vs = Convention.objects.all() for v in vs: v.save() ts = Contest.objects.all() for t in ts: t.save() cs = Contestant.objects.all() for c in cs: c.save() as_ = Performance.objects.all() for a in as_: a.save() ps = Song.objects.all() for p in ps: p.save() ss = Singer.objects.all() for s in ss: s.save() js = Panelist.objects.all() for j in js: j.save() ds = Director.objects.all() for d in ds: d.save() return "Done"
Python
0.000004
@@ -118,26 +118,63 @@ st,%0A -Contestant +Award,%0A Contestant,%0A Entrant,%0A Session ,%0A Pe @@ -198,19 +198,8 @@ ng,%0A - Group,%0A @@ -518,72 +518,379 @@ -cs = Contestant.objects.all()%0A for c in cs:%0A c +ps = Panelist.objects.all()%0A for p in ps:%0A p.save()%0A ws = Award.objects.all()%0A for w in ws:%0A w.save()%0A es = Entrant.objects.all()%0A for e in es:%0A e.save()%0A cs = Contestant.objects.all()%0A for c in cs:%0A c.save()%0A ss = Session.objects.all()%0A for s in ss:%0A s .sav
7ebf1beec0912273317ed094e1c3806b2e910600
Remove commented lines
mbtiles/worker.py
mbtiles/worker.py
"""rio-mbtiles processing worker""" import logging import warnings from rasterio.enums import Resampling from rasterio.io import MemoryFile from rasterio.transform import from_bounds as transform_from_bounds from rasterio.warp import reproject, transform_bounds from rasterio.windows import Window from rasterio.windows import from_bounds as window_from_bounds import mercantile import rasterio # base_kwds = None # src = None TILES_CRS = "EPSG:3857" log = logging.getLogger(__name__) def init_worker(path, profile, resampling_method, open_opts, warp_opts): global base_kwds, filename, resampling, open_options, warp_options resampling = Resampling[resampling_method] base_kwds = profile.copy() filename = path open_options = open_opts.copy() if open_opts is not None else {} warp_options = warp_opts.copy() if warp_opts is not None else {} def process_tile(tile): """Process a single MBTiles tile Parameters ---------- tile : mercantile.Tile warp_options : Mapping GDAL warp options as keyword arguments. Returns ------- tile : mercantile.Tile The input tile. bytes : bytearray Image bytes corresponding to the tile. """ global base_kwds, resampling, filename, open_options, warp_options with rasterio.open(filename, **open_options) as src: # Get the bounds of the tile. ulx, uly = mercantile.xy(*mercantile.ul(tile.x, tile.y, tile.z)) lrx, lry = mercantile.xy(*mercantile.ul(tile.x + 1, tile.y + 1, tile.z)) kwds = base_kwds.copy() kwds["transform"] = transform_from_bounds( ulx, lry, lrx, uly, kwds["width"], kwds["height"] ) src_nodata = kwds.pop("src_nodata", None) dst_nodata = kwds.pop("dst_nodata", None) warnings.simplefilter("ignore") log.info("Reprojecting tile: tile=%r", tile) with MemoryFile() as memfile: with memfile.open(**kwds) as tmp: # determine window of source raster corresponding to the tile # image, with small buffer at edges try: west, south, east, north = transform_bounds( TILES_CRS, src.crs, ulx, lry, lrx, uly ) tile_window = window_from_bounds( west, south, east, north, transform=src.transform ) adjusted_tile_window = Window( tile_window.col_off - 1, tile_window.row_off - 1, tile_window.width + 2, tile_window.height + 2, ) tile_window = adjusted_tile_window.round_offsets().round_shape() # if no data in window, skip processing the tile if not src.read_masks(1, window=tile_window).any(): return tile, None except ValueError: log.info( "Tile %r will not be skipped, even if empty. This is harmless.", tile, ) num_threads = int(warp_options.pop("num_threads", 2)) reproject( rasterio.band(src, tmp.indexes), rasterio.band(tmp, tmp.indexes), src_nodata=src_nodata, dst_nodata=dst_nodata, num_threads=num_threads, resampling=resampling, **warp_options ) return tile, memfile.read()
Python
0
@@ -395,42 +395,8 @@ io%0A%0A -%0A# base_kwds = None%0A# src = None%0A%0A TILE
6785219c9e4e4bfd1d28e4802e992b84000a7f63
increase default read timeout to 5 seconds
pyatk/channel/uart.py
pyatk/channel/uart.py
# Copyright (c) 2012-2013 Harry Bock <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import serial from pyatk.channel import base class UARTChannel(base.ATKChannelI): """ A serial port communications channel. The serial port is automatically configured for 115200 baud, 8N1, no flow control. """ def __init__(self, port): super(UARTChannel, self).__init__() self._ramkernel_channel_type = base.CHANNEL_TYPE_UART self.port = None port = serial.serial_for_url(port, do_not_open = True) port.baudrate = 115200 port.parity = serial.PARITY_NONE port.stopbits = serial.STOPBITS_ONE port.bytesize = serial.EIGHTBITS port.timeout = 0.5 port.rtscts = False port.xonxoff = False port.dsrdtr = False self.port = port def open(self): self.port.open() def close(self): self.port.close() def write(self, data): # Writes cannot time out with no flow control, so ChannelWriteTimeout # is not raised. self.port.write(data) def read(self, length): """ Read exactly ``length`` bytes from the UART channel. """ data_read = [] data_length = 0 while data_length < length: data = self.port.read((length - data_length)) # No data read indicates a timeout has occurred. if data == "": raise base.ChannelReadTimeout(length, "".join(data_read)) data_read.append(data) data_length += len(data) return "".join(data_read)
Python
0
@@ -2002,10 +2002,8 @@ = -0. 5%0A
5e02a4c3c7664d58375ec4af980e25322f98eb3c
add missing converter functions
pyexcel_io/service.py
pyexcel_io/service.py
""" pyexcel_io.service ~~~~~~~~~~~~~~~~~~~ provide service code to downstream projects :copyright: (c) 2014-2017 by Onni Software Ltd. :license: New BSD License, see LICENSE for more details """ import re import math import datetime from pyexcel._compact import PY2 def has_no_digits_in_float(value): """check if a float value had zero value in digits""" return value == math.floor(value) def detect_date_value(cell_text): """ Read the date formats that were written by csv.writer """ ret = None try: if len(cell_text) == 10: ret = datetime.datetime.strptime( cell_text, "%Y-%m-%d") ret = ret.date() elif len(cell_text) == 19: ret = datetime.datetime.strptime( cell_text, "%Y-%m-%d %H:%M:%S") elif len(cell_text) > 19: ret = datetime.datetime.strptime( cell_text[0:26], "%Y-%m-%d %H:%M:%S.%f") except ValueError: pass return ret def detect_float_value(cell_text): try: should_we_skip_it = (cell_text.startswith('0') and cell_text.startswith('0.') is False) if should_we_skip_it: # do not convert if a number starts with 0 # e.g. 014325 return None else: return float(cell_text) except ValueError: return None def detect_int_value(cell_text): if cell_text.startswith('0') and len(cell_text) > 1: return None try: return int(cell_text) except ValueError: pattern = '([0-9]+,)*[0-9]+$' if re.match(pattern, cell_text): integer_string = cell_text.replace(',', '') return int(integer_string) else: return None def float_value(value): """convert a value to float""" ret = float(value) return ret def date_value(value): """convert to data value accroding ods specification""" ret = "invalid" try: # catch strptime exceptions only if len(value) == 10: ret = datetime.datetime.strptime( value, "%Y-%m-%d") ret = ret.date() elif len(value) == 19: ret = datetime.datetime.strptime( value, "%Y-%m-%dT%H:%M:%S") elif len(value) > 19: ret = datetime.datetime.strptime( value[0:26], "%Y-%m-%dT%H:%M:%S.%f") except ValueError: pass if ret == "invalid": raise Exception("Bad date value %s" % value) return ret def time_value(value): """convert to time value accroding the specification""" import re results = re.match('PT(\d+)H(\d+)M(\d+)S', value) if results and len(results.groups()) == 3: hour = int(results.group(1)) minute = int(results.group(2)) second = int(results.group(3)) if hour < 24: ret = datetime.time(hour, minute, second) else: ret = datetime.timedelta(hours=hour, minutes=minute, seconds=second) else: ret = None return ret def boolean_value(value): """get bolean value""" if value == "true": ret = True else: ret = False return ret ODS_FORMAT_CONVERSION = { "float": float, "date": datetime.date, "time": datetime.time, 'timedelta': datetime.timedelta, "boolean": bool, "percentage": float, "currency": float } ODS_WRITE_FORMAT_COVERSION = { float: "float", int: "float", str: "string", datetime.date: "date", datetime.time: "time", datetime.timedelta: "timedelta", bool: "boolean" } if PY2: ODS_WRITE_FORMAT_COVERSION[unicode] = "string" VALUE_CONVERTERS = { "float": float_value, "date": date_value, "time": time_value, "timedelta": time_value, "boolean": boolean_value, "percentage": float_value } VALUE_TOKEN = { "float": "value", "date": "date-value", "time": "time-value", "boolean": "boolean-value", "percentage": "value", "currency": "value", "timedelta": "time-value" }
Python
0.000002
@@ -4074,16 +4074,713 @@ lue%0A%7D%0A%0A%0A +def ods_date_value(value):%0A return value.strftime(%22%25Y-%25m-%25d%22)%0A%0A%0Adef ods_time_value(value):%0A return value.strftime(%22PT%25HH%25MM%25SS%22)%0A%0A%0Adef ods_bool_value(value):%0A %22%22%22convert a boolean value to text%22%22%22%0A if value is True:%0A return %22true%22%0A else:%0A return %22false%22%0A%0A%0Adef ods_timedelta_value(cell):%0A %22%22%22convert a cell value to time delta%22%22%22%0A hours = cell.days * 24 + cell.seconds // 3600%0A minutes = (cell.seconds // 60) %25 60%0A seconds = cell.seconds %25 60%0A return %22PT%2502dH%2502dM%2502dS%22 %25 (hours, minutes, seconds)%0A%0A%0AODS_VALUE_CONVERTERS = %7B%0A %22date%22: ods_date_value,%0A %22time%22: ods_time_value,%0A %22boolean%22: ods_bool_value,%0A %22timedelta%22: ods_timedelta_value%0A%7D%0A%0A%0A VALUE_TO
ec9bc89372670e623dbe98c34591fba62a0ee64a
Rename merge to pack in postp.
pyfr/scripts/postp.py
pyfr/scripts/postp.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from tempfile import NamedTemporaryFile from argparse import ArgumentParser, FileType import numpy as np from pyfr.util import rm def process_pack(args): # List the contents of the directory relnames = os.listdir(args.indir) # Get the absolute file names and extension-less file names absnames = [os.path.join(args.indir, f) for f in relnames] repnames = [f[:-4] for f in relnames] # Open/load the files files = [np.load(f, mmap_mode='r') for f in absnames] # Get the output pyfrs file name outname = args.outf or args.indir.rstrip('/') # Determine the dir and prefix of the temp file dirname, basename = os.path.split(outname) # Create a named temp file tempf = NamedTemporaryFile(prefix=basename, dir=dirname, delete=False) try: # Write the contents of the directory out as an npz (pyfrs) file np.savez(tempf, **dict(zip(repnames, files))) tempf.close() # Remove the output path if it should exist if os.path.exists(outname): rm(outname) # Rename the temp file into place os.rename(tempf.name, outname) except: # Clean up the temporary file if os.path.exists(tempf.name): os.remove(tempf.name) # Re-raise raise def main(): ap = ArgumentParser(prog='pyfr-postp', description='Post processes a ' 'PyFR simulation') sp = ap.add_subparsers(help='sub-command help') ap_merge = sp.add_parser('pack', help='pack --help', description='Packs a ' 'pyfrs-directory into a pyfrs-file. If no ' 'output file is specified then that of the ' 'input directory is taken. This command will ' 'replace any existing file or directory.') ap_merge.add_argument('indir', metavar='in', help='Input PyFR solution directory') ap_merge.add_argument('outf', metavar='out', nargs='?', help='Out PyFR solution file') ap_merge.set_defaults(process=process_pack) # Parse the arguments args = ap.parse_args() args.process(args) if __name__ == '__main__': main()
Python
0
@@ -1540,21 +1540,20 @@ %0A ap_ -merge +pack = sp.ad @@ -1640,17 +1640,16 @@ - 'pyfrs-d @@ -1685,17 +1685,16 @@ If no '%0A - @@ -1786,17 +1786,16 @@ - 'input d @@ -1834,17 +1834,16 @@ will '%0A - @@ -1904,37 +1904,36 @@ ctory.')%0A ap_ -merge +pack .add_argument('i @@ -1953,17 +1953,16 @@ r='in',%0A - @@ -2023,21 +2023,20 @@ %0A ap_ -merge +pack .add_arg @@ -2096,25 +2096,24 @@ - help='Out Py @@ -2142,13 +2142,12 @@ ap_ -merge +pack .set
ef628bcdd79ceb28e2b320059c9b00e52372663a
Improve the error message when PyGMT fails to load the GMT library (#814)
pygmt/clib/loading.py
pygmt/clib/loading.py
""" Utility functions to load libgmt as ctypes.CDLL. The path to the shared library can be found automatically by ctypes or set through the GMT_LIBRARY_PATH environment variable. """ import ctypes import os import sys from ctypes.util import find_library from pygmt.exceptions import GMTCLibError, GMTCLibNotFoundError, GMTOSError def load_libgmt(): """ Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`. By default, will look for the shared library in the directory specified by the environment variable ``GMT_LIBRARY_PATH``. If it's not set, will let ctypes try to find the library. Returns ------- :py:class:`ctypes.CDLL` object The loaded shared library. Raises ------ GMTCLibNotFoundError If there was any problem loading the library (couldn't find it or couldn't access the functions). """ lib_fullnames = clib_full_names() error = True for libname in lib_fullnames: try: libgmt = ctypes.CDLL(libname) check_libgmt(libgmt) error = False break except OSError as err: error = err if error: raise GMTCLibNotFoundError( "Error loading the GMT shared library '{}':".format( ", ".join(lib_fullnames) ) ) return libgmt def clib_names(os_name): """ Return the name of GMT's shared library for the current OS. Parameters ---------- os_name : str The operating system name as given by ``sys.platform``. Returns ------- libnames : list of str List of possible names of GMT's shared library. """ if os_name.startswith("linux"): libnames = ["libgmt.so"] elif os_name == "darwin": # Darwin is macOS libnames = ["libgmt.dylib"] elif os_name == "win32": libnames = ["gmt.dll", "gmt_w64.dll", "gmt_w32.dll"] elif os_name.startswith("freebsd"): # FreeBSD libnames = ["libgmt.so"] else: raise GMTOSError(f'Operating system "{sys.platform}" not supported.') return libnames def clib_full_names(env=None): """ Return the full path of GMT's shared library for the current OS. Parameters ---------- env : dict or None A dictionary containing the environment variables. If ``None``, will default to ``os.environ``. Returns ------- lib_fullnames: list of str List of possible full names of GMT's shared library. """ if env is None: env = os.environ libnames = clib_names(os_name=sys.platform) # e.g. libgmt.so, libgmt.dylib, gmt.dll libpath = env.get("GMT_LIBRARY_PATH", "") # e.g. $HOME/miniconda/envs/pygmt/lib lib_fullnames = [os.path.join(libpath, libname) for libname in libnames] # Search for DLLs in PATH if GMT_LIBRARY_PATH is not defined [Windows only] if not libpath and sys.platform == "win32": for libname in libnames: libfullpath = find_library(libname) if libfullpath: lib_fullnames.append(libfullpath) return lib_fullnames def check_libgmt(libgmt): """ Make sure that libgmt was loaded correctly. Checks if it defines some common required functions. Does nothing if everything is fine. Raises an exception if any of the functions are missing. Parameters ---------- libgmt : :py:class:`ctypes.CDLL` A shared library loaded using ctypes. Raises ------ GMTCLibError """ # Check if a few of the functions we need are in the library functions = ["Create_Session", "Get_Enum", "Call_Module", "Destroy_Session"] for func in functions: if not hasattr(libgmt, "GMT_" + func): msg = " ".join( [ "Error loading libgmt.", "Couldn't access function GMT_{}.".format(func), ] ) raise GMTCLibError(msg)
Python
0.000811
@@ -1252,22 +1252,9 @@ ary -'%7B%7D':%22.format( +%22 %0A @@ -1266,16 +1266,15 @@ - %22, %22 +f%22%7B', ' .joi @@ -1289,30 +1289,30 @@ llnames) -%0A ) +%7D.%5Cn %7Berror%7D.%22 %0A
d3bc063cc35f5b7bc806c83cd23780108c509fb6
Disable checkin in embedded mode.
pykeg/core/checkin.py
pykeg/core/checkin.py
# Copyright 2014 Bevbot LLC, All Rights Reserved # # This file is part of the Pykeg package of the Kegbot project. # For more information on Pykeg or Kegbot, see http://kegbot.org/ # # Pykeg is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Pykeg is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Pykeg. If not, see <http://www.gnu.org/licenses/>. """Checks a central server for updates.""" from django.utils import timezone from pykeg.core import models from pykeg.core import util import logging import os import requests FIELD_REG_ID = 'reg_id' FIELD_PRODUCT = 'product' FIELD_VERSION = 'version' FIELD_INTERVAL_MILLIS = 'interval_millis' FIELD_UPDATE_AVAILABLE = 'update_available' FIELD_UPDATE_REQUIRED = 'update_required' FIELD_UPDATE_TITLE = 'update_title' FIELD_UPDATE_URL = 'update_url' FIELD_NEWS = 'news' PRODUCT = 'kegbot-server' CHECKIN_URL = os.getenv('CHECKIN_URL', None) or 'https://kegbotcheckin.appspot.com/checkin' LOGGER = logging.getLogger('checkin') logging.getLogger('requests').setLevel(logging.WARNING) class CheckinError(Exception): """Base exception.""" def checkin(url=CHECKIN_URL, product=PRODUCT, timeout=None, quiet=False): """Issue a single checkin to the checkin server. No-op if kbsite.check_for_updates is False. Returns A checkin response dictionary, or None if checkin is disabled. Raises ValueError: On malformed reponse. requests.RequestException: On error talking to server. """ kbsite = models.KegbotSite.get() if not kbsite.check_for_updates: LOGGER.debug('Upgrade check is disabled') return site = models.KegbotSite.get() reg_id = site.registration_id headers = { 'User-Agent': util.get_user_agent(), } payload = { FIELD_PRODUCT: product, FIELD_REG_ID: reg_id, FIELD_VERSION: util.get_version(), } try: LOGGER.debug('Checking in, url=%s reg_id=%s' % (url, reg_id)) result = requests.post(url, data=payload, headers=headers, timeout=timeout).json() new_reg_id = result.get(FIELD_REG_ID) if new_reg_id != reg_id: LOGGER.debug('Updating reg_id=%s' % new_reg_id) site.registration_id = new_reg_id site.save() LOGGER.debug('Checkin result: %s' % str(result)) if not quiet: LOGGER.info('Checkin complete, reg_id=%s' % (reg_id,)) site.last_checkin_response = result site.last_checkin_time = timezone.now() site.save() return result except (ValueError, requests.RequestException) as e: if not quiet: LOGGER.warning('Checkin error: %s' % str(e)) raise CheckinError(e)
Python
0
@@ -834,16 +834,49 @@ es.%22%22%22%0A%0A +from django.conf import settings%0A from dja @@ -1957,16 +1957,116 @@ %22%22%22%0A + if settings.EMBEDDED:%0A LOGGER.debug('Checkin disabled in embedded mode')%0A return%0A%0A kbsi
454abe8f3081e8186b29d677e2bca88ca10112c7
Add support for a GYP_DEFINES environment variable as a way to pass in default values for definitions. Review URL: http://codereview.chromium.org/62128
pylib/gyp/__init__.py
pylib/gyp/__init__.py
#!/usr/bin/python import gyp.input import optparse import os.path import sys def FindBuildFiles(): extension = '.gyp' files = os.listdir(os.getcwd()) build_files = [] for file in files: if file[-len(extension):] == extension: build_files.append(file) return build_files def main(args): my_name = os.path.basename(sys.argv[0]) parser = optparse.OptionParser() usage = 'usage: %s [options ...] [build_file ...]' parser.set_usage(usage.replace('%s', '%prog')) parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL', help='sets variable VAR to value VAL') parser.add_option('-f', '--format', dest='formats', action='append', help='output formats to generate') parser.add_option('-I', '--include', dest='includes', action='append', metavar='INCLUDE', help='files to include in all loaded .gyp files') parser.add_option('--depth', dest='depth', metavar='PATH', help='set DEPTH gyp variable to a relative path to PATH') parser.add_option('-S', '--suffix', dest='suffix', default='', help='suffix to add to generated files') parser.add_option('--generator-flags', dest='generator_flags', default='', help='comma separated list of flag names to pass to the ' 'generator') (options, build_files) = parser.parse_args(args) if not options.formats: options.formats = [ {'darwin': 'xcode', 'win32': 'msvs', 'cygwin': 'msvs', 'linux2': 'scons',}[sys.platform] ] if not build_files: build_files = FindBuildFiles() if not build_files: print >>sys.stderr, (usage + '\n\n%s: error: no build_file') % \ (my_name, my_name) return 1 # TODO(mark): Chromium-specific hack! # For Chromium, the gyp "depth" variable should always be a relative path # to Chromium's top-level "src" directory. If no depth variable was set # on the command line, try to find a "src" directory by looking at the # absolute path to each build file's directory. The first "src" component # found will be treated as though it were the path used for --depth. if not options.depth: for build_file in build_files: build_file_dir = os.path.abspath(os.path.dirname(build_file)) build_file_dir_components = build_file_dir.split(os.path.sep) components_len = len(build_file_dir_components) for index in xrange(components_len - 1, -1, -1): if build_file_dir_components[index] == 'src': options.depth = os.path.sep.join(build_file_dir_components) break del build_file_dir_components[index] # If the inner loop found something, break without advancing to another # build file. if options.depth: break if not options.depth: raise Exception, \ 'Could not automatically locate src directory. This is a ' + \ 'temporary Chromium feature that will be removed. Use ' + \ '--depth as a workaround.' # -D on the command line sets variable defaults - D isn't just for define, # it's for default. Perhaps there should be a way to force (-F?) a # variable's value so that it can't be overridden by anything else. cmdline_default_variables = {} if options.defines: for define in options.defines: tokens = define.split('=', 1) if len(tokens) == 2: # Set the variable to the supplied value. cmdline_default_variables[tokens[0]] = tokens[1] else: # No value supplied, treat it as a boolean and set it. cmdline_default_variables[tokens[0]] = True # Set up includes. includes = [] # If ~/.gyp/include.gypi exists, it'll be forcibly included into every # .gyp file that's loaded, before anything else is included. 
home_vars = ['HOME'] if sys.platform in ('cygwin', 'win32'): home_vars.append('USERPROFILE') home = None for home_var in home_vars: home = os.getenv(home_var) if home != None: break if home != None: default_include = os.path.join(home, '.gyp', 'include.gypi') if os.path.exists(default_include): includes.append(default_include) # Command-line --include files come after the default include. if options.includes: includes.extend(options.includes) # Generator flags should be prefixed with the target generator since they # are global across all generator runs. generator_flags = options.generator_flags.split(',') # Generate all requested formats for format in options.formats: # Start with the default variables from the command line. default_variables = cmdline_default_variables.copy() # Default variables provided by this program and its modules should be # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace, # avoiding collisions with user and automatic variables. default_variables['GENERATOR'] = format generator_name = 'gyp.generator.' + format # These parameters are passed in order (as opposed to by key) # because ActivePython cannot handle key parameters to __import__. generator = __import__(generator_name, globals(), locals(), generator_name) default_variables.update(generator.generator_default_variables) # Process the input specific to this generator. [flat_list, targets, data] = gyp.input.Load(build_files, default_variables, includes[:], options.depth) params = {'options': options, 'build_files': build_files, 'generator_flags': generator_flags} # TODO(mark): Pass |data| for now because the generator needs a list of # build files that came in. In the future, maybe it should just accept # a list, and not the whole data dict. # NOTE: flat_list is the flattened dependency graph specifying the order # that targets may be built. Build systems that operate serially or that # need to have dependencies defined before dependents reference them should # generate targets in the order specified in flat_list. generator.GenerateOutput(flat_list, targets, data, params) # Done return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
Python
0
@@ -60,16 +60,29 @@ os.path%0A +import shlex%0A import s @@ -3411,45 +3411,105 @@ %7D%0A -if options.defines:%0A for +defines = os.environ.get('GYP_DEFINES', %5B%5D)%0A if defines:%0A defines = shlex.split( define +s)%0A i -n +f opt @@ -3520,24 +3520,78 @@ .defines:%0A + defines += options.defines%0A for define in defines:%0A tokens = @@ -3616,18 +3616,16 @@ 1)%0A - if len(t @@ -3637,18 +3637,16 @@ ) == 2:%0A - # @@ -3683,34 +3683,32 @@ ed value.%0A - cmdline_default_ @@ -3740,18 +3740,16 @@ kens%5B1%5D%0A - else @@ -3756,18 +3756,16 @@ :%0A - # No val @@ -3811,18 +3811,16 @@ set it.%0A - cm
301b2ca9cdf33665312e092937c63b1db7db888f
Add missing imports
pymessenger2/utils.py
pymessenger2/utils.py
import hashlib import hmac import six def validate_hub_signature(app_secret, request_payload, hub_signature_header): """ @inputs: app_secret: Secret Key for application request_payload: request body hub_signature_header: X-Hub-Signature header sent with request @outputs: boolean indicated that hub signature is validated """ try: hash_method, hub_signature = hub_signature_header.split('=') except: pass else: digest_module = getattr(hashlib, hash_method) hmac_object = hmac.new( str(app_secret), unicode(request_payload), digest_module) generated_hash = hmac_object.hexdigest() if hub_signature == generated_hash: return True return False def generate_appsecret_proof(access_token, app_secret): """ @inputs: access_token: page access token app_secret_token: app secret key @outputs: appsecret_proof: HMAC-SHA256 hash of page access token using app_secret as the key """ if six.PY2: hmac_object = hmac.new( str(app_secret), unicode(access_token), hashlib.sha256) else: hmac_object = hmac.new( bytearray(app_secret, 'utf8'), str(access_token).encode('utf8'), hashlib.sha256) generated_hash = hmac_object.hexdigest() return generated_hash class ToJsonMixin: """ Derive from this with an `.asdict` member to get a working `to_json` function! """ def to_json(self): items_iterator = (attr.asdict(self).items() if six.PY3 else attr.asdict(self).iteritems()) return json.dumps({k: v for k, v in items_iterator if v is not None})
Python
0.000009
@@ -31,16 +31,40 @@ ort six%0A +import attr%0Aimport json%0A %0A%0Adef va
20d41656488ea43978f749e2e34303e49981695c
fix imports to include OR tools
pymzn/mzn/__init__.py
pymzn/mzn/__init__.py
from .model import * from .solvers import * from .minizinc import * from .templates import * __all__ = [ 'Solutions', 'minizinc', 'mzn2fzn', 'solns2out', 'MiniZincError', 'MiniZincUnsatisfiableError', 'MiniZincUnknownError', 'MiniZincUnboundedError', 'MiniZincModel', 'Statement', 'Constraint', 'Variable', 'ArrayVariable', 'OutputStatement', 'SolveStatement', 'Solver', 'Gecode', 'Chuffed', 'Optimathsat', 'Opturion', 'MIPSolver', 'Gurobi', 'CBC', 'G12Solver', 'G12Fd', 'G12Lazy', 'G12MIP', 'OscarCBLS', 'gecode', 'chuffed', 'optimathsat', 'opturion', 'gurobi', 'cbc', 'g12fd', 'g12lazy', 'g12mip', 'oscar_cbls', 'discretize', 'from_string', 'add_package', 'add_path' ]
Python
0
@@ -523,16 +523,31 @@ arCBLS', + 'ORTools',%0A 'gecode @@ -548,20 +548,16 @@ gecode', -%0A 'chuffe @@ -612,16 +612,20 @@ 'g12fd', +%0A 'g12laz @@ -627,20 +627,16 @@ 12lazy', -%0A 'g12mip @@ -651,16 +651,28 @@ r_cbls', + 'or_tools', 'discre @@ -692,16 +692,20 @@ string', +%0A 'add_pa @@ -711,20 +711,16 @@ ackage', -%0A 'add_pa
9395ef840d9dd9194b2f89e2a7240bb951662e0b
add missing imports
tabby/conversation.py
tabby/conversation.py
# # Copyright (C) 2006 Collabora Limited # Copyright (C) 2006 Nokia Corporation # @author Ole Andre Vadla Ravnaas <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Library General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import gobject import pango import gtk import scw from util import * class Conversation: def __init__(self, conn, notebook, handle, name): xml = gtk.glade.XML("data/glade/conversation.glade", "main_vbox") vbox = xml.get_widget("main_vbox") tbtn = xml.get_widget("call_toolbtn") tbtn.connect("clicked", self._call_button_clicked_cb) chat_sw = xml.get_widget("chat_sw") self._members_lbl = xml.get_widget("members_lbl") self._local_lbl = xml.get_widget("local_lbl") self._remote_lbl = xml.get_widget("remote_lbl") self._mem_entry = xml.get_widget("member_entry") self._mem_add_btn = xml.get_widget("member_add_btn") self._mem_add_btn.connect("clicked", self._mem_add_btn_clicked_cb) self._mem_rem_btn = xml.get_widget("member_rem_btn") self._mem_rem_btn.connect("clicked", self._mem_rem_btn_clicked_cb) self._media_frame = xml.get_widget("smc_debug_frame") # Call toolbar button image = gtk.Image() image.set_from_file("data/images/call.png") tbtn.set_icon_widget(image) # Chat widget self._model = gtk.ListStore(scw.TYPE_TIMESTAMP, scw.TYPE_PRESENCE, gobject.TYPE_STRING, scw.TYPE_ROW_COLOR) self._view = scw.View() #self._view.connect("activate", self.gtk_view_activate_cb) #self._view.connect("context-request", self.gtk_view_context_request_cb) #self._view.connect("key-press-event", self.gtk_view_key_press_event_cb) self._view.set_property("model", self._model) self._view.set_property("align-presences", True) self._view.set_property("presence-alignment", pango.ALIGN_CENTER) self._view.set_property("scroll-on-append", True) self._view.set_property("timestamp-format", "%H:%M") self._view.set_property("action-attributes", "underline='single' weight='bold'") self._view.set_property("selection-row-separator", "\n\n") self._view.set_property("selection-column-separator", "\t") chat_sw.add(self._view) # Entry widget self._entry = scw.Entry() #self._entry.connect("activate", self._entry_activate_cb) self._entry.set_property("history-size", 100) vbox.pack_end(self._entry, False, True, 2) self._conn = conn self._notebook = notebook if name is not None: pos = name.index("@") title = name[:pos] else: title = "Conversation" self._page_index = notebook.append_page(vbox, gtk.Label(title)) self._handle = handle self._name = name self._media_chan = None # Show the widgets created by us image.show() self._view.show() self._entry.show() def show(self): self._notebook.set_current_page(self._page_index) gobject.idle_add(self._entry.grab_focus) def take_media_channel(self, chan): self._media_chan = chan chan.connect("flags-changed", self._media_flags_changed_cb) chan.connect("members-changed", lambda chan, *args: self._media_update_members(chan)) self._media_frame.show() def 
_mem_add_btn_clicked_cb(self, button): str = self._mem_entry.get_text() if not str: return try: handle = int(str) self._media_chan.add_member(handle) except ValueError: return def _mem_rem_btn_clicked_cb(self, button): str = self._mem_entry.get_text() if not str: return try: handle = int(str) self._media_chan.remove_member(handle) except ValueError: return def _media_flags_changed_cb(self, chan): flags = self._media_chan[CHANNEL_INTERFACE_GROUP].GetGroupFlags() print "new flags:", flags can_add = (flags & CHANNEL_GROUP_FLAG_CAN_ADD) != 0 can_rem = (flags & CHANNEL_GROUP_FLAG_CAN_REMOVE) != 0 self._mem_add_btn.set_sensitive(can_add) self._mem_rem_btn.set_sensitive(can_rem) def _media_update_members(self, chan): members = self._media_chan[CHANNEL_INTERFACE_GROUP].GetMembers() local = self._media_chan[CHANNEL_INTERFACE_GROUP].GetLocalPendingMembers() remote = self._media_chan[CHANNEL_INTERFACE_GROUP].GetRemotePendingMembers() self._members_lbl.set_text(str(members)) self._local_lbl.set_text(str(local)) self._remote_lbl.set_text(str(remote)) if members: member = members[0] elif local: member = local[0] elif remote: member = remote[0] else: member = "" self._mem_entry.set_text(str(member)) def _call_button_clicked_cb(self, button): print "requesting StreamedMediaChannel with", self._name dbus_call_async(self._conn[CONN_INTERFACE].RequestChannel, CHANNEL_TYPE_STREAMED_MEDIA, CONNECTION_HANDLE_TYPE_CONTACT, self._handle, True, reply_handler=self._media_request_channel_reply_cb, error_handler=self._error_cb) def _media_request_channel_reply_cb(self, obj_path): channel = StreamedMediaChannel(self._conn, obj_path) self.take_media_channel(channel) def _error_cb(self, error): print "_error_cb: got error '%s'" % error
Python
0.000605
@@ -913,16 +913,29 @@ ort gtk%0A +import pango%0A import s @@ -957,16 +957,50 @@ port *%0A%0A +from util import dbus_call_async%0A%0A class Co
3cd595fb0a2f1d027aefe70b59e235ef3dd14d61
update basespider download
xspider/libs/basespider/basespider.py
xspider/libs/basespider/basespider.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Created on 2017-02-21 # Project: basespider import json import time import socket import requests import traceback from requests.exceptions import ReadTimeout from requests.exceptions import ConnectionError class BaseGenerator(object): """ BaseSpider Generator """ def __init__(self): """ Generator Initialization """ self.urls = [] def generate(self, url): """ Obtain URI :return: """ self.urls.append(url) for url in self.urls: print json.dumps({"url": url, "args": 'None'}) def start_generator(self): """ Start Generator :return: """ start_url = "__START_URL__" self.generate(start_url) class BaseDownloader(object): """ BaseSpider Downloader """ def __init__(self): """ Downloader Initialization """ self.reqst = requests.Session() self.reqst.headers.update( {'Accept': 'text/html, application/xhtml+xml, */*', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US, en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:39.0) Gecko/20100101 Firefox/39.0'}) def download(self, url, type="request", timeout=50): """ Downloader Download By Type :return: response object """ if type == "request": start_time = time.time() try: resp = self.reqst.get(url, timeout=timeout) if resp.status_code != 200: resp = self.reqst.get(url, timeout=50) if resp.status_code != 200: raise ConnectionError end_time = time.time() return resp except Exception: print traceback.format_exc() class BaseParser(object): """ BaseSpider Parser """ def __init__(self): """ Parser Initialization """ pass def parser(self, resp): """ Paeser resp content :param resp: :return: """ return resp
Python
0
@@ -1016,29 +1016,17 @@ elf. -reqst. headers -.update( + = %0A @@ -1324,17 +1324,16 @@ x/39.0'%7D -) %0A%0A de @@ -1355,19 +1355,20 @@ , url, t -ype +ools =%22reques @@ -1381,16 +1381,26 @@ meout=50 +, **kwargs ):%0A @@ -1499,19 +1499,20 @@ if t -ype +ools == %22req @@ -1630,16 +1630,26 @@ =timeout +, **kwargs )%0A
80bc283676be51ef67fe7924bcc32adaa93fc985
Change timestamp format
guestbook/__init__.py
guestbook/__init__.py
# coding: utf-8 import pickle from datetime import datetime from collections import namedtuple, deque from flask import Flask, request, render_template, redirect, escape, Markup application = Flask(__name__) DATA_FILE = 'guestbook.dat' Post = namedtuple('Post', ['name', 'timestamp', 'comment']) def save_post(name, timestamp, comment): posts = pickle.load(DATA_FILE) assert isinstance(posts, deque) posts.appendleft(Post(name, timestamp, comment)) pickle.dump(posts, DATA_FILE) def load_posts(): return pickle.load(DATA_FILE) @application.route('/') def index(): return render_template('index.html', greeting_list=load_posts()) @application.route('/post', methods=['POST']) def post(): name = request.form.get('name') comment = request.form.get('comment') save_post(name, datetime.now(), comment) return redirect('/') @application.template_filter('nl2br') def nl2br_filter(s): return escape(s).replace('\n', Markup('<br />')) @application.template_filter('datetime_fmt') def datetime_fmt_filter(dt): return dt.strftime('%Y%m%d %H:%M:%S') def main(): application.run('127.0.0.1', 8000) if __name__ == "__main__": application.run('127.0.0.1', 8000, debug=True)
Python
0.000162
@@ -1083,13 +1083,15 @@ e('%25 -Y%25m%25d +d/%25m/%25Y %25H:
7c81e7dc96fc66c0dc82b2d431e07167c6a97557
Add failing tests re: Parser.ignore_unknown
tests/parser.py
tests/parser.py
from spec import Spec, skip, ok_, eq_, raises from invoke.parser import Parser, Context, Argument from invoke.collection import Collection class Parser_(Spec): def can_take_initial_context(self): c = Context() p = Parser(initial=c) eq_(p.initial, c) def can_take_initial_and_other_contexts(self): c1 = Context('foo') c2 = Context('bar') p = Parser(initial=Context(), contexts=[c1, c2]) eq_(p.contexts['foo'], c1) eq_(p.contexts['bar'], c2) def can_take_just_other_contexts(self): c = Context('foo') p = Parser(contexts=[c]) eq_(p.contexts['foo'], c) def can_take_just_contexts_as_non_keyword_arg(self): c = Context('foo') p = Parser([c]) eq_(p.contexts['foo'], c) @raises(ValueError) def raises_ValueError_for_unnamed_Contexts_in_contexts(self): Parser(initial=Context(), contexts=[Context()]) @raises(ValueError) def raises_error_for_context_name_clashes(self): Parser(contexts=(Context('foo'), Context('foo'))) @raises(ValueError) def raises_error_for_context_alias_and_name_clashes(self): Parser(contexts=(Context('foo', aliases=('bar',)), Context('bar'))) class parse_argv: def parses_sys_argv_style_list_of_strings(self): "parses sys.argv-style list of strings" # Doesn't-blow-up tests FTL mytask = Context(name='mytask') mytask.add_arg('arg') p = Parser(contexts=[mytask]) p.parse_argv(['mytask', '--arg', 'value']) def returns_only_contexts_mentioned(self): task1 = Context('mytask') task2 = Context('othertask') result = Parser((task1, task2)).parse_argv(['othertask']) eq_(len(result), 1) eq_(result[0].name, 'othertask') def always_includes_initial_context_if_one_was_given(self): # Even if no core/initial flags were seen t1 = Context('t1') init = Context() result = Parser((t1,), initial=init).parse_argv(['t1']) eq_(result[0].name, None) eq_(result[1].name, 't1') def returned_contexts_are_in_order_given(self): t1, t2 = Context('t1'), Context('t2') r = Parser((t1, t2)).parse_argv(['t2', 't1']) eq_([x.name for x in r], ['t2', 't1']) def returned_context_member_arguments_contain_given_values(self): c = Context('mytask', args=(Argument('boolean', kind=bool),)) result = Parser((c,)).parse_argv(['mytask', '--boolean']) eq_(result[0].args['boolean'].value, True) def arguments_which_take_values_get_defaults_overridden_correctly(self): args = (Argument('arg', kind=str), Argument('arg2', kind=int)) c = Context('mytask', args=args) argv = ['mytask', '--arg', 'myval', '--arg2', '25'] result = Parser((c,)).parse_argv(argv) eq_(result[0].args['arg'].value, 'myval') eq_(result[0].args['arg2'].value, 25) def returned_arguments_not_given_contain_default_values(self): # I.e. a Context with args A and B, invoked with no mention of B, # should result in B existing in the result, with its default value # intact, and not e.g. None, or the arg not existing. 
a = Argument('name', kind=str) b = Argument('age', default=7) c = Context('mytask', args=(a, b)) result = Parser((c,)).parse_argv(['mytask', '--name', 'blah']) eq_(c.args['age'].value, 7) def returns_remainder(self): "returns -- style remainder string chunk" r = Parser((Context('foo'),)).parse_argv(['foo', '--', 'bar', 'biz']) eq_(r.remainder, "bar biz") def clones_initial_context(self): a = Argument('foo', kind=bool) eq_(a.value, None) c = Context(args=(a,)) p = Parser(initial=c) assert p.initial is c r = p.parse_argv(['--foo']) assert p.initial is c c2 = r[0] assert c2 is not c a2 = c2.args['foo'] assert a2 is not a eq_(a.value, None) eq_(a2.value, True) def clones_noninitial_contexts(self): a = Argument('foo') eq_(a.value, None) c = Context(name='mytask', args=(a,)) p = Parser(contexts=(c,)) assert p.contexts['mytask'] is c r = p.parse_argv(['mytask', '--foo', 'val']) assert p.contexts['mytask'] is c c2 = r[0] assert c2 is not c a2 = c2.args['foo'] assert a2 is not a eq_(a.value, None) eq_(a2.value, 'val') class equals_signs: def _compare(self, argname, invoke, value): c = Context('mytask', args=(Argument(argname, kind=str),)) r = Parser((c,)).parse_argv(['mytask', invoke]) eq_(r[0].args[argname].value, value) def handles_equals_style_long_flags(self): self._compare('foo', '--foo=bar', 'bar') def handles_equals_style_short_flags(self): self._compare('f', '-f=bar', 'bar') def does_not_require_escaping_equals_signs_in_value(self): self._compare('f', '-f=biz=baz', 'biz=baz') def handles_multiple_boolean_flags_per_context(self): c = Context('mytask', args=( Argument('foo', kind=bool), Argument('bar', kind=bool) )) r = Parser((c,)).parse_argv(['mytask', '--foo', '--bar']) a = r[0].args eq_(a.foo.value, True) eq_(a.bar.value, True) class ParseResult_(Spec): "ParseResult" def setup(self): self.context = Context('mytask', args=(Argument('foo', kind=str), Argument('bar'))) argv = ['mytask', '--foo', 'foo-val', '--', 'my', 'remainder'] self.result = Parser((self.context,)).parse_argv(argv) def acts_as_a_list_of_parsed_contexts(self): eq_(len(self.result), 1) eq_(self.result[0].name, 'mytask') def exhibits_remainder_attribute(self): eq_(self.result.remainder, 'my remainder') def to_dict_returns_parsed_contexts_and_args_as_nested_dicts(self): eq_( self.result.to_dict(), {'mytask': {'foo': 'foo-val', 'bar': None}} )
Python
0
@@ -1235,24 +1235,196 @@ t('bar')))%0A%0A + def takes_ignore_unknown_kwarg(self):%0A Parser(ignore_unknown=True)%0A%0A def ignore_unknown_defaults_to_False(self):%0A eq_(Parser().ignore_unknown, False)%0A%0A class pa
f860a306b4c9fc583a83289ae2a6ecf407214e38
Add more checks to avoid crashing when input files are missing
pysteps/io/readers.py
pysteps/io/readers.py
"""Methods for reading files. """ import numpy as np def read_timeseries(inputfns, importer, **kwargs): """Read a list of input files using io tools and stack them into a 3d array. Parameters ---------- inputfns : list List of input files returned by any function implemented in archive. importer : function Any function implemented in importers. kwargs : dict Optional keyword arguments for the importer. Returns ------- out : tuple A three-element tuple containing the precipitation fields read, the quality fields, and associated metadata. """ # check for missing data if all(ifn is None for ifn in inputfns): return None, None, None else: for ifn in inputfns[0]: if ifn is not None: Rref, Qref, metadata = importer(ifn, **kwargs) break R = [] Q = [] timestamps = [] for i,ifn in enumerate(inputfns[0]): if ifn is not None: R_, Q_, _ = importer(ifn, **kwargs) R.append(R_) Q.append(Q_) timestamps.append(inputfns[1][i]) else: R.append(Rref*np.nan) if Qref is not None: Q.append(Qref*np.nan) else: Q.append(None) timestamps.append(inputfns[1][i]) R = np.concatenate([R_[None, :, :] for R_ in R]) #TODO: Q should be organized as R, but this is not trivial as Q_ can be also None or a scalar metadata["timestamps"] = np.array(timestamps) return R, Q, metadata
Python
0
@@ -655,16 +655,32 @@ ng data%0A + Rref = None%0A if a @@ -750,32 +750,102 @@ None%0A else:%0A + if len(inputfns%5B0%5D) == 0:%0A return None, None, None%0A for ifn @@ -978,16 +978,70 @@ break%0A%0A + if Rref is None:%0A return None, None, None%0A%0A R = @@ -1502,16 +1502,47 @@ 1%5D%5Bi%5D)%0A%0A + # Replace this with stack?%0A R =
e43fa4a0897d5dc909c4f8d77068c87393d1c03e
Remove print
comnnpass/views.py
comnnpass/views.py
from django.shortcuts import render from django.views.generic.list import ListView from .forms import QueryForm from .models import Result import pandas as pd import requests import datetime import math # 検索ページへ遷移 def top(request): return render(request,'search.html') # 検索処理・検索結果を返す def search(request): if request.method == 'POST': form = QueryForm(request.POST) dateFrom = form['dateFrom'].value() dateTo = form['dateTo'].value() keyword = form['keyword'].value() # 日付(from)が日付(To)より未来の場合, 結果を返さず # 検索画面へ遷移する if dateFrom > dateTo: msg = 'dateReverse' return render(request,'search.html',{'error':msg}) # 検索対象の年月日を取得する ymd = dateComplete(dateFrom,dateTo) if form['searchType'].value() == 'and': payload={'ymd':ymd,'keyword':keyword,'count':'100','order':'2'} else: payload={'ymd':ymd,'keyword_or':keyword,'count':'100','order':'2'} allEvents = [] connpass = ConnpassAPI(payload) if connpass.resultsNum() < 1: msg = 'noResult' return render(request,'search.html',{'error':msg}) allEvents = connpass.allEventsInfo() results = [] for idx,item in allEvents.iteritems(): title = item['title'] URL = item['event_url'] hashTag = item['hash_tag'] start = item['started_at'] end = item['ended_at'] limit = item['limit'] accepted = item['accepted'] waiting = item['waiting'] address = item['address'] place = item['place'] if item['series'] is not None: groupNm = item['series']['title'] groupUrl = item['series']['url'] else: groupNm = " " groupUrl = " " data = { 'keyword':keyword, 'from':dateFrom, 'to':dateTo, 'title':title, 'URL':URL, 'start':start, 'end':end, 'limit':limit, 'accepted':accepted, 'waiting':waiting, 'address':address, 'place':place, 'groupNm':groupNm, 'groupUrl':groupUrl} result = Result(data) results.append(result) # 検索結果は開催日降順で返ってくるので,要素の逆順にすることで開催日昇順にする resultsReverse = results[::-1] return render(request,'results.html',{'results':resultsReverse}) class resultList(ListView): template_name = 'results.html' paginate_by = 30 def get(self,request,results): self.object_list = results context = self.get_context_data(object_list=self.object_list) return self.render_to_response(context) # 入力された2つの日付の間を補完する def dateComplete(dateFrom,dateTo): # Connpass APIの日付仕様に合わせて/を削除 dateFrom = dateFrom.replace('/','') dateTo = dateTo.replace('/','') d_from = datetime.datetime.strptime(dateFrom,'%Y%m%d') d_to = datetime.datetime.strptime(dateTo,'%Y%m%d') dates = [] # 検索対象年月日(from)をリストへ dates.append(dateFrom) # 検索対象日from,toの間を補完する while(d_from != d_to): d_from += datetime.timedelta(days=1) dates.append(d_from.strftime('%Y%m%d')) return dates # Connpass API class ConnpassAPI: __url = 'https://connpass.com/api/v1/event/' def __init__(self,payload): self.payload = payload def __fetch(self): self.res = pd.DataFrame.from_dict(requests.get(self.__url,params=self.payload).json()) return self.res # 検索結果の総件数を返す def resultsNum(self): df = self.__fetch().drop('events',axis=1) if df.empty: return 0 else: print(df[0:1]['results_available'][0]) return df[0:1]['results_available'][0] ''' イベント詳細(全件)を返す 戻り値:pandas.series ''' def allEventsInfo(self): available = self.resultsNum() eventInfo = pd.DataFrame() iterate = math.ceil(available/100) print(iterate) for each in range(iterate): if each > 1: self.payload['start'] = (iterate*100)+1 df = pd.DataFrame.from_dict(self.__fetch()) eventInfo = pd.concat([eventInfo,df],ignore_index=True) else: eventInfo = pd.DataFrame.from_dict(self.__fetch()) return eventInfo['events']
Python
0.000016
@@ -3802,59 +3802,8 @@ se:%0A - print(df%5B0:1%5D%5B'results_available'%5D%5B0%5D)%0A @@ -4055,31 +4055,8 @@ 00)%0A - print(iterate)%0A
748d54cb46782fc1f6a5c46f8d270ddd516cf3c5
fix code style of test_dataset.py test=develop
python/paddle/fluid/tests/unittests/test_dataset.py
python/paddle/fluid/tests/unittests/test_dataset.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import paddle.fluid as fluid import numpy as np import os import shutil import unittest class TestDataset(unittest.TestCase): """ TestCases for Dataset. """ def test_dataset_create(self): """ Testcase for dataset create. """ try: dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") except: self.assertTrue(False) try: dataset = fluid.DatasetFactory().create_dataset("QueueDataset") except: self.assertTrue(False) try: dataset = fluid.DatasetFactory().create_dataset("MyOwnDataset") self.assertTrue(False) except: self.assertTrue(True) def test_dataset_config(self): """ Testcase for dataset configuration. """ dataset = fluid.core.Dataset("MultiSlotDataset") dataset.set_thread_num(12) dataset.set_filelist(["a.txt", "b.txt", "c.txt"]) dataset.set_trainer_num(4) dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") thread_num = dataset.get_thread_num() self.assertEqual(thread_num, 12) filelist = dataset.get_filelist() self.assertEqual(len(filelist), 3) self.assertEqual(filelist[0], "a.txt") self.assertEqual(filelist[1], "b.txt") self.assertEqual(filelist[2], "c.txt") trainer_num = dataset.get_trainer_num() self.assertEqual(trainer_num, 4) name, ugi = dataset.get_hdfs_config() self.assertEqual(name, "my_fs_name") self.assertEqual(ugi, "my_fs_ugi") def test_in_memory_dataset_run(self): """ Testcase for InMemoryDataset from create to run. """ with open("test_in_memory_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_in_memory_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) dataset.set_filelist([ "test_in_memory_dataset_run_a.txt", "test_in_memory_dataset_run_b.txt" ]) dataset.set_pipe_command("cat") dataset.set_use_var(slots_vars) dataset.load_into_memory() dataset.local_shuffle() exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) for i in range(2): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except: self.assertTrue(False) os.remove("./test_in_memory_dataset_run_a.txt") os.remove("./test_in_memory_dataset_run_b.txt") def test_queue_dataset_run(self): """ Testcase for QueueDataset from create to run. 
""" with open("test_queue_dataset_run_a.txt", "w") as f: data = "1 1 2 3 3 4 5 5 5 5 1 1\n" data += "1 2 2 3 4 4 6 6 6 6 1 2\n" data += "1 3 2 3 5 4 7 7 7 7 1 3\n" f.write(data) with open("test_queue_dataset_run_b.txt", "w") as f: data = "1 4 2 3 3 4 5 5 5 5 1 4\n" data += "1 5 2 3 4 4 6 6 6 6 1 5\n" data += "1 6 2 3 5 4 7 7 7 7 1 6\n" data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] for slot in slots: var = fluid.layers.data( name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) dataset = fluid.DatasetFactory().create_dataset("QueueDataset") dataset.set_batch_size(32) dataset.set_thread(3) dataset.set_filelist( ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]) dataset.set_pipe_command("cat") dataset.set_use_var(slots_vars) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) for i in range(2): try: exe.train_from_dataset(fluid.default_main_program(), dataset) except: self.assertTrue(False) os.remove("./test_queue_dataset_run_a.txt") os.remove("./test_queue_dataset_run_b.txt") if __name__ == '__main__': unittest.main()
Python
0.000449
@@ -1,12 +1,16 @@ +%22%22%22%0A # Copyrigh @@ -608,16 +608,20 @@ License. +%0A%22%22%22 %0A%0Afrom _
c3527f5526ee96398760cbef11d7de48f41fe998
Annotate NormOP test to skip grad check (#21894)
python/paddle/fluid/tests/unittests/test_norm_op.py
python/paddle/fluid/tests/unittests/test_norm_op.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np from op_test import OpTest def l2_norm(x, axis, epsilon): x2 = x**2 s = np.sum(x2, axis=axis, keepdims=True) r = np.sqrt(s + epsilon) y = x / np.broadcast_to(r, x.shape) return y, r class TestNormOp(OpTest): def setUp(self): self.op_type = "norm" self.init_test_case() x = np.random.random(self.shape).astype("float64") y, norm = l2_norm(x, self.axis, self.epsilon) self.inputs = {'X': x} self.attrs = {'epsilon': self.epsilon, 'axis': self.axis} self.outputs = {'Out': y, 'Norm': norm} def test_check_output(self): self.check_output() def test_check_grad(self): self.check_grad(['X'], 'Out') def init_test_case(self): self.shape = [2, 3, 4, 5] self.axis = 1 self.epsilon = 1e-8 class TestNormOp2(TestNormOp): def init_test_case(self): self.shape = [5, 3, 9, 7] self.axis = 0 self.epsilon = 1e-8 class TestNormOp3(TestNormOp): def init_test_case(self): self.shape = [5, 3, 2, 7] self.axis = -1 self.epsilon = 1e-8 class TestNormOp4(TestNormOp): def init_test_case(self): self.shape = [128, 1024, 14, 14] self.axis = 2 self.epsilon = 1e-8 def test_check_grad(self): # since the gradient check is very slow in large shape, so skip check_grad pass class TestNormOp5(TestNormOp): def init_test_case(self): self.shape = [2048, 2048] self.axis = 1 self.epsilon = 1e-8 def test_check_grad(self): # since the gradient check is very slow in large shape, so skip check_grad pass if __name__ == '__main__': unittest.main()
Python
0
@@ -706,16 +706,36 @@ t OpTest +, skip_check_grad_ci %0A%0A%0Adef l @@ -1807,32 +1807,179 @@ psilon = 1e-8%0A%0A%0A +@skip_check_grad_ci(reason=%22'check_grad' on large inputs is too slow, %22 +%0A %22however it is desirable to cover the forward pass%22)%0A class TestNormOp @@ -2158,97 +2158,161 @@ -# since the gradient check is very slow in large shape, so skip check_grad%0A +pass%0A%0A%0A@skip_check_grad_ci(reason=%22'check_grad' on large inputs is too slow, %22 +%0A %22however it is desirable to cover the forward pass -%0A%0A +%22) %0Acla @@ -2489,91 +2489,8 @@ f):%0A - # since the gradient check is very slow in large shape, so skip check_grad%0A
dbc1df293f283367526b3a80c5f24d71e5d46be1
fix bug abort is undefined and return 204
middleware/app.py
middleware/app.py
from flask import Flask, jsonify, request from sense_hat import SenseHat from hat_manager import HatManager app = Flask(__name__) sense_hat = SenseHat() hat_manager = HatManager(sense_hat) @app.route('/') def index(): return 'Welcome to the PI manager. Choose a route according to what you want to do.' @app.route('/status') def get_status(): status = {'pressure': hat_manager.get_pressure, 'temperature': hat_manager.get_temperature, 'humidity': hat_manager.get_humidity} return jsonify({'status': status}) @app.route('/message', methods=['POST']) def print_message(): if not request.json or not 'message' in request.json: abort(400) message = request.json['message'] color = request.json['text_color'] bg_color = request.json['bg_color'] hat_manager.set_message(message) if __name__ == '__main__': # 0.0.0.0 = accessible to any device on the network app.run(debug=True, host='0.0.0.0')
Python
0.00001
@@ -34,16 +34,23 @@ request +, abort %0Afrom se @@ -821,16 +821,42 @@ essage)%0A + return jsonify(), 204%0A %0A%0Aif __n
fb34eebd253727dcc718e2387cb6f4ac763f0bae
Add DateTime Completed Field to Task
tasks/models/tasks.py
tasks/models/tasks.py
"""Models for tasks Each new type of task corresponds to a task model """ from django.db import models from data import Data_FullGrid_Confidence, Data_FullGrid # Tasks class Task_Naming_001(Data_FullGrid_Confidence): class Meta: db_table = 'tbl_response_naming_001' def __unicode__(self): return 'Task Naming 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Foci_001(Data_FullGrid): class Meta: db_table = 'tbl_response_foci_001' def __unicode__(self): return 'Task Foci 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True) class Task_Mapping_001(Data_FullGrid): class Meta: db_table = 'tbl_response_mapping_001' def __unicode__(self): return 'Task Mapping 001' task_response_id = models.AutoField(primary_key=True, unique=True) worker_id = models.CharField(max_length=128) task_response_key = models.CharField(max_length=32, unique=True)
Python
0
@@ -857,32 +857,87 @@ 32, unique=True) +%0A dt_completed = models.DateTimeField(auto_now=True) %0A%0Aclass Task_Map
5c49071a6f03cc07f520ab57d71eccade4620261
enforce whitelist
python/parse_log_for_errmsg/parse_log_for_errmsg.py
python/parse_log_for_errmsg/parse_log_for_errmsg.py
# -*- coding: utf-8 -*- #!/usr/bin/python ##------------------------------------------------------------------- ## @copyright 2017 DennyZhang.com ## Licensed under MIT ## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE ## ## File : parse_log_for_errmsg.py ## Author : Denny <[email protected]> ## Description : ## -- ## Created : <2017-03-23> ## Updated: Time-stamp: <2017-03-23 16:05:17> ##------------------------------------------------------------------- import argparse import sys import glob import os NAGIOS_OK_ERROR=0 NAGIOS_EXIT_ERROR=2 MAX_FILE_SIZE = 1024 * 1024 * 1024 # 1GB SEPARATOR = "|" def filter_log_by_errmsg(log_folder, err_pattern_list, \ logfile_pattern = "*.log"): err_msg_list = [] # TODO: Performance tunning: For files bigger than GB, the script won't work for fname in glob.glob("%s/%s" % (log_folder, logfile_pattern)): if os.stat(fname).st_size > MAX_FILE_SIZE: print "ERROR: Unsupported large files. %s is larger than %s." % (fname, MAX_FILE_SIZE) sys.exit(NAGIOS_EXIT_ERROR) with open(fname) as f: content = f.readlines() for err_pattern in err_pattern_list: # print "Parse %s for %s." % (fname, err_pattern) for line in content: if err_pattern in line: err_msg_list.append(line) # print "err_msg_list: %s" % (','.join(err_msg_list)) return err_msg_list def filter_errmsg_by_whitelist(err_msg_list, whitelist_pattern_list): ret_msg_list = [] for line in err_msg_list: for whitelist_pattern in whitelist_pattern_list: if whitelist_pattern in line: break ret_msg_list.append(line) return ret_msg_list # Sample: ./parse_log_for_errmsg.py \ # --log_folder /opt/mymdm/logs # --err_patterns "error|exception" \ # --whitelist_patterns "route53|Maybe document|Not found template" if __name__ == '__main__': # get parameters from users parser = argparse.ArgumentParser() parser.add_argument('--log_folder', required=True, type=str, \ help="Which log folder to check") parser.add_argument('--err_patterns', default='error|exception', required=False, \ help="Interested error patterns. If multiple, we should use | to separate them") parser.add_argument('--whitelist_patterns', default='', required=False, \ help="What white patterns are expected to be safe") parser.add_argument('--logfile_pattern', default='*.log', required=False, \ help="What white patterns are expected to be safe") l = parser.parse_args() log_folder = l.log_folder err_pattern_list = l.err_patterns.split(SEPARATOR) if l.whitelist_patterns == "": whitelist_pattern_list = [] else: whitelist_pattern_list = l.whitelist_patterns.split(SEPARATOR) err_msg_list = filter_log_by_errmsg(log_folder, err_pattern_list, l.logfile_pattern) # print "err_msg_list: %s" % (','.join(err_msg_list)) if len(whitelist_pattern_list) != 0: # print "here! whitelist_pattern_list: %s. len: %d" % (",".join(whitelist_pattern_list), len(whitelist_pattern_list)) err_msg_list = filter_errmsg_by_whitelist(err_msg_list, whitelist_pattern_list) if len(err_msg_list) != 0: print "ERROR: unexpected errors/exceptions are found under %s. errmsg: %s" % \ (log_folder, "\n".join(err_msg_list)) sys.exit(NAGIOS_EXIT_ERROR) else: print "OK: no unexpected errors/exceptions are found under %s." % (log_folder) sys.exit(NAGIOS_OK_ERROR) ## File : parse_log_for_errmsg.py ends
Python
0.000001
@@ -411,12 +411,12 @@ 16:0 -5:17 +6:26 %3E%0A## @@ -1596,24 +1596,52 @@ r_msg_list:%0A + has_matched = False%0A for @@ -1747,14 +1747,86 @@ -break%0A +has_matched = True%0A break%0A if has_matched is False:%0A
f4fff0375f238a21ccbe5a5a4316f848c4af401e
use 'params' instead of url quote
flexget/components/sites/sites/newznab.py
flexget/components/sites/sites/newznab.py
from urllib.parse import quote, urlencode import feedparser from loguru import logger from flexget import plugin from flexget.entry import Entry from flexget.event import event from flexget.utils.requests import RequestException __author__ = 'deksan' logger = logger.bind(name='newznab') class Newznab: """ Newznab search plugin Provide a url or your website + apikey and a category Config example:: newznab: url: "http://website/api?apikey=xxxxxxxxxxxxxxxxxxxxxxxxxx&t=movie&extended=1" website: https://website apikey: xxxxxxxxxxxxxxxxxxxxxxxxxx category: movie Category is any of: movie, tvsearch, music, book, all """ schema = { 'type': 'object', 'properties': { 'category': {'type': 'string', 'enum': ['movie', 'tvsearch', 'tv', 'music', 'book', 'all']}, 'url': {'type': 'string', 'format': 'url'}, 'website': {'type': 'string', 'format': 'url'}, 'apikey': {'type': 'string'}, }, 'required': ['category'], 'additionalProperties': False, } def build_config(self, config): logger.debug(type(config)) if config['category'] == 'tv': config['category'] = 'tvsearch' if 'url' not in config: if 'apikey' in config and 'website' in config: if config['category'] == 'all': config['category'] = 'search' params = {'t': config['category'], 'apikey': config['apikey'], 'extended': 1} config['url'] = config['website'] + '/api?' + urlencode(params) return config def fill_entries_for_url(self, url, task): entries = [] logger.verbose('Fetching {}', url) try: r = task.requests.get(url) except RequestException as e: logger.error("Failed fetching '{}': {}", url, e) rss = feedparser.parse(r.content) logger.debug('Raw RSS: {}', rss) if not len(rss.entries): logger.info('No results returned') for rss_entry in rss.entries: new_entry = Entry() for key in list(rss_entry.keys()): new_entry[key] = rss_entry[key] new_entry['url'] = new_entry['link'] if rss_entry.enclosures: size = int(rss_entry.enclosures[0]['length']) # B new_entry['content_size'] = size / (2 ** 20) # MB entries.append(new_entry) return entries def search(self, task, entry, config=None): config = self.build_config(config) if config['category'] == 'movie': return self.do_search_movie(entry, task, config) elif config['category'] == 'tvsearch': return self.do_search_tvsearch(entry, task, config) elif config['category'] == 'search': return self.do_search_all(entry, task, config) else: entries = [] logger.warning("Work in progress. 
Searching for the specified category is not supported yet...") return entries def do_search_tvsearch(self, arg_entry, task, config=None): logger.info('Searching for {}', arg_entry['title']) # normally this should be used with next_series_episodes who has provided season and episodenumber if ( 'series_name' not in arg_entry or 'series_season' not in arg_entry or 'series_episode' not in arg_entry ): return [] if arg_entry.get('tvrage_id'): lookup = f"&rid={arg_entry.get('tvrage_id')}" else: lookup = f"&q={quote(arg_entry['series_name'])}" url = f"{config['url']}{lookup}&season={arg_entry['series_season']}&ep={arg_entry['series_episode']}" return self.fill_entries_for_url(url, task) def do_search_movie(self, arg_entry, task, config=None): entries = [] logger.info('Searching for {} (imdbid:{})', arg_entry['title'], arg_entry['imdb_id']) # normally this should be used with emit_movie_queue who has imdbid (i guess) if 'imdb_id' not in arg_entry: return entries imdb_id = arg_entry['imdb_id'].replace('tt', '') url = f"{config['url']}&imdbid={imdb_id}" return self.fill_entries_for_url(url, task) def do_search_all(self, arg_entry, task, config=None): logger.info('Searching for {}', arg_entry['title']) url = f"{config['url']}&q={arg_entry['title']}" return self.fill_entries_for_url(url, task) @event('plugin.register') def register_plugin(): plugin.register(Newznab, 'newznab', api_ver=2, interfaces=['search'])
Python
0.000027
@@ -21,15 +21,8 @@ port - quote, url @@ -1580,16 +1580,19 @@ url'%5D = +f%22%7B config%5B' @@ -1604,21 +1604,15 @@ te'%5D - + ' +%7D /api? -' + +%7B urle @@ -1624,16 +1624,18 @@ (params) +%7D%22 %0A%0A @@ -3585,25 +3585,25 @@ -lookup = f%22&rid=%7B +params = %7B'rid': arg_ @@ -3625,17 +3625,16 @@ ge_id')%7D -%22 %0A @@ -3644,117 +3644,74 @@ se:%0A +%09 - lookup = f%22&q=%7Bquote(arg_entry%5B'series_name'%5D)%7D%22%0A url = f%22%7Bconfig%5B'url'%5D%7D%7Blookup%7D& +params = %7B'q': arg_entry%5B'series_name'%5D%7D%0A%09params%5B' season -=%7B +'%5D = arg_ @@ -3736,14 +3736,25 @@ on'%5D -%7D&ep=%7B +%0A%09params%5B'ep'%5D = arg_ @@ -3776,16 +3776,66 @@ pisode'%5D +%0A url = f%22%7Bconfig%5B'url'%5D%7D%7Burlencode(params) %7D%22%0A @@ -4490,24 +4490,60 @@ itle'%5D)%0A - +%09params = %7B'q': arg_entry%5B'title'%5D%7D%0A + url = f%22 @@ -4561,30 +4561,26 @@ l'%5D%7D -&q=%7Barg_entry%5B'title'%5D +%7Burlencode(params) %7D%22%0A
6db0dccc5643cb2af254b0bf052806645f7445fd
fix regression on qibuild deploy
python/qibuild/gdb.py
python/qibuild/gdb.py
## Copyright (c) 2012 Aldebaran Robotics. All rights reserved. ## Use of this source code is governed by a BSD-style license that can be ## found in the COPYING file. """ Tools for the GNU debbugger """ import os from qibuild import ui import qibuild.sh import qibuild.command def split_debug(base_dir, objcopy=None): """ Split the debug information out of all the binaries in lib/ and bin/ The debug information will be put in a .debug directory next to the executable <base_dir>/bin/foo <base_dir>/bin/.debug/foo Also uses objcopy so that the binaries and libraries still remain usable with gdb :param: the objcopy executable to use. (defaults to the first objcopy executable found in PATH) """ if objcopy is None: objcopy = "objcopy" def _get_binaries(dir): res = list() for root, directories, filenames in os.walk(dir): if os.path.basename(root) == ".debug": continue for filename in filenames: full_path = os.path.join(root, filename) if qibuild.sh.is_binary(filename): res.append(full_path) return res binaries = list() bin_dir = os.path.join(base_dir, "bin") lib_dir = os.path.join(base_dir, "lib") binaries.extend(_get_binaries(bin_dir)) binaries.extend(_get_binaries(lib_dir)) for src in binaries: dirname, basename = os.path.split(src) debug_dir = os.path.join(dirname, ".debug") qibuild.sh.mkdir(debug_dir) dest = os.path.join(src, debug_dir, basename) to_run = list() to_run.append([objcopy, "--only-keep-debug", src, dest]) to_run.append([objcopy, "--strip-debug", "--strip-unneeded", "--add-gnu-debuglink=%s" % dest, src]) retcode = 0 #check if we need to do something #if mtime of src and dest are the same continue, else do the work and set #the mtime of dest to the one of src. stsrc = os.stat(src) stdst = None if os.path.exists(dest): stdst = os.stat(dest) if stdst and stsrc.st_mtime == stdst.st_mtime: ui.info("Debug info up-to-date for %s" % os.path.relpath(src, base_dir)) continue for cmd in to_run: retcode = 0 # FIXME: we should of course not try to split debug info twice, but # that's a hard problem retcode += qibuild.command.call(cmd, ignore_ret_code=True, quiet=True) if retcode == 0: os.utime(dest, (stsrc.st_atime, stsrc.st_mtime)) ui.info("Debug info extracted for %s" % os.path.relpath(src, base_dir)) else: ui.error("Error while extracting debug for %s" % os.path.relpath(src, base_dir)) if __name__ == "__main__": import sys split_debug(sys.argv[1])
Python
0.000001
@@ -1120,23 +1120,24 @@ binary(f -ilename +ull_path ):%0A
547c1d5d1ff2ced0969a86eda6e0094f8b76d94f
Bump to 0.1.1 with setup.py fix
minio/__init__.py
minio/__init__.py
# Minimal Object Storage Library, (C) 2015 Minio, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .minio import Minio from .acl import Acl from .parsers import Bucket, Object, ResponseError __author__ = "Minio, Inc." __version__ = "0.1.0"
Python
0
@@ -739,11 +739,11 @@ = %220.1. -0 +1 %22%0A
4bb8a61cde27575865cdd2b7df5afcb5d6860523
Add weird SLP orientation to get_world_pedir
fmriprep/interfaces/tests/test_reports.py
fmriprep/interfaces/tests/test_reports.py
import pytest from ..reports import get_world_pedir @pytest.mark.parametrize("orientation,pe_dir,expected", [ ('RAS', 'j', 'Posterior-Anterior'), ('RAS', 'j-', 'Anterior-Posterior'), ('RAS', 'i', 'Left-Right'), ('RAS', 'i-', 'Right-Left'), ('RAS', 'k', 'Inferior-Superior'), ('RAS', 'k-', 'Superior-Inferior'), ('LAS', 'j', 'Posterior-Anterior'), ('LAS', 'i-', 'Left-Right'), ('LAS', 'k-', 'Superior-Inferior'), ('LPI', 'j', 'Anterior-Posterior'), ('LPI', 'i-', 'Left-Right'), ('LPI', 'k-', 'Inferior-Superior'), ]) def test_get_world_pedir(tmpdir, orientation, pe_dir, expected): assert get_world_pedir(orientation, pe_dir) == expected
Python
0
@@ -557,16 +557,241 @@ rior'),%0A + ('SLP', 'k-', 'Posterior-Anterior'),%0A ('SLP', 'k', 'Anterior-Posterior'),%0A ('SLP', 'j-', 'Left-Right'),%0A ('SLP', 'j', 'Right-Left'),%0A ('SLP', 'i', 'Inferior-Superior'),%0A ('SLP', 'i-', 'Superior-Inferior'),%0A %5D)%0Adef t
a059a7e8b751fbc49bd1f363378d630d774ed2c1
set subtypes to None if not supported in this TAXII version
taxii_client/utils.py
taxii_client/utils.py
import pytz import json import calendar from libtaxii.clients import HttpClient from libtaxii.messages_10 import ContentBlock as ContentBlock10 from datetime import datetime from collections import namedtuple def ts_to_date(timestamp): if not timestamp: return None return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.UTC) def date_to_ts(obj): if obj.utcoffset() is not None: obj = obj - obj.utcoffset() millis = int( calendar.timegm(obj.timetuple()) * 1000 + obj.microsecond / 1000 ) return millis def configure_taxii_client_auth(tclient, cert=None, key=None, username=None, password=None): tls_auth = (cert and key) basic_auth = (username and password) if tls_auth and basic_auth: tclient.set_auth_type(HttpClient.AUTH_CERT_BASIC) tclient.set_auth_credentials(dict( key_file = key, cert_file = cert, username = username, password = password )) elif tls_auth: tclient.set_auth_type(HttpClient.AUTH_CERT) tclient.set_auth_credentials(dict( key_file = key, cert_file = cert )) elif basic_auth: tclient.set_auth_type(HttpClient.AUTH_BASIC) tclient.set_auth_credentials(dict( username = username, password = password )) return tclient class DatetimeJSONEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, datetime): return date_to_ts(obj) else: return JSONEncoder.default(self, obj) AbstractContentBlock = namedtuple('AbstractContentBlock', ['content', 'binding', 'subtypes', 'timestamp', 'source', 'sink_collection', 'source_collection']) class ContentBlock(AbstractContentBlock): def to_json(self): return json.dumps(self._asdict(), cls=DatetimeJSONEncoder) def extract_content(response, source=None, source_collection=None, sink_collection=None): for block in response.content_blocks: if isinstance(block, ContentBlock10): yield ContentBlock( content = block.content, binding = block.content_binding, timestamp = block.timestamp_label, subtypes = [], source = source, source_collection = source_collection, sink_collection = sink_collection ) else: yield ContentBlock( content = block.content, binding = block.content_binding.binding_id, timestamp = block.timestamp_label, subtypes = block.content_binding.subtype_ids, source = source, source_collection = source_collection, sink_collection = sink_collection )
Python
0.000001
@@ -2294,10 +2294,12 @@ s = -%5B%5D +None ,%0A
bfa66827e5afd175c15640b1678fbba347009953
Fix unit tests
python/test/_utils.py
python/test/_utils.py
from python.ServerGateway import DwebGatewayHTTPRequestHandler def _processurl(url, verbose, **kwargs): # Simulates HTTP Server process - wont work for all methods args = url.split('/') method = args.pop(0) f = getattr(DwebGatewayHTTPRequestHandler, method) assert f namespace = args.pop(0) if verbose: kwargs["verbose"] = True res = f(DwebGatewayHTTPRequestHandler, namespace, *args, **kwargs) return res
Python
0.000005
@@ -87,16 +87,28 @@ verbose, + headers=%7B%7D, **kwarg @@ -222,24 +222,146 @@ args.pop(0)%0A + DwebGatewayHTTPRequestHandler.headers = headers # This is a kludge, put headers on class, method expects an instance.%0A f = geta
c6d5d8e4128a8eb8a6f73d414b303000362135ef
Fix up logger for celery 3.x
mixpanel/tasks.py
mixpanel/tasks.py
import httplib import urllib import base64 import urlparse import logging import socket from django.utils import simplejson from celery.task import Task from celery.registry import tasks from mixpanel.conf import settings as mp_settings class EventTracker(Task): """ Task to track a Mixpanel event. """ name = "mixpanel.tasks.EventTracker" max_retries = mp_settings.MIXPANEL_MAX_RETRIES class FailedEventRequest(Exception): """The attempted recording event failed because of a non-200 HTTP return code""" pass def run(self, event_name, properties=None, token=None, test=None, throw_retry_error=False, **kwargs): """ Track an event occurrence to mixpanel through the API. ``event_name`` is the string for the event/category you'd like to log this event under ``properties`` is (optionally) a dictionary of key/value pairs describing the event. ``token`` is (optionally) your Mixpanel api token. Not required if you've already configured your MIXPANEL_API_TOKEN setting. ``test`` is an optional override to your `:data:mixpanel.conf.settings.MIXPANEL_TEST_ONLY` setting for determining if the event requests should actually be stored on the Mixpanel servers. """ l = self.get_logger(**kwargs) l.info("Recording event: <%s>" % event_name) if l.logger.getEffectiveLevel() == logging.DEBUG: httplib.HTTPConnection.debuglevel = 1 is_test = self._is_test(test) generated_properties = self._handle_properties(properties, token) url_params = self._build_params(event_name, generated_properties, is_test) l.debug("url_params: <%s>" % url_params) conn = self._get_connection() try: result = self._send_request(conn, url_params) except EventTracker.FailedEventRequest, exception: conn.close() l.info("Event failed. Retrying: <%s>" % event_name) kwargs.update({ 'properties': properties, 'token': token, 'test': test}) self.retry(args=[event_name], kwargs=kwargs, exc=exception, countdown=mp_settings.MIXPANEL_RETRY_DELAY, throw=throw_retry_error) return conn.close() if result: l.info("Event recorded/logged: <%s>" % event_name) else: l.info("Event ignored: <%s>" % event_name) return result def _is_test(self, test): """ Determine whether this event should be logged as a test request, meaning it won't actually be stored on the Mixpanel servers. A return result of 1 means this will be a test, 0 means it won't as per the API spec. Uses ``:mod:mixpanel.conf.settings.MIXPANEL_TEST_ONLY`` as the default if no explicit test option is given. """ if test == None: test = mp_settings.MIXPANEL_TEST_ONLY if test: return 1 return 0 def _handle_properties(self, properties, token): """ Build a properties dictionary, accounting for the token. """ if properties == None: properties = {} if not properties.get('token', None): if token is None: token = mp_settings.MIXPANEL_API_TOKEN properties['token'] = token l = self.get_logger() l.debug('pre-encoded properties: <%s>' % repr(properties)) return properties def _get_connection(self): server = mp_settings.MIXPANEL_API_SERVER # Wish we could use python 2.6's httplib timeout support socket.setdefaulttimeout(mp_settings.MIXPANEL_API_TIMEOUT) return httplib.HTTPConnection(server) def _build_params(self, event, properties, is_test): """ Build HTTP params to record the given event and properties. 
""" params = {'event': event, 'properties': properties} data = base64.b64encode(simplejson.dumps(params)) data_var = mp_settings.MIXPANEL_DATA_VARIABLE url_params = urllib.urlencode({data_var: data, 'test': is_test}) return url_params def _send_request(self, connection, params): """ Send a an event with its properties to the api server. Returns ``true`` if the event was logged by Mixpanel. """ endpoint = mp_settings.MIXPANEL_TRACKING_ENDPOINT try: connection.request('GET', '%s?%s' % (endpoint, params)) response = connection.getresponse() except socket.error, message: raise EventTracker.FailedEventRequest("The tracking request failed with a socket error. Message: [%s]" % message) if response.status != 200 or response.reason != 'OK': raise EventTracker.FailedEventRequest("The tracking request failed. Non-200 response code was: %s %s" % (response.status, response.reason)) # Successful requests will generate a log response_data = response.read() if response_data != '1': return False return True tasks.register(EventTracker) class FunnelEventTracker(EventTracker): """ Task to track a Mixpanel funnel event. """ name = "mixpanel.tasks.FunnelEventTracker" max_retries = mp_settings.MIXPANEL_MAX_RETRIES class InvalidFunnelProperties(Exception): """Required properties were missing from the funnel-tracking call""" pass def run(self, funnel, step, goal, properties, token=None, test=None, throw_retry_error=False, **kwargs): """ Track an event occurrence to mixpanel through the API. ``funnel`` is the string for the funnel you'd like to log this event under ``step`` the step in the funnel you're registering ``goal`` the end goal of this funnel ``properties`` is a dictionary of key/value pairs describing the funnel event. A ``distinct_id`` is required. ``token`` is (optionally) your Mixpanel api token. Not required if you've already configured your MIXPANEL_API_TOKEN setting. ``test`` is an optional override to your `:data:mixpanel.conf.settings.MIXPANEL_TEST_ONLY` setting for determining if the event requests should actually be stored on the Mixpanel servers. """ l = self.get_logger(**kwargs) l.info("Recording funnel: <%s>-<%s>" % (funnel, step)) properties = self._handle_properties(properties, token) is_test = self._is_test(test) properties = self._add_funnel_properties(properties, funnel, step, goal) url_params = self._build_params(mp_settings.MIXPANEL_FUNNEL_EVENT_ID, properties, is_test) l.debug("url_params: <%s>" % url_params) conn = self._get_connection() try: result = self._send_request(conn, url_params) except EventTracker.FailedEventRequest, exception: conn.close() l.info("Funnel failed. Retrying: <%s>-<%s>" % (funnel, step)) kwargs.update({ 'token': token, 'test': test}) self.retry(args=[funnel, step, goal, properties], kwargs=kwargs, exc=exception, countdown=mp_settings.MIXPANEL_RETRY_DELAY, throw=throw_retry_error) return conn.close() if result: l.info("Funnel recorded/logged: <%s>-<%s>" % (funnel, step)) else: l.info("Funnel ignored: <%s>-<%s>" % (funnel, step)) return result def _add_funnel_properties(self, properties, funnel, step, goal): if not properties.has_key('distinct_id'): error_msg = "A ``distinct_id`` must be given to record a funnel event" raise FunnelEventTracker.InvalidFunnelProperties(error_msg) properties['funnel'] = funnel properties['step'] = step properties['goal'] = goal return properties tasks.register(FunnelEventTracker)
Python
0.000221
@@ -1421,15 +1421,8 @@ f l. -logger. getE
1ca1137a57c16b1141e167ac8210ca3ac9014e2e
remove unnecessary import
mkt/stats/urls.py
mkt/stats/urls.py
from django.conf.urls import patterns, url from django.shortcuts import redirect import addons.views from . import views from stats.urls import series_re # Time series URLs following this pattern: # /app/{app_slug}/statistics/{series}-{group}-{start}-{end}.{format} # Also supports in-app URLs: # /app/{app_slug}/statistics/{inapp}/{series}-{group}-{start}-{end} # .{format} inapp = """inapp/(?P<inapp>[^/<>"]+)""" series = dict((type, '%s-%s' % (type, series_re)) for type in views.SERIES) def sales_stats_report_urls(category='', inapp_flag=False): """ urlpatterns helper builder for views.stats_report urls """ url_patterns = [] sales_metrics = ['revenue', 'sales', 'refunds'] inapp_prefix = '' inapp_suffix = '' if inapp_flag: inapp_prefix = inapp + '/' inapp_suffix = '_inapp' category_prefix = '' category_suffix = '' if category: category_prefix = category + '_' category_suffix = category + '/' for metric in sales_metrics: full_category = '%s%s' % (category_prefix, metric) # URL defaults revenue to root, don't explicitly put in url. if metric == 'revenue': metric = '' url_patterns += patterns('', url('^%ssales/%s%s$' % (inapp_prefix, category_suffix, metric), views.stats_report, name='mkt.stats.%s' % full_category + inapp_suffix, kwargs={'report': full_category + inapp_suffix}) ) return url_patterns def sales_series_urls(category='', inapp_flag=False): """ urlpatterns helper builder for views.*_series urls """ url_patterns = [] sales_metrics = ['revenue', 'sales', 'refunds'] inapp_suffix = '' if inapp_flag: inapp_suffix = '_inapp' # Distinguish between line and column series. view = views.finance_line_series category_prefix = '' if category: view = views.finance_column_series category_prefix = category + '_' for metric in sales_metrics: full_category = '%s%s%s' % (category_prefix, metric, inapp_suffix) kwargs = {} if metric != 'sales': # Defaults to sales so does not need primary_field arg. kwargs['primary_field'] = metric if category: kwargs['category_field'] = category url_re = series[full_category] if inapp_flag: url_re = '^%s/sales/%s' % (inapp, series[full_category]) url_patterns += patterns('', url(url_re, view, name='mkt.stats.%s' % full_category + '_series', kwargs=kwargs) ) return url_patterns app_stats_patterns = patterns('', # Overview (not implemented). url('^$', views.stats_report, name='mkt.stats.overview', kwargs={'report': 'installs'}), # kwargs={'report': 'app_overview'}. # Installs. url('^installs/$', views.stats_report, name='mkt.stats.installs', kwargs={'report': 'installs'}), url(series['installs'], views.installs_series, name='mkt.stats.installs_series'), # Usage (not implemented). 
url('^usage/$', views.stats_report, name='mkt.stats.usage', kwargs={'report': 'usage'}), url(series['usage'], views.usage_series, name='mkt.stats.usage_series'), ) app_stats_patterns += sales_stats_report_urls(category='currency', inapp_flag=True) app_stats_patterns += sales_series_urls(category='currency', inapp_flag=True) app_stats_patterns += sales_stats_report_urls(category='source', inapp_flag=True) app_stats_patterns += sales_series_urls(category='source', inapp_flag=True) app_stats_patterns += sales_stats_report_urls(inapp_flag=True) app_stats_patterns += sales_series_urls(inapp_flag=True) app_stats_patterns += sales_stats_report_urls(category='currency') app_stats_patterns += sales_series_urls(category='currency') app_stats_patterns += sales_stats_report_urls(category='source') app_stats_patterns += sales_series_urls(category='source') app_stats_patterns += sales_stats_report_urls() app_stats_patterns += sales_series_urls() # Overall site statistics. app_site_patterns = patterns('', url('^$', lambda r: redirect('mkt.stats.apps_count_new', permanent=False), name='mkt.stats.overall') ) keys = ['apps_count_new', 'apps_count_installed', 'apps_review_count_new', 'mmo_user_count_total', 'mmo_user_count_new', 'mmo_total_visitors'] urls = [] for key in keys: urls.append(url('^%s/$' % key, views.overall, name='mkt.stats.%s' % key, kwargs={'report': key})) app_site_patterns += patterns('', *urls) all_apps_stats_patterns = patterns('', # Landing pages. url('^$', views.my_apps_report, name='mkt.stats.my_apps_overview', kwargs={'report': 'installs'}), url('^installs/$', views.my_apps_report, name='mkt.stats.my_apps_installs', kwargs={'report': 'installs'}), # Data URL. url(series['my_apps'], views.my_apps_series, name='mkt.stats.my_apps_series'), )
Python
0.000037
@@ -79,28 +79,8 @@ ct%0A%0A -import addons.views%0A from
c7e9ea888bbbcef9e7ae29340c45e9aaf211d1da
Fix tests
tests/travis.py
tests/travis.py
import os os.environ['QT_API'] = os.environ['USE_QT_API'] from qtpy import QtCore, QtGui, QtWidgets print('Qt version:%s' % QtCore.__version__) print(QtCore.QEvent) print(QtGui.QPainter) print(QtWidgets.QWidget)
Python
0.000103
@@ -51,16 +51,24 @@ QT_API'%5D +.lower() %0A%0Afrom q
efac3c253dcd71be2c6510b5025ddedbb9a7358e
work when there's no RAVEN_CONFIG
temba/temba_celery.py
temba/temba_celery.py
from __future__ import absolute_import, unicode_literals import celery import os import raven import sys from django.conf import settings from raven.contrib.celery import register_signal, register_logger_signal # set the default Django settings module for the 'celery' program. os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'temba.settings') app = celery.Celery('temba') app.config_from_object('django.conf:settings') app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) client = raven.Client(settings.RAVEN_CONFIG['dsn']) register_logger_signal(client) register_signal(client) @app.task(bind=True) def debug_task(self): # pragma: needs cover print('Request: {0!r}'.format(self.request)) # this is needed to simulate CELERY_ALWAYS_EAGER for plain 'send' tasks if 'test' in sys.argv or getattr(settings, 'CELERY_ALWAYS_EAGER', False): from celery import current_app def send_task(name, args=(), kwargs={}, **opts): # pragma: needs cover task = current_app.tasks[name] return task.apply(args, kwargs, **opts) current_app.send_task = send_task
Python
0.000004
@@ -475,16 +475,123 @@ _APPS)%0A%0A +# register raven if configured%0Araven_config = getattr(settings, 'RAVEN_CONFIG', None)%0Aif raven_config:%0A client = @@ -634,16 +634,20 @@ 'dsn'%5D)%0A + register @@ -669,16 +669,20 @@ client)%0A + register
bf0930121a53ea2df3a7b851256cb47ce86aec00
fix non-integer index error
pywt/data/_readers.py
pywt/data/_readers.py
import os import numpy as np def ascent(): """ Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos The image is derived from accent-to-the-top.jpg at http://www.public-domain-image.com/people-public-domain-images-pictures/ Parameters ---------- None Returns ------- ascent : ndarray convenient image to use for testing and demonstration Examples -------- >>> import pywt.data >>> ascent = pywt.data.ascent() >>> ascent.shape == (512, 512) True >>> ascent.max() 255 >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(ascent) # doctest: +ELLIPSIS <matplotlib.image.AxesImage object at ...> >>> plt.show() # doctest: +SKIP """ fname = os.path.join(os.path.dirname(__file__), 'ascent.npz') ascent = np.load(fname)['data'] return ascent def aero(): """ Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos Parameters ---------- None Returns ------- aero : ndarray convenient image to use for testing and demonstration Examples -------- >>> import pywt.data >>> aero = pywt.data.ascent() >>> aero.shape == (512, 512) True >>> aero.max() 255 >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(aero) # doctest: +ELLIPSIS <matplotlib.image.AxesImage object at ...> >>> plt.show() # doctest: +SKIP """ fname = os.path.join(os.path.dirname(__file__), 'aero.npz') aero = np.load(fname)['data'] return aero def camera(): """ Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos Parameters ---------- None Returns ------- camera : ndarray convenient image to use for testing and demonstration Examples -------- >>> import pywt.data >>> camera = pywt.data.ascent() >>> camera.shape == (512, 512) True >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(camera) # doctest: +ELLIPSIS <matplotlib.image.AxesImage object at ...> >>> plt.show() # doctest: +SKIP """ fname = os.path.join(os.path.dirname(__file__), 'camera.npz') camera = np.load(fname)['data'] return camera def ecg(): """ Get 1024 points of an ECG timeseries. Parameters ---------- None Returns ------- ecg : ndarray convenient timeseries to use for testing and demonstration Examples -------- >>> import pywt.data >>> ecg = pywt.data.ecg() >>> ecg.shape == (1024,) True >>> import matplotlib.pyplot as plt >>> plt.plot(ecg) # doctest: +ELLIPSIS [<matplotlib.lines.Line2D object at ...>] >>> plt.show() # doctest: +SKIP """ fname = os.path.join(os.path.dirname(__file__), 'ecg.npy') ecg = np.load(fname) return ecg def nino(): """ This data contains the averaged monthly sea surface temperature in degrees Celcius of the Pacific Ocean, between 0-10 degrees South and 90-80 degrees West, from 1950 to 2016. This dataset is in the public domain and was obtained from NOAA. 
National Oceanic and Atmospheric Administration's National Weather Service ERSSTv4 dataset, nino 3, http://www.cpc.ncep.noaa.gov/data/indices/ Parameters ---------- None Returns ------- time : ndarray convenient timeseries to use for testing and demonstration sst : ndarray convenient timeseries to use for testing and demonstration Examples -------- >>> import pywt.data >>> time, sst = pywt.data.nino() >>> sst.shape == (264,) True >>> import matplotlib.pyplot as plt >>> plt.plot(time,sst) # doctest: +ELLIPSIS [<matplotlib.lines.Line2D object at ...>] >>> plt.show() # doctest: +SKIP """ fname = os.path.join(os.path.dirname(__file__), 'sst_nino3.npz') sst_csv = np.load(fname)['sst_csv'] # sst_csv = pd.read_csv("http://www.cpc.ncep.noaa.gov/data/indices/ersst4.nino.mth.81-10.ascii", sep=' ', skipinitialspace=True) # take only full years n = np.floor(sst_csv.shape[0]/12.)*12. # Building the mean of three mounth # the 4. column is nino 3 sst = np.mean(np.reshape(np.array(sst_csv)[:n,4],(n/3,-1)),axis=1) sst = (sst - np.mean(sst)) / np.std(sst, ddof=1) dt = 0.25 time = np.arange(len(sst)) * dt + 1950.0 # construct time array return time, sst
Python
0.025431
@@ -4179,16 +4179,20 @@ n = +int( np.floor @@ -4217,16 +4217,17 @@ 12.)*12. +) %0A # B @@ -4343,21 +4343,26 @@ %5B:n, + 4%5D, + (n/ +/ 3, + -1)), + axis
9dc4bb5dbfbd4702487b00fda9f267e19dd9d4d6
rename local to werkzeug
qg/web/app/wsgiapp.py
qg/web/app/wsgiapp.py
# -*- coding: utf-8 -*- # # Copyright 2013, Qunar OPSDEV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: jaypei <[email protected]> # import sys import multiprocessing from oslo.config import cfg from werkzeug.wsgi import DispatcherMiddleware from werkzeug.serving import run_simple from qg.core.exception import QException from qg.core.app import QApplication web_opts = [ cfg.StrOpt('base-url', default='/', help='The url prefix of this site.'), cfg.StrOpt('run-mode', default="local", choices=('gunicorn', 'local'), help="Run server use the specify mode."), cfg.StrOpt('bind', default='0.0.0.0', help='The IP address to bind'), cfg.IntOpt('port', default=5000, help='The port to listen'), ] gunicorn_opts = [ cfg.StrOpt('config', default=None, help='The path to a Gunicorn config file.'), cfg.IntOpt('worker-count', default=0, help='Process worker count in gunicorn mode.'), cfg.BoolOpt('daemon', default=False, help='Run gunicorn mode as a daemon.'), cfg.StrOpt('accesslog', default=None, help='The Access log file to write to.' '"-" means log to stderr.'), cfg.BoolOpt('ignore-healthcheck-accesslog', default=False), cfg.IntOpt('timeout', default=30, help='Workers silent for more than this many seconds are ' 'killed and restarted.') ] CONF = cfg.CONF CONF.register_cli_opts(web_opts, 'web') CONF.register_cli_opts(gunicorn_opts, 'gunicorn') class WsgiNotInitialized(QException): message = "Wsgi-app was not initialized." 
class QWsgiApplication(QApplication): def init_app(self): super(QWsgiApplication, self).init_app() self.wsgi_app = None def configure(self): super(QWsgiApplication, self).configure() self._set_base_url(CONF.web.base_url) def _debug_run(self): self.flask_app.debug = True CONF.debug = True run_simple(CONF.web.bind, CONF.web.port, self.wsgi_app, use_reloader=CONF.debug, use_debugger=CONF.debug) def _gunicorn_run(self): from gunicorn.app.base import Application app = self.wsgi_app class QlibGunicornApp(Application): def init(self, parser, opts, args): worker_count = CONF.gunicorn.worker_count if worker_count <= 0: worker_count = multiprocessing.cpu_count() * 2 + 1 logger_class = "simple" if CONF.gunicorn.ignore_healthcheck_accesslog: logger_class = "qlib.web.glogging.GunicornLogger" return { 'bind': '{0}:{1}'.format(CONF.web.bind, CONF.web.port), 'workers': worker_count, 'daemon': CONF.gunicorn.daemon, 'config': CONF.gunicorn.config, 'accesslog': CONF.gunicorn.accesslog, 'timeout': CONF.gunicorn.timeout, 'logger_class': logger_class } def load(self): return app # NOTE(zhen.pei): 为了不让gunicorn默认匹配sys.argv[1:] sys.argv = [sys.argv[0]] QlibGunicornApp().run() def _set_base_url(self, base_url): base_url = base_url.strip() if not base_url.startswith("/"): base_url = "/" + base_url self.base_url = base_url def run(self): if CONF.web.run_mode == "local": self._debug_run() elif CONF.web.run_mode == "gunicorn": self._gunicorn_run() def append_wsgi_middlewares(self, *middlewares): if self.wsgi_app is None: raise WsgiNotInitialized() for middleware in middlewares: self.wsgi_app = middleware(self.wsgi_app) return self def set_wsgi_app(self, app, base_url=None): if base_url is None: base_url = self.base_url if base_url != "/": self.wsgi_app = DispatcherMiddleware(simple_404_app, { base_url: app }) else: self.wsgi_app = app def simple_404_app(environ, start_response): status = '404 NOT FOUND' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) return [u"Page not found.".encode('utf8')]
Python
0.00099
@@ -1115,21 +1115,24 @@ corn', ' -local +werkzeug '),%0A @@ -4282,21 +4282,24 @@ ode == %22 -local +werkzeug %22:%0A
dd70c4a7fdde0ba5c221d7c3268827b1d6a38aea
Add param show_codes to v0/Ptref
source/jormungandr/jormungandr/interfaces/v0/Ptref.py
source/jormungandr/jormungandr/interfaces/v0/Ptref.py
# coding=utf-8 # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved. # # This file is part of Navitia, # the software to build cool stuff with public transport. # # Hope you'll enjoy and contribute to this project, # powered by Canal TP (www.canaltp.fr). # Help us simplify mobility and open public transport: # a non ending quest to the responsive locomotion way of traveling! # # LICENCE: This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Stay tuned using # twitter @navitia # IRC #navitia on freenode # https://groups.google.com/d/forum/navitia # www.navitia.io from flask import Flask from flask.ext.restful import Resource, fields from jormungandr import i_manager from jormungandr.protobuf_to_dict import protobuf_to_dict from flask.ext.restful import reqparse from jormungandr.interfaces.parsers import depth_argument from jormungandr.interfaces.argument import ArgumentDoc from jormungandr.interfaces.parsers import depth_argument from jormungandr.authentification import authentification_required class Ptref(Resource): parsers = {} method_decorators = [authentification_required] def __init__(self): super(Ptref, self).__init__() self.resource_type = "Unknown" self.parsers["get"] = reqparse.RequestParser( argument_class=ArgumentDoc) self.parsers["get"].add_argument("start_page", type=int, default=0, description= "The page where want to start") self.parsers["get"].add_argument("count", type=int, default=25, description= "The number of objects you want on the page") self.parsers["get"].add_argument("filter", type=str, default="", description="The filter parameter") self.parsers["get"].add_argument("depth", type=depth_argument, default=1, description= "The depth of object") self.parsers["get"].add_argument("forbidden_uris[]", type=unicode, action="append", description="Uri to forbid") def get(self, region): args = self.parsers["get"].parse_args() response = i_manager.dispatch(args, self.resource_type, instance_name=region) return protobuf_to_dict(response, use_enum_labels=True), 200 class StopAreas(Ptref): """ Retrieves all the stop areas of a region """ def __init__(self): super(StopAreas, self).__init__() self.resource_type = "stop_areas" class StopPoints(Ptref): """ Retrieves all the stop points of a region """ def __init__(self): super(StopPoints, self).__init__() self.resource_type = "stop_points" class Lines(Ptref): """ Retrieves all the lines of a region """ def __init__(self): super(Lines, self).__init__() self.resource_type = "lines" class Routes(Ptref): """ Retrieves all the routes of a region """ def __init__(self): super(Routes, self).__init__() self.resource_type = "routes" class PhysicalModes(Ptref): """ Retrieves all the physical modes of a region """ def __init__(self): super(PhysicalModes, self).__init__() self.resource_type = "physical_modes" class CommercialModes(Ptref): """ Retrieves all the commercial modes of a region """ def __init__(self): 
super(CommercialModes, self).__init__() self.resource_type = "commercial_modes" class Connections(Ptref): """ Retrieves all the connections of a region """ def __init__(self): super(Connections, self).__init__() self.resource_type = "connections" class JourneyPatternPoints(Ptref): """ Retrieves all the journey pattern points of a region """ def __init__(self): super(JourneyPatternPoints, self).__init__() self.resource_type = "journey_pattern_points" class JourneyPatterns(Ptref): """ Retrieves all the journey patterns of a region """ def __init__(self): super(JourneyPatterns, self).__init__() self.resource_type = "journey_patterns" class Companies(Ptref): """ Retrieves all the companies of a region """ def __init__(self): super(Companies, self).__init__() self.resource_type = "companies" class VehicleJourneys(Ptref): """ Retrieves all the vehicle journeys of a region """ def __init__(self): super(VehicleJourneys, self).__init__() self.resource_type = "vehicle_journeys" class Pois(Ptref): """ Retrieves all the pois of a region """ def __init__(self): super(Pois, self).__init__() self.resource_type = "pois" class PoiTypes(Ptref): """ Retrieves all the poi types of a region """ def __init__(self): super(PoiTypes, self).__init__() self.resource_type = "poi_types" class Networks(Ptref): """ Retrieves all the networks of a region """ def __init__(self): super(Networks, self).__init__() self.resource_type = "networks"
Python
0
@@ -1275,16 +1275,60 @@ fields%0A +from flask.ext.restful.types import boolean%0A from jor @@ -2988,16 +2988,184 @@ forbid%22) +%0A self.parsers%5B%22get%22%5D.add_argument(%22show_codes%22, type=boolean, default=False,%0A description=%22Either to show or not codes%22) %0A%0A de
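The diff above registers an extra show_codes flag on the shared reqparse parser of the Ptref base resource. A minimal sketch of the same pattern against the current flask_restful package and its inputs.boolean coercer (the diff itself uses the legacy flask.ext.restful.types import); the route path is an assumption:

from flask import Flask
from flask_restful import Api, Resource, reqparse, inputs

app = Flask(__name__)
api = Api(app)

parser = reqparse.RequestParser()
# Same argument as in the diff: coerced to a real bool, defaulting to False.
parser.add_argument("show_codes", type=inputs.boolean, default=False,
                    help="Either to show or not codes")

class Ptref(Resource):
    def get(self, region):
        args = parser.parse_args()
        return {"region": region, "show_codes": args["show_codes"]}

api.add_resource(Ptref, "/coverage/<region>/ptref")

if __name__ == "__main__":
    app.run()

A request such as /coverage/fr/ptref?show_codes=true then reaches the handler with the flag already parsed as True.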
460f218c2ed71a0a7aff5bb3353bca01a4841af1
Update Homework_Week4_CaseStudy2.py
Week4-Case-Studies-Part2/Bird-Migration/Homework_Week4_CaseStudy2.py
Week4-Case-Studies-Part2/Bird-Migration/Homework_Week4_CaseStudy2.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Feb 23 21:46:13 2017 @author: lamahamadeh """ ''' ============================== Case Study 2 - Bird Migration ============================== ''' #In this case study, we will continue taking a look at patterns of flight #for each of the three birds in our dataset. #------------------------------------------------------------------------------ #Exercise 1 #---------- #pandas makes it easy to perform basic operations on groups within a dataframe #without needing to loop through each value in the dataframe. The sample code #shows you how to group the dataframe by birdname and then find the average #speed_2d for each bird. Modify the code to assign the mean altitudes of each #bird into an object called mean_altitudes. #load the dataframe import pandas as pd birddata = pd.read_csv('/Users/Admin/Desktop/bird_tracking.csv') # First, use `groupby` to group up the data. grouped_birds = birddata.groupby("bird_name") # Now operations are performed on each group. mean_speeds = grouped_birds.speed_2d.mean() # The `head` method prints the first 5 lines of each bird. print grouped_birds.head() # Find the mean `altitude` for each bird. # Assign this to `mean_altitudes`. mean_altitudes = grouped_birds.altitude.mean() #------------------------------------------------------------------------------ #Exercise 2 #---------- #In this exercise, we will group the flight times by date and calculate the #mean altitude within that day. Use groupby to group the data by date. # Convert birddata.date_time to the `pd.datetime` format. birddata.date_time = pd.to_datetime(birddata.date_time) # Create a new column of day of observation birddata["date"] = birddata.date_time.dt.date # Check the head of the column. print birddata.date.head() grouped_bydates = birddata.groupby("date") #Calculate the mean altitude per day and store these results as #mean_altitudes_perday. mean_altitudes_perday = grouped_bydates.altitude.mean() #------------------------------------------------------------------------------ #Exercise 3 #---------- #birddata already contains the date column. To find the average speed for each #bird and day, create a new grouped dataframe called grouped_birdday that #groups the data by both bird_name and date. grouped_birdday = birddata.groupby(["bird_name", "date"]) mean_altitudes_perday = grouped_birdday.altitude.mean() # look at the head of `mean_altitudes_perday`. mean_altitudes_perday.head() #------------------------------------------------------------------------------ #Exercise 4 #---------- #Great! Now find the average speed for each bird and day. Store these are three #pandas Series objects – one for each bird. #Use the plotting code provided to plot the average speeds for each bird. import matplotlib.pyplot as plt eric_daily_speed = grouped_birdday.speed_2d.mean()["Eric"] sanne_daily_speed = grouped_birdday.speed_2d.mean()["Sanne"] nico_daily_speed = grouped_birdday.speed_2d.mean()["Nico"] eric_daily_speed.plot(label="Eric") sanne_daily_speed.plot(label="Sanne") nico_daily_speed.plot(label="Nico") plt.legend(loc="upper left") plt.show() #------------------------------------------------------------------------------
Python
0.000001
@@ -2944,18 +2944,16 @@ plt%0D%0A%0D%0A -%0D%0A eric_dai
e9e40dd4d9d5357069261653cd1a432e99e8e1aa
Remove unexpected dummy code
initialize_data.py
initialize_data.py
import pandas import numpy as np from google.cloud import datastore from math import floor import pdb RATING_KIND = 'Rating' MOVIE_KIND = 'Movie' PROJECT_ID = 'cf-mr-service' client = datastore.Client(PROJECT_ID) def load_from_store(): query = client.query(kind=RATING_KIND) result = query.fetch() rating = list(result) read_rating = None for entity in rating: arr = np.fromstring(entity['data_str'], dtype=entity['dtype']).reshape(entity['rows'], entity['cols']) if read_rating is not None: read_rating = np.append(read_rating, arr, axis=0) else: read_rating = arr def save_to_store(): print 'save to store' header = ['user_id', 'item_id', 'rating', 'timestamp'] rating_data = pandas.read_csv('u.data', sep='\t', names=header) n_users = rating_data.user_id.unique().shape[0] n_items = rating_data.item_id.unique().shape[0] print 'Number of users = ' + str(n_users) + ' | Number of movies = ' + str(n_items) user_rating = np.zeros((n_users, n_items), dtype='uint8') for line in rating_data.itertuples(): user_rating[line[1] - 1, line[2] - 1] = line[3] # split_size = int(floor(1048487.0 * 3 / (4 * n_items))) split_size = int(floor(1048487.0 / n_items)) entity_list = [] print 'config split size = ' + str(split_size) config_key = key=client.key('Config', 'v1.0') entity = client.get(key=config_key) if entity is None: entity = datastore.Entity(key=config_key, exclude_from_indexes=['user_rating_split_size']) entity.update({ 'user_rating_split_size': split_size }) entity_list.append(entity) for i in xrange(0, n_users + 1, split_size): print 'split rating data from ' + str(i) + ' to ' + str(i + split_size) entity = datastore.Entity(key=client.key(RATING_KIND, str(i / split_size)), exclude_from_indexes=['rows', 'cols', 'dtype', 'data_str']) sub_arr = user_rating[i : i + split_size] entity.update({ 'rows': sub_arr.shape[0], 'cols': sub_arr.shape[1], 'dtype': str(sub_arr.dtype), 'data_str': sub_arr.tostring() }) entity_list.append(entity) print 'prepare deleting indexed users' query = client.query(kind='User') query.keys_only() user_keys = [] for user in query.fetch(): print 'users to be delete ' + user.key.name user_keys.append(user.key) with client.transaction(): print 'run transaction' client.put_multi(entity_list) client.delete_multi(user_keys) entity_list = [] print 'load movie info' f = open('u.item') while True: s = f.readline() if not s: break; item_info = s.split('|') entity = datastore.Entity(key=client.key(MOVIE_KIND, str(int(item_info[0]) - 1)), exclude_from_indexes=['title', 'imdb_url']) entity.update({ 'title': item_info[1], 'imdb_url': item_info[4] }) entity_list.append(entity) if (len(entity_list) >= 400): print 'put movie info' client.put_multi(entity_list) entity_list = [] print 'initialization transaction' if __name__ == '__main__': save_to_store() # load_from_store()
Python
0.000037
@@ -1359,20 +1359,16 @@ g_key = -key= client.k
6cb9a09ee92f3be6a5d807e9c5af41bac4796435
Remove loading of env file in development
{{cookiecutter.repo_name}}/config/settings/base.py
{{cookiecutter.repo_name}}/config/settings/base.py
# -*- coding: utf-8 -*- """ Django settings for {{ cookiecutter.project_name }}. For more information on this file, see https://docs.djangoproject.com/en/stable/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/stable/ref/settings/ """ from os.path import dirname, join, exists import environ # Django-environ for using 12-factor environment variables. # http://12factor.net/) env = environ.Env() # Load environment files from file in development env_file = join(dirname(__file__), 'development.env') if exists(env_file): environ.Env.read_env(str(env_file)) # Build paths inside the project like this: join(BASE_DIR, "directory") BASE_DIR = dirname(dirname(dirname(__file__))) # Secret key from environment variables # https://docs.djangoproject.com/en/stable/ref/settings/#secret-key SECRET_KEY = env('DJANGO_SECRET_KEY') # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Third party app # Own apps ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ join(BASE_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.static', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/stable/ref/settings/#databases # Get databases from DATABASE_URL. # https://django-environ.readthedocs.org/en/latest/ DATABASES = { 'default': env.db(), } # Internationalization # https://docs.djangoproject.com/en/stable/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' # Example: Europe/Oslo USE_I18N = False USE_L10N = True USE_TZ = True # Managers # https://docs.djangoproject.com/en/stable/ref/settings/#managers ADMINS = ( ("""{{ cookiecutter.author_name }}""", '{{ cookiecutter.email }}'), ) MANAGERS = ADMINS # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/stable/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [join(BASE_DIR, 'static')] STATIC_ROOT = 'staticfiles'
Python
0
@@ -289,50 +289,8 @@ %22%22%0A%0A -from os.path import dirname, join, exists%0A impo @@ -410,174 +410,8 @@ ()%0A%0A -# Load environment files from file in development%0Aenv_file = join(dirname(__file__), 'development.env')%0Aif exists(env_file):%0A environ.Env.read_env(str(env_file))%0A%0A # Bu @@ -450,20 +450,19 @@ e this: -join +str (BASE_DI @@ -466,11 +466,15 @@ _DIR -, %22 +.path(' dire @@ -478,17 +478,18 @@ irectory -%22 +') )%0ABASE_D @@ -497,31 +497,20 @@ R = -dirname(dirname(dirname +environ.Path (__f @@ -515,18 +515,20 @@ _file__) -)) + - 3 %0A%0A# Secr @@ -665,16 +665,44 @@ RET_KEY' +, default='this_is_a_secret' )%0A%0A# App @@ -1611,20 +1611,19 @@ -join +str (BASE_DI @@ -1623,18 +1623,22 @@ BASE_DIR -, +.path( 'templat @@ -1641,16 +1641,17 @@ plates') +) ,%0A @@ -2342,16 +2342,82 @@ env.db( +'DATABASE_URL', default='postgres:///%7B%7B cookiecutter.repo_name %7D%7D' ),%0A%7D%0A%0A# @@ -2945,13 +2945,17 @@ S = -%5Bjoin +(%0A str (BAS @@ -2959,18 +2959,22 @@ BASE_DIR -, +.path( 'static' @@ -2974,17 +2974,20 @@ static') -%5D +),%0A) %0A%0ASTATIC
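The diff above removes the development.env file loader and relies on django-environ defaults instead. A settings-module fragment sketching the idioms it moves to, with 'myproject' standing in for the cookiecutter-generated database name:

import environ

env = environ.Env()

# environ.Path(__file__) - 3 walks three directories up from this settings file.
BASE_DIR = environ.Path(__file__) - 3

# Falls back to the given default when the environment variable is unset.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='this_is_a_secret')

# Parses DATABASE_URL (e.g. postgres://user:pass@host/db) into Django's dict format.
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres:///myproject'),
}

STATICFILES_DIRS = (
    str(BASE_DIR.path('static')),
)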
c90fce44f30398fef0c20ec08f761ae19951308a
Delete unused pipeline settings
{{cookiecutter.repo_name}}/src/settings/project.py
{{cookiecutter.repo_name}}/src/settings/project.py
# -*- coding: utf-8 -*- """ Project settings for {{cookiecutter.project_name}} Author : {{cookiecutter.author_name}} <{{cookiecutter.email}}> """ from defaults import * from getenv import env INSTALLED_APPS += ( 'applications.front', ) GRAPPELLI_ADMIN_TITLE = "Admin" PIPELINE_CSS = { 'stylesheets': { 'source_filenames': ( ), 'output_filename': 'stylesheets.css', 'extra_context': { 'media': 'screen,projection', }, }, } PIPELINE_JS = { 'scripts': { 'source_filenames': ( ), 'output_filename': 'scripts.js', } }
Python
0.000001
@@ -285,343 +285,4 @@ in%22%0A -%0APIPELINE_CSS = %7B%0A 'stylesheets': %7B%0A 'source_filenames': (%0A ),%0A 'output_filename': 'stylesheets.css',%0A 'extra_context': %7B%0A 'media': 'screen,projection',%0A %7D,%0A %7D,%0A%7D%0A%0APIPELINE_JS = %7B%0A 'scripts': %7B%0A 'source_filenames': (%0A ),%0A 'output_filename': 'scripts.js',%0A %7D%0A%7D%0A
c83e8d7d83b9395b5b0428dcac2909b8d6762fe4
make Tool work without invoke scripts again
hublib/rappture/tool.py
hublib/rappture/tool.py
from __future__ import print_function from .node import Node import numpy as np from lxml import etree as ET import os import subprocess import sys from .rappture import RapXML class Tool(RapXML): def __init__(self, tool): """ tool can be any of the following: - Path to a tool.xml file. - Name of a published tool. The current version will be run. """ dirname, xml = os.path.split(tool) if dirname == "": if xml != "tool.xml": # must be tool name dirname = "/apps/%s/current" % xml xml = dirname + "/rappture/tool.xml" else: dirname = os.getcwd() else: xml = tool dirname = os.path.abspath(os.path.join(dirname, '..')) xml = os.path.abspath(xml) if not os.path.isfile(xml): raise ValueError("tool must be a toolname or path to a tool.xml file.") invoke_file = os.path.join(dirname, 'middleware', 'invoke') if os.path.isfile(invoke_file): self.invoke_file = invoke_file sessdir = os.environ['SESSIONDIR'] self.tmp_name = os.path.join(sessdir, 'tool_driver_%s.xml' % os.getpid()) self.run_name = "" self.toolparameters_name = os.path.join(sessdir, 'driver_%s.hz' % os.getpid()) self.rappturestatus_name = os.path.join(sessdir, 'rappture.status') self.fname = xml self.tree = ET.parse(xml) self.path = '' def run(self, verbose=True): # print("Writing", self.tmp_name) with open(self.tmp_name, 'w') as f: f.write(str(self.xml(pretty=False, header=True))) with open(self.toolparameters_name, 'w') as f: f.write("file(execute):%s" % (self.tmp_name)) cmd = "TOOL_PARAMETERS=%s %s" % (self.toolparameters_name,self.invoke_file) if verbose: print("cmd=", cmd) cwd = os.getcwd() os.chdir(os.environ['SESSIONDIR']) try: ret = subprocess.call(cmd, shell=True) if ret: print('Error: "%s"' % cmd, file=sys.stderr) if ret < 0: print("Terminated by signal", -ret, file=sys.stderr) else: print("Returncode", ret, file=sys.stderr) except OSError as e: print('Error: "%s"' % cmd, file=sys.stderr) print("Failed:", e, file=sys.stderr) sys.exit(1) with(open(self.rappturestatus_name, 'r')) as f: statusData = f.readlines() for record in statusData: if 'output saved in' in record: self.run_name = record.strip().split()[-1] break if self.run_name: self.tree = ET.parse(self.run_name) os.chdir(cwd)
Python
0
@@ -773,35 +773,15 @@ ath( -os.path.join(dirname, '..') +dirname )%0A%0A @@ -935,16 +935,59 @@ ile.%22)%0A%0A + sessdir = os.environ%5B'SESSIONDIR'%5D%0A @@ -1133,51 +1133,351 @@ ile%0A -%0A sessdir = os.environ%5B'SESSIONDIR'%5D + else:%0A self.invoke_file = os.path.join(sessdir, 'invoke_%25s' %25 os.getpid())%0A with open(self.invoke_file, 'w') as f:%0A print('#!/bin/sh', file=f)%0A print('/usr/bin/invoke_app -T %25s -C rappture' %25 dirname, file=f)%0A subprocess.call('chmod +x %25s' %25 self.invoke_file, shell=True)%0A %0A @@ -1762,19 +1762,25 @@ elf. -f +dir name = -xml +dirname %0A @@ -1794,27 +1794,17 @@ lf.t -ree = ET.parse( +ool = xml -) %0A @@ -1812,22 +1812,34 @@ -self.path = '' +RapXML.__init__(self, xml) %0A%0A @@ -2131,17 +2131,16 @@ _name))%0A -%0A @@ -2197,16 +2197,17 @@ rs_name, + self.inv
89057827a1d301715f9275dafb6a3936c8fa170d
Fix breadcrumb path
interface/views.py
interface/views.py
import hashlib import hmac import json import re from django.conf import settings from django.contrib.auth import logout from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.core.urlresolvers import reverse from django.db.models import Count from django.http import HttpResponse, Http404 from django.shortcuts import redirect, render from django.utils.decorators import method_decorator from django.views import generic from django.views.decorators.csrf import csrf_exempt from github import UnknownObjectException, BadCredentialsException from social.apps.django_app.default.models import UserSocialAuth from interface.models import Repo from documents.models import Document from interface.utils import get_github from interface.path_processor import PathProcessor class RepoDetailView(generic.DetailView, generic.UpdateView): model = Repo slug_field = 'full_name' slug_url_kwarg = 'full_name' template_name = 'interface/repo_detail.html' fields = ['wiki_branch'] def get(self, request, *args, **kwargs): self.object = self.get_object() context = self.get_context_data(object=self.object) is_collab = self.object.user_is_collaborator(request.user) context['is_owner'] = is_collab if self.object.is_private and not is_collab: raise Http404('You are not allowed to view this Repo') repo_name = self.object.full_name branches = [] if is_collab: g = get_github(self.object.user) grepo = g.get_repo(repo_name) branches = [i.name for i in grepo.get_branches()] context['branches'] = branches path = kwargs.get('path') path = path or '/' path_processor = PathProcessor(repo_name, path) is_directory = False try: # Viewing a single file filename = path_processor.filename trunc_path = path_processor.directory document = Document.objects.get(repo=self.object, path=trunc_path, filename=filename) documents = [] except Document.DoesNotExist: path_processor = PathProcessor(repo_name, path, is_directory=True) trunc_path = path_processor.directory is_directory = True try: # Viewing a folder with a README document = Document.objects.get( repo=self.object, path=trunc_path, filename__istartswith='README') except Document.DoesNotExist: # Viewing a folder without a README document = None documents = Document.objects.filter(repo=self.object, path__startswith=trunc_path) context['document'] = document context['path'] = path_processor.path_in_repo context['files'] = self.object.get_folder_contents(trunc_path, documents) context['directory'] = is_directory if is_directory and re.match('.+[^/]$', request.path): return redirect(request.path + '/') if len(context['files']) == 0 and 'document' not in context: raise Http404 context['base_url'] = request.build_absolute_uri(self.object.get_absolute_url()) b_tuples = [] if path != '/': path = path[1:] breadcrumbs = path.split('/') for b in breadcrumbs: if not b_tuples: url = '{0}{1}/'.format(context['base_url'], b) else: url = '{0}{1}/'.format(b_tuples[-1][0], b) b_tuples.append((url, b)) context['breadcrumbs'] = b_tuples return self.render_to_response(context) def form_invalid(self, form): # TODO: Submit form via ajax, show error message if invalid # I have no idea how someone would submit an invalid form return render(self.request, 'interface/500.html') class RepoListView(LoginRequiredMixin, generic.ListView): template_name = 'interface/repo_list.html' def get(self, request, *args, **kwargs): g = get_github(self.request.user) try: repos = [r for r in g.get_user().get_repos()] except BadCredentialsException: 
UserSocialAuth.objects.filter(user=request.user).delete() return redirect(reverse('social:begin', args=['github'])) + '?next=' + request.path self.object_list = Repo.objects.filter( full_name__in=[i.full_name for i in repos], disabled=False ).annotate(doc_count=Count('documents')) names = [x.full_name for x in self.object_list] filtered = [] for repo in repos: if repo.full_name not in names: filtered.append(repo) context = self.get_context_data() context['repos'] = filtered context['welcome'] = request.GET.get('welcome', False) return self.render_to_response(context) class RepoDeleteView(generic.DetailView): model = Repo slug_field = 'full_name' slug_url_kwarg = 'full_name' @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(RepoDeleteView, self).dispatch(request, *args, **kwargs) def soft_delete(self, request): obj = self.get_object() if not obj.user_is_collaborator(request.user): raise Http404('You are not allowed to delete this repo') obj.soft_delete() def get(self, request, *args, **kwargs): self.soft_delete(request) return redirect(reverse('repo_list')) def delete(self, request): self.soft_delete(request) return HttpResponse(status=204) @login_required def ProcessRepo(request, full_name): user = request.user g = get_github(request.user) grepo = g.get_repo(full_name) if not grepo.full_name: raise Http404('Repo not found') guser = g.get_user(user.username) is_collab = grepo.has_in_collaborators(guser) if not is_collab and grepo.private: raise Http404('You are not a collaborator of this repo') try: repo = Repo.objects.get(full_name=grepo.full_name) repo.disabled = False repo.is_private = grepo.private repo.save() except Repo.DoesNotExist: repo = Repo.objects.create( full_name=grepo.full_name, user=user, wiki_branch=grepo.default_branch, is_private=grepo.private ) if not repo.webhook_id: try: repo.add_webhook(request) except UnknownObjectException: raise Http404('Github failed to create a hook') repo.enqueue() url = reverse('repo_detail', kwargs={'full_name': repo.full_name}) return redirect(url) def LogoutView(request): next = request.GET.get('next', '/') logout(request) return redirect(next) def handler404(request): response = render(request, 'interface/404.html') response.status_code = 404 return response def handler500(request): response = render(request, 'interface/500.html') response.status_code = 500 return response
Python
0.999749
@@ -3477,32 +3477,33 @@ url = '%7B0%7D +/ %7B1%7D/'.format(con
c06ceb1c3f06bb08e9adb84d82d33325e54ec507
Update accordion.py
cmsplugin_cascade/bootstrap4/accordion.py
cmsplugin_cascade/bootstrap4/accordion.py
from django.forms import widgets, BooleanField, CharField from django.forms.fields import IntegerField from django.utils.translation import ungettext_lazy, ugettext_lazy as _ from django.utils.text import Truncator, mark_safe from django.utils.html import escape from entangled.forms import EntangledModelFormMixin from cms.plugin_pool import plugin_pool from cmsplugin_cascade.forms import ManageChildrenFormMixin from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer from cmsplugin_cascade.widgets import NumberInputWidget from .plugin_base import BootstrapPluginBase class AccordionFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin): num_children = IntegerField( min_value=1, initial=1, widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em !important;'}), label=_("Groups"), help_text=_("Number of groups for this accordion."), ) close_others = BooleanField( label=_("Close others"), initial=True, required=False, help_text=_("Open only one card at a time.") ) first_is_open = BooleanField( label=_("First open"), initial=True, required=False, help_text=_("Start with the first card open.") ) class Meta: untangled_fields = ['num_children'] entangled_fields = {'glossary': ['close_others', 'first_is_open']} class BootstrapAccordionPlugin(TransparentWrapper, BootstrapPluginBase): name = _("Accordion") default_css_class = 'accordion' require_parent = True parent_classes = ['BootstrapColumnPlugin'] direct_child_classes = ['BootstrapAccordionGroupPlugin'] allow_children = True form = AccordionFormMixin render_template = 'cascade/bootstrap4/{}accordion.html' @classmethod def get_identifier(cls, obj): num_cards = obj.get_num_children() content = ungettext_lazy('with {0} card', 'with {0} cards', num_cards).format(num_cards) return mark_safe(content) def render(self, context, instance, placeholder): context = self.super(BootstrapAccordionPlugin, self).render(context, instance, placeholder) context.update({ 'close_others': instance.glossary.get('close_others', True), 'first_is_open': instance.glossary.get('first_is_open', True), }) return context def save_model(self, request, obj, form, change): wanted_children = int(form.cleaned_data.get('num_children')) super().save_model(request, obj, form, change) self.extend_children(obj, wanted_children, BootstrapAccordionGroupPlugin) plugin_pool.register_plugin(BootstrapAccordionPlugin) class AccordionGroupFormMixin(EntangledModelFormMixin): heading = CharField( label=_("Heading"), widget=widgets.TextInput(attrs={'size': 80}), ) body_padding = BooleanField( label=_("Body with padding"), initial=True, required=False, help_text=_("Add standard padding to card body."), ) class Meta: entangled_fields = {'glossary': ['heading', 'body_padding']} def clean_heading(self): return escape(self.cleaned_data['heading']) class BootstrapAccordionGroupPlugin(TransparentContainer, BootstrapPluginBase): name = _("Accordion Group") direct_parent_classes = parent_classes = ['BootstrapAccordionPlugin'] render_template = 'cascade/generic/naked.html' require_parent = True form = AccordionGroupFormMixin alien_child_classes = True @classmethod def get_identifier(cls, instance): heading = instance.glossary.get('heading', '') return Truncator(heading).words(3, truncate=' ...') def render(self, context, instance, placeholder): context = self.super(BootstrapAccordionGroupPlugin, self).render(context, instance, placeholder) context.update({ 'heading': mark_safe(instance.glossary.get('heading', '')), 'no_body_padding': not 
instance.glossary.get('body_padding', True), }) return context plugin_pool.register_plugin(BootstrapAccordionGroupPlugin)
Python
0.000001
@@ -1,24 +1,25 @@ +%0A from django.forms import @@ -184,47 +184,192 @@ ango -.utils.text import Truncator, mark_safe + import VERSION as DJANGO_VERSION%0Aif DJANGO_VERSION %3C (2, 0):%0A from django.utils.text import Truncator, mark_safe%0Aelse:%0A from django.utils.safestring import Truncator, mark_safe %0Afro
eaf390b065944a64a3b74c1b0e43b1df60d4e88f
Reimplement deduping hurr
invoke/executor.py
invoke/executor.py
class Executor(object): """ An execution strategy for Task objects. Subclasses may override various extension points to change, add or remove behavior. """ def __init__(self, collection): """ Create executor with a pointer to the task collection ``collection``. This pointer is used for looking up tasks by name and storing/retrieving state, e.g. how many times a given task has been run this session and so on. """ self.collection = collection def execute(self, name, kwargs=None, dedupe=True): """ Execute task named ``name``, optionally passing along ``kwargs``. If ``dedupe`` is ``True`` (default), will ensure any given task within ``self.collection`` is only run once per session. To disable this behavior, say ``dedupe=False``. """ kwargs = kwargs or {} # Expand task list all_tasks = self.task_list(name) # Compact (preserving order, so not using list+set) compact_tasks = [] for task in all_tasks: if task not in compact_tasks: compact_tasks.append(task) # Remove tasks already called tasks = [] for task in compact_tasks: if not task.called: tasks.append(task) # Execute for task in tasks: task.body(**kwargs) def task_list(self, name): task = self.collection[name] tasks = [task] prereqs = [] for pretask in task.pre: prereqs.append(self.collection[pretask]) return prereqs + tasks
Python
0.000001
@@ -975,16 +975,69 @@ # + Dedupe if requested%0A if dedupe:%0A # Compact @@ -1079,32 +1079,36 @@ st+set)%0A + compact_tasks = @@ -1110,32 +1110,36 @@ ks = %5B%5D%0A + + for task in all_ @@ -1137,32 +1137,36 @@ k in all_tasks:%0A + if t @@ -1199,32 +1199,36 @@ + + compact_tasks.ap @@ -1230,32 +1230,36 @@ ks.append(task)%0A + # Remove @@ -1280,32 +1280,36 @@ called%0A + + tasks = %5B%5D%0A @@ -1295,32 +1295,36 @@ tasks = %5B%5D%0A + for task @@ -1354,16 +1354,20 @@ + + if not t @@ -1390,24 +1390,28 @@ + tasks.append @@ -1409,32 +1409,76 @@ ks.append(task)%0A + else:%0A tasks = all_tasks%0A # Execut @@ -1502,24 +1502,24 @@ k in tasks:%0A + @@ -1526,13 +1526,8 @@ task -.body (**k
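The diff above makes deduplication optional by wrapping the compaction and already-called filtering in an if dedupe: branch. A self-contained sketch of the same order-preserving logic; Task here is a toy stand-in, not invoke's real task type:

class Task:
    def __init__(self, name):
        self.name = name
        self.called = False

    def __call__(self):
        self.called = True
        print("running", self.name)


def execute(all_tasks, dedupe=True):
    if dedupe:
        # Compact while preserving order (so not using list(set(...))).
        compact = []
        for task in all_tasks:
            if task not in compact:
                compact.append(task)
        # Skip tasks that already ran this session.
        tasks = [task for task in compact if not task.called]
    else:
        tasks = all_tasks
    for task in tasks:
        task()


clean, build = Task("clean"), Task("build")
execute([clean, clean, build])           # clean runs once, then build
execute([clean, build], dedupe=False)    # both run again despite being called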
375d12ab7486f6bb0d57232d48c556e6c0eda0c1
Update P05_stylingExcel fixed PEP8 spacing
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
# This program uses the OpenPyXL module to manipulate Excel documents import openpyxl from openpyxl.styles import Font, NamedStyle wb = openpyxl.Workbook() sheet = wb["Sheet"] # Setting the Font Style of Cells italic24Font = NamedStyle(name="italic24Font") italic24Font.font = Font(size=24, italic=True) sheet["A1"].style = italic24Font sheet["A1"] = "Hello world!" wb.save("styled.xlsx") # Font Objects wb = openpyxl.Workbook() sheet = wb["Sheet"] fontObj1 = Font(name="Times New Roman", bold=True) styleObj1 = NamedStyle(name="styleObj1") styleObj1.font = fontObj1 sheet["A1"].style = styleObj1 sheet["A1"] = "Bold Times New Roman" fontObj2 = Font(size=24, italic=True) styleObj2 = NamedStyle(name="styleObj2") styleObj2.font = fontObj2 sheet["B3"].style = styleObj2 sheet["B3"] = "24 pt Italic" wb.save("styles.xlsx") # Formulas wb = openpyxl.Workbook() sheet = wb.active sheet["A1"] = 200 sheet["A2"] = 300 sheet["A3"] = "=SUM(A1:A2)" wb.save("writeFormula.xlsx") wbFormulas = openpyxl.load_workbook("writeFormula.xlsx") sheet = wbFormulas.active print(sheet["A3"].value) wbDataOnly = openpyxl.load_workbook("writeFormula.xlsx", data_only=True) sheet = wbDataOnly.active print(sheet["A3"].value) # Not working with LibreOffice 6.0.3.2 # Adjusting Rows and Columns wb = openpyxl.Workbook() sheet = wb.active sheet["A1"] = "Tall row" sheet["B2"] = "Wide column" sheet.row_dimensions[1].height = 70 sheet.column_dimensions['B'].width = 20 wb.save("dimensions.xlsx") wb = openpyxl.Workbook() sheet = wb.active sheet.merge_cells("A1:D3") sheet["A1"] = "Twelve cells merged together." sheet.merge_cells("C5:D5") sheet["C5"] = "Two merged cells." wb.save("merged.xlsx") wb = openpyxl.load_workbook("merged.xlsx") sheet = wb.active sheet.unmerge_cells("A1:D3") sheet.unmerge_cells("C5:D5") #wb.save("merged.xlsx") # uncomment to see changes wb = openpyxl.load_workbook("produceSales.xlsx") sheet = wb.active sheet.freeze_panes = "A2" wb.save("freezeExample.xlsx") # Charts wb = openpyxl.Workbook() sheet = wb.get_active_sheet() for i in range(1, 11): # create some data in column A sheet['A' + str(i)] = i refObj = openpyxl.charts.Reference(sheet, (1, 1), (10, 1)) seriesObj = openpyxl.charts.Series(refObj, title="First Series") chartObj = openpyxl.charts.BarChart() chartObj.append(seriesObj) chartObj.drawing.top = 50 # set the position chartObj.drawing.left = 100 chartObj.drawing.width = 300 # set the size chartObj.drawing.height = 200 sheet.add_chart(chartObj) wb.save("sampleChart.xlsx")
Python
0
@@ -2415,16 +2415,17 @@ h = 300 + # set th
17793c9b3ceecc206aab1d1c34c0d3dc69892cbd
Use ArgumentParser to enforce required arguments
monitor/runner.py
monitor/runner.py
import sys from time import sleep from camera import Camera from controller import Controller from plotter_pygame import PyGamePlotter import epics import argparse if __name__ == "__main__": parser = argparse.ArgumentParser(description='') parser.add_argument('--prefix', dest='prefix', help='controller IOC prefix') parser.add_argument('--name', dest='name', help='name of monitor') parser.add_argument('--fullscreen', dest='fullscreen', default=1, help='1 for fullscreen (default), 0 for small window') args = parser.parse_args() if not (args.prefix and args.name): parser.error("Arguments missing. Please use both --prefix and --name") controller = Controller(args.prefix, args.name) plotter = PyGamePlotter(args.name, args.fullscreen) camera = Camera() old_cmap = "" while True: try: # check for quit events if not plotter.i_shall_continue(): break # get camera name camera_name = controller.camera # if no camera is selected, make screen blank if camera_name == "": plotter.blank() # otherwise, display camera feed else: camera.set_name(camera_name) # update colormap cmap = controller.colourmap_name if cmap != old_cmap: old_cmap = cmap plotter.set_colormap(controller.colourmap_data) # update aspect ratio plotter.set_aspect_ratio(controller.aspect) # get camera data and process it plotter.process(camera.get_data()) # udpate label info if controller.label == 1: plotter.show_label(camera_name) pass # show and wait plotter.show() sleep(controller.rate) except KeyboardInterrupt: plotter.quit() pass plotter.quit()
Python
0
@@ -158,16 +158,17 @@ gparse%0A%0A +%0A if __nam @@ -186,17 +186,16 @@ ain__%22:%0A -%0A pars @@ -274,16 +274,31 @@ prefix', + required=True, dest='p @@ -368,16 +368,31 @@ --name', + required=True, dest='n @@ -582,128 +582,8 @@ ()%0A%0A - if not (args.prefix and args.name):%0A parser.error(%22Arguments missing. Please use both --prefix and --name%22)%0A%0A
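The diff above moves the mandatory-argument check into argparse itself via required=True, so the hand-written parser.error() call disappears. A stripped-down sketch:

import argparse

parser = argparse.ArgumentParser(description='')
parser.add_argument('--prefix', required=True, dest='prefix', help='controller IOC prefix')
parser.add_argument('--name', required=True, dest='name', help='name of monitor')
args = parser.parse_args()
print(args.prefix, args.name)

Running it without --prefix makes argparse report "the following arguments are required: --prefix" and exit with status 2, which is what the removed manual check used to approximate.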
81a4b04173033d7e678ad6c4b4efae654af9ac11
Use a threading local object to isolate MongoDB connection between different threads but reuse the same connection in the same thread
moocng/mongodb.py
moocng/mongodb.py
# Copyright 2013 Rooter Analysis S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urlparse from django.conf import settings from django.core.exceptions import ImproperlyConfigured from pymongo.connection import Connection DEFAULT_MONGODB_HOST = 'localhost' DEFAULT_MONGODB_PORT = 27017 DEFAULT_MONGODB_NAME = 'moocng' DEFAULT_MONGODB_URI = 'mongodb://%s:%d/%s' % (DEFAULT_MONGODB_HOST, DEFAULT_MONGODB_PORT, DEFAULT_MONGODB_NAME) class MongoDB(object): def __init__(self, db_uri=DEFAULT_MONGODB_URI, connection_factory=Connection): self.db_uri = urlparse.urlparse(db_uri) self.connection = connection_factory( host=self.db_uri.hostname or DEFAULT_MONGODB_HOST, port=self.db_uri.port or DEFAULT_MONGODB_PORT) if self.db_uri.path: self.database_name = self.db_uri.path[1:] else: self.database_name = DEFAULT_MONGODB_NAME self.database = self.get_database() def get_connection(self): return self.connection def get_database(self): database = self.connection[self.database_name] if self.db_uri.username and self.db_uri.password: database.authenticate(self.db_uri.username, self.db_uri.password) return database def get_collection(self, collection): return self.database[collection] def get_db(): try: db_uri = settings.MONGODB_URI except AttributeError: raise ImproperlyConfigured('Missing required MONGODB_URI setting') return MongoDB(db_uri)
Python
0.000001
@@ -592,16 +592,44 @@ urlparse +%0Afrom threading import local %0A%0Afrom d @@ -1987,16 +1987,40 @@ tion%5D%0A%0A%0A +connections = local()%0A%0A%0A def get_ @@ -2183,27 +2183,122 @@ -return MongoDB(db_uri) +if not hasattr(connections, 'default'):%0A connections.default = MongoDB(db_uri)%0A%0A return connections.default %0A
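The diff above stores the MongoDB connection in a threading.local object so each thread opens its own connection once and then reuses it. A generic sketch of that caching pattern, with a dummy factory in place of the real pymongo connection:

import threading

connections = threading.local()


def make_connection():
    # Placeholder for the real connection factory.
    print("opening connection in", threading.current_thread().name)
    return object()


def get_db():
    # First call in a given thread creates the connection; later calls reuse it.
    if not hasattr(connections, 'default'):
        connections.default = make_connection()
    return connections.default


def worker():
    assert get_db() is get_db()   # same object within one thread


threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
# Two "opening connection" lines are printed: connections are isolated
# between threads but reused within each thread.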
aa9143302b376e1274c8c11b53687771d0444b5a
Remove now-unused isInt code
morss/__main__.py
morss/__main__.py
# ran on `python -m morss` import os import sys from . import wsgi from . import cli from .morss import MorssException import wsgiref.simple_server import wsgiref.handlers PORT = int(os.getenv('PORT', 8080)) def isInt(string): try: int(string) return True except ValueError: return False def main(): if 'REQUEST_URI' in os.environ: # mod_cgi (w/o file handler) app = wsgi.cgi_app app = wsgi.cgi_dispatcher(app) app = wsgi.cgi_error_handler(app) app = wsgi.cgi_encode(app) wsgiref.handlers.CGIHandler().run(app) elif len(sys.argv) <= 1: # start internal (basic) http server (w/ file handler) app = wsgi.cgi_app app = wsgi.cgi_file_handler(app) app = wsgi.cgi_dispatcher(app) app = wsgi.cgi_error_handler(app) app = wsgi.cgi_encode(app) print('Serving http://localhost:%s/' % port) httpd = wsgiref.simple_server.make_server('', PORT, app) httpd.serve_forever() else: # as a CLI app try: cli.cli_app() except (KeyboardInterrupt, SystemExit): raise except Exception as e: print('ERROR: %s' % e.message) if __name__ == '__main__': main()
Python
0.000444
@@ -213,123 +213,8 @@ )%0A%0A%0A -def isInt(string):%0A try:%0A int(string)%0A return True%0A%0A except ValueError:%0A return False%0A%0A%0A def
f021922dec168a4bb97516eb6b7a7ca5fe3bfb96
Use HostAddressOpt for opts that accept IP and hostnames
ironic/conf/api.py
ironic/conf/api.py
# Copyright 2016 Intel Corporation # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ironic.common.i18n import _ opts = [ cfg.StrOpt('host_ip', default='0.0.0.0', help=_('The IP address on which ironic-api listens.')), cfg.PortOpt('port', default=6385, help=_('The TCP port on which ironic-api listens.')), cfg.IntOpt('max_limit', default=1000, help=_('The maximum number of items returned in a single ' 'response from a collection resource.')), cfg.StrOpt('public_endpoint', help=_("Public URL to use when building the links to the API " "resources (for example, \"https://ironic.rocks:6384\")." " If None the links will be built using the request's " "host URL. If the API is operating behind a proxy, you " "will want to change this to represent the proxy's URL. " "Defaults to None.")), cfg.IntOpt('api_workers', help=_('Number of workers for OpenStack Ironic API service. ' 'The default is equal to the number of CPUs available ' 'if that can be determined, else a default worker ' 'count of 1 is returned.')), cfg.BoolOpt('enable_ssl_api', default=False, help=_("Enable the integrated stand-alone API to service " "requests via HTTPS instead of HTTP. If there is a " "front-end service performing HTTPS offloading from " "the service, this option should be False; note, you " "will want to change public API endpoint to represent " "SSL termination URL with 'public_endpoint' option.")), cfg.BoolOpt('restrict_lookup', default=True, help=_('Whether to restrict the lookup API to only nodes ' 'in certain states.')), cfg.IntOpt('ramdisk_heartbeat_timeout', default=300, deprecated_group='agent', deprecated_name='heartbeat_timeout', help=_('Maximum interval (in seconds) for agent heartbeats.')), ] opt_group = cfg.OptGroup(name='api', title='Options for the ironic-api service') def register_opts(conf): conf.register_group(opt_group) conf.register_opts(opts, group=opt_group)
Python
0.000094
@@ -761,27 +761,35 @@ = %5B%0A cfg. -Str +HostAddress Opt('host_ip @@ -787,24 +787,32 @@ ('host_ip',%0A + @@ -840,32 +840,40 @@ %0A + help=_('The IP a @@ -879,16 +879,28 @@ address +or hostname on which @@ -903,32 +903,65 @@ hich ironic-api +'%0A ' listens.')),%0A
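The diff above swaps StrOpt for HostAddressOpt so host_ip validates hostnames as well as IP addresses. A small sketch of registering and overriding such an option (assumes an oslo.config release recent enough to ship HostAddressOpt):

from oslo_config import cfg

opts = [
    cfg.HostAddressOpt('host_ip',
                       default='0.0.0.0',
                       help='The IP address or hostname on which the API listens.'),
    cfg.PortOpt('port',
                default=6385,
                help='The TCP port on which the API listens.'),
]

conf = cfg.ConfigOpts()
conf.register_opts(opts)
conf(args=[])                                   # parse an empty command line; defaults apply
print(conf.host_ip, conf.port)                  # 0.0.0.0 6385
conf.set_override('host_ip', 'controller-1')    # a hostname now passes validation too
print(conf.host_ip)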
2159e6c2b550367d456e3d743b7757c59636a3c7
update dictionary on ramiro's desktop to use starmon data
pycqed/init/config/setup_dict.py
pycqed/init/config/setup_dict.py
# Dictionaries used in setup. mac_dict = {'203178706891063': 'CDickel_Desktop', '203308017140376': 'Adriaans_Macbook', '963460802314': 'La_Ferrari', '46390847648': 'La_Maserati', '215977245841658': 'La_Maserati_JrJr', '13795386264098': 'Serwans_Laptop', '963460956772': 'La_Ducati', '203050745808564': 'La_Ducati_Jr', '57277341811788': 'Simulation_PC', '272774795670508': 'Nathans_Laptop', '46390847630': 'tud276606_FPGA_PC', '198690273946987': 'Bart_Laptop', '167746772205643': 'NuovaFerrari', '167746772714689' : 'Xiang_PC', '180725258210527': 'Niels_macbook', '109952948723616': 'Ramiro_Desktop', '215977245834050': 'Ramiro_Desktop', '31054844829911': 'Sjoerd_laptop' } data_dir_dict = {'tud276606_FPGA_PC': 'D:\Experiments/CBox_Testing/Data', 'CDickel_Desktop': 'D:\Experiments/ExperimentName/Data', 'Sjoerd_laptop': 'D:\data', # 'Adriaans_Macbook': ('/Users/Adriaan/Dropbox/PhD-Delft/' + # 'DataFolders/ExperimentalDataTUD277620/CBox_Testing/Data'), 'Adriaans_Macbook': ('/Users/Adriaan/Documents/Testing/Data'), 'Niels_macbook': '/Users/nbultink/temp_data', 'La_Ferrari': 'D:\Experiments/1511_RabiSims2/Data', 'La_Maserati': 'D:\Experiments/JJO-type_IV/Data', # 'La_Maserati_JrJr': 'D:\\Experimentsp7_Qcodes_5qubit\data', 'La_Maserati_JrJr': 'D:\\Experiments\\1607_Qcodes_5qubit\\data', 'Xiang_PC' : 'D:\PycQED\data', 'Serwans_Laptop': 'W:/tnw/NS/qt/Serwan/MuxMon/', 'La_Ducati': 'D:\Experiments/Simultaneous_Driving/Data', 'La_Ducati_Jr': 'D:\Experiments/MixerCalibrations/data', 'Simulation_PC': 'D:\Experiments/testSingleShotFidelityAnalysis/Data', 'Ramiro_Desktop': r'\\131.180.82.81\\data', # 'Ramiro_Desktop': r'\\131.180.82.237\\1511_RabiSims2\\Data', r'Nathans_Laptop': 'D:/nlangford\My Documents\Projects\Rabi Model\Experiment_1504\Data', 'Bart_Laptop' : 'C:\Experiments/NumericalOptimization/Data' }
Python
0
@@ -2044,16 +2044,18 @@ + # 'Ramiro @@ -2083,32 +2083,121 @@ 0.82.81%5C%5Cdata',%0A + 'Ramiro_Desktop': r'%5C%5C131.180.82.190%5C%5CExperiments%5C%5C1611_Starmon%5C%5CData',%0A
3927fd757ff404af61e609cc1728d1f3fe398230
Fix on error text.
mp3datastorage.py
mp3datastorage.py
#store file attributes component import sqlite3 as sql import os import mp3metadata #TODO add directory of the database #Allow database recognition and resetting the database class SQLmgr: def __init__(self, username): #note everytime function is called MusicData table is dropped! self.serv = False self.errors = open("error.txt", "w") self.servcount=1 db = username + ".db" self.db = db if self.db in os.listdir("."): #database already exists pass else: try: serv = sql.connect(db) with serv: self.serv = serv.cursor() self.serv.execute("DROP TABLE IF EXISTS MusicData") self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)") self.serv.close() except sql.Error, e: print "Error executing SQL table. ", e.args[0] return 1 def wipe_database(self, username): self.db = username + ".db" try: serv = sql.connect(db) with serv: self.serv = serv.cursor() self.serv.execute("DROP TABLE IF EXISTS MusicData") self.serv.execute("CREATE TABLE MusicData(Id INT, ALBUM TEXT, ARTIST TEXT, TITLE TEXT, PATH TEXT)") self.serv.close() except sql.Error, e: print "Error wiping database." return 1 def add_db(self, case): try: with sql.connect(self.db) as serv: self.serv = serv.cursor() self.serv.execute("INSERT INTO MusicData VALUES (?, ?, ?, ?, ?);", case) self.servcount += 1 self.serv.close() except sql.Error, e: errors.write(str(case[-1])) def addmp3todb(self, filetup): try: case = [] case.append(self.servcount) for h,j in filetup[1].items(): if h in ["ALBUM", "ARTIST", "TITLE"]: case.append(j) case.append(filetup[0]) self.add_db(tuple(case)) except: errors.write("Error writing: " + filetup[1]) def add_test(self, filedir): try: tester = mp3metadata.mp3data().returnobj() case = [] case.append(self.servcount) #tuple pairings will proceed in this order. for k,v in tester.items(): if k in ["ALBUM", "ARTIST", "TITLE"]: case.append(v) case.append(filedir) self.add_db(tuple(case)) return 0 except sql.Error, e: print e.args[0] return 1
Python
0
@@ -1472,24 +1472,29 @@ rror, e:%0A%09%09%09 +self. errors.write @@ -1757,16 +1757,21 @@ ept:%0A%09%09%09 +self. errors.w
73529579a6abaf1b33e6135d4abaa2c892dbfa3c
exit with retcode when called directly
kcheck/command.py
kcheck/command.py
#!/usr/bin/env python3 """ Entry point for utility, option handling. """ def main() -> int: """ Entry point for command line utility. :return: integer for return code of command line """ import configargparse import importlib import logging import platform from configparser import DuplicateOptionError import kcheck parser = configargparse.ArgumentParser( add_config_file_help=True, default_config_files=['/etc/kcheck.conf'], ignore_unknown_config_file_keys=True, formatter_class=lambda prog: configargparse.HelpFormatter(prog,max_help_position=35) ) parser.add_argument('--config', '-c', is_config_file=True, help='kcheck config file') parser.add_argument('--kernel', '-k', help='kernel config file', default='/usr/src/linux/.config') parser.add_argument('--logfile', '-l', help='file to write logging into') parser.add_argument('--verbose', '-v', help='Output extra information', action='count', default=2) parser.add_argument('--version', '-V', help='Print version information and exit', action='store_true') # subparsers = parser.add_subparsers(help='commands') # # gen_parser = subparsers.add_parser('genconfig', help='Generate config requirements from installed packages') # gen_parser.add_argument('-l', '--list', help='list available package manager integrations', action='store_true') # gen_parser.add_argument('-m', '--manager', help='Package manager', choices=kcheck.ALLOWED_PKGMGR, default='portage') # gen_parser.set_defaults(mode='genconfig') args = parser.parse_args() ## set up logging ## # logging output level log_level = 50 - (args.verbose * 10) # format and handler if args.logfile: logHandler = logging.FileHandler(args.logfile) logHandler.setFormatter(logging.Formatter("%(asctime)s [%(name)s] [%(levelname)-5.5s] %(message)s")) else: logHandler = logging.NullHandler() logging.basicConfig(level=log_level, handlers=[logHandler]) # initialise logger and log basics log = logging.getLogger('main') log.info('kcheck %s' % kcheck.__version__) [log.debug(line) for line in parser.format_values().splitlines()] if args.version: print('kcheck %s (Python %s)' % (kcheck.__version__, platform.python_version())) return 0 if 'mode' in args: if args.mode == 'genconfig': if args.list: print('The following package managers can be used for generating required kernel configurations') [print(' ', p) for p in kcheck.ALLOWED_PKGMGR] return 0 # get the module name for the package manager, import and hand over module = 'kcheck.'+args.manager log.debug('Loading module %s' % module) try: package_manager = importlib.import_module(module) except ImportError as exception: log.critical("Unable to load module for package manager %s" % module) log.exception(exception) return -1 return package_manager.generate_config(args) else: # no "mode", so run kcheck import kcheck.checker try: return kcheck.checker.check_config(args.config, args.kernel) except DuplicateOptionError: print('Your config file has duplicate keys in a section.') if args.logfile: print('See the log file %s for more details' % args.logfile) print('Correct your config file and try running this again.') return -2 if __name__ == '__main__': main()
Python
0
@@ -3676,15 +3676,21 @@ _':%0A +exit( main() +) %0A
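The diff above forwards main()'s integer result to the interpreter's exit status. The usual shape of that pattern looks like the following, written with sys.exit rather than the bare exit() builtin the diff itself calls:

import sys


def main() -> int:
    # ... do the actual work ...
    return 0            # 0 on success, non-zero on failure


if __name__ == '__main__':
    sys.exit(main())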
2d92e69d00a7419a23bcb38ab7c55ccc533237df
Fix artwork size
itunescli/query.py
itunescli/query.py
import logging from cliff.command import Command from cliff.lister import Lister from cliff.show import ShowOne import itunes class ITunesSearchBase(object): MEDIA_TYPES = frozenset([ 'movie', 'podcast', 'music', 'musicVideo', 'audiobook', 'shortFilm', 'tvShow', 'tvSeason', 'software', 'ebook', 'all', ]) def config_parser(self, parser): parser.add_argument('query', metavar='SEARCH_QUERY') parser.add_argument('--country', default='US', type=str) parser.add_argument('--media', default='all', choices=self.MEDIA_TYPES) parser.add_argument('--entity', default=None) return parser def artwork_url(self, artwork): """Return the largest artwork URL possible""" return artwork['100'].replace('.100x100-75', '.400x400-75') class SearchLister(Lister, ITunesSearchBase): """Search iTunes""" log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(SearchLister, self).get_parser(prog_name) parser = self.config_parser(parser) parser.add_argument('--limit', default=100, type=int) return parser def get_data(self, parsed_args): results = itunes.Search(query=parsed_args.query, limit=parsed_args.limit, country=parsed_args.country, entity=parsed_args.entity, media=parsed_args.media).get() return (('name', 'url', 'genre', 'release_date', 'artwork', 'type'), ((n.get_name(), n.get_url(), n.get_genre(), n.get_release_date(), self.artwork_url(n.get_artwork()), n.type) for n in results) ) class SearchOne(ShowOne, ITunesSearchBase): """Show the first result from a search query""" log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(SearchOne, self).get_parser(prog_name) parser = self.config_parser(parser) return parser def get_data(self, parsed_args): results = itunes.Search(query=parsed_args.query, limit=1, country=parsed_args.country, entity=parsed_args.entity, media=parsed_args.media).get() result = results[0] columns = ('name', 'url', 'genre', 'release_date', 'artwork', 'type') data = ( result.get_name(), result.get_url(), result.get_genre(), result.get_release_date(), self.artwork_url(result.get_artwork()), result.type ) return (columns, data) class GetArtwork(Command, ITunesSearchBase): """Get the album artwork from the first result of a query""" log = logging.getLogger(__name__) def get_parser(self, prog_name): parser = super(GetArtwork, self).get_parser(prog_name) parser = self.config_parser(parser) return parser def run(self, parsed_args): results = itunes.Search(query=parsed_args.query, limit=1, country=parsed_args.country, entity=parsed_args.entity, media=parsed_args.media).get() all_artwork = results[0].get_artwork() artwork_url = self.artwork_url(all_artwork) self.app.stdout.write("%s\n" % artwork_url)
Python
0.000001
@@ -882,13 +882,13 @@ , '. -400x4 +300x3 00-7
e1e25bc1166efa9a39fdf769f1081fafd08dd937
handle unknown source country, add recovered
pyfibot/modules/module_korona.py
pyfibot/modules/module_korona.py
# -*- coding: utf-8 -*- """ Koronavirus statistics from HS.fi open data https://github.com/HS-Datadesk/koronavirus-avoindata """ from __future__ import unicode_literals, print_function, division from collections import Counter def init(bot): global lang config = bot.config.get("module_posti", {}) lang = config.get("language", "en") def command_korona(bot, user, channel, args): """Get latest info about COVID-19 in Finland (Source: https://github.com/HS-Datadesk/koronavirus-avoindata )""" url = "https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData" try: r = bot.get_url(url) data = r.json() except Exception as e: bot.say( channel, "Error while getting data.", ) raise e msg = "[COVID-19] Vahvistettuja tapauksia: %s Kuolleita: %s" % (len(data['confirmed']), len(data['deaths'])) # top5 infection sources top5 = Counter(map(lambda x: x['infectionSourceCountry'], data['confirmed'])).most_common(5) msg = msg + " | Top5 lähdemaat: " topstr = [] for country, count in top5: topstr.append(country+":"+str(count)) msg = msg + " ".join(topstr) bot.say(channel, msg)
Python
0
@@ -851,16 +851,31 @@ olleita: + %25s Parantunut: %25s%22 %25 ( @@ -917,16 +917,40 @@ eaths'%5D) +, len(data%5B'recovered'%5D) )%0A%0A # @@ -1158,16 +1158,73 @@ n top5:%0A + if country == None:%0A country = %22N/A%22%0A%0A
7427adb86b23ece7d7b577754c771e1b31429eab
return to default queue if deleting current queue
ztf_sim/Scheduler.py
ztf_sim/Scheduler.py
from builtins import object import configparser import numpy as np from astropy.time import Time from .QueueManager import ListQueueManager, GreedyQueueManager, GurobiQueueManager from .ObsLogger import ObsLogger from .configuration import SchedulerConfiguration from .constants import BASE_DIR from .utils import block_index class Scheduler(object): def __init__(self, scheduler_config_file_fullpath, run_config_file_fullpath, other_queue_configs = None): self.scheduler_config = SchedulerConfiguration( scheduler_config_file_fullpath) self.queue_configs = self.scheduler_config.build_queue_configs() self.queues = self.scheduler_config.build_queues(self.queue_configs) self.timed_queues_tonight = [] self.set_queue('default') self.run_config = configparser.ConfigParser() self.run_config.read(run_config_file_fullpath) if 'log_name' in self.run_config['scheduler']: log_name = self.run_config['scheduler']['log_name'] else: log_name = self.scheduler_config.config['run_name'] # initialize sqlite history self.obs_log = ObsLogger(log_name, clobber=self.run_config['scheduler'].getboolean('clobber_db')) def set_queue(self, queue_name): # TODO: log the switch if queue_name not in self.queues: raise ValueError(f'Requested queue {queue_name} not available!') self.Q = self.queues[queue_name] def add_queue(self, queue_name, queue, clobber=True): if clobber or (queue_name not in self.queues): self.queues[queue_name] = queue else: raise ValueError(f"Queue {queue_name} already exists!") def delete_queue(self, queue_name): if (queue_name in self.queues): del self.queues[queue_name] else: raise ValueError(f"Queue {queue_name} does not exist!") def find_excluded_blocks_tonight(self, time_now): # also sets up timed_queues_tonight # start of the night mjd_today = np.floor(time_now.mjd).astype(int) # Look for timed queues that will be valid tonight, # to exclude from the nightly solution self.timed_queues_tonight = [] block_start = block_index(Time(mjd_today, format='mjd')) block_stop = block_index(Time(mjd_today + 1, format='mjd')) exclude_blocks = [] for qq_name, qq in self.queues.items(): if qq.queue_name in ['default', 'fallback']: continue if qq.validity_window is not None: valid_blocks = qq.valid_blocks(complete_only=True) valid_blocks_tonight = [b for b in valid_blocks if (block_start <= b <= block_stop)] if len(valid_blocks_tonight): self.timed_queues_tonight.append(qq_name) exclude_blocks.extend(valid_blocks_tonight) return exclude_blocks def check_for_TOO_queue_and_switch(self, time_now): # check if a TOO queue is now valid for qq_name, qq in self.queues.items(): if qq.is_TOO: if qq.is_valid(time_now): # only switch if we don't have an active TOO queue if not self.Q.is_TOO and len(qq.queue): self.set_queue(qq_name) def check_for_timed_queue_and_switch(self, time_now): # drop out of a timed queue if it's no longer valid if self.Q.queue_name != 'default': if not self.Q.is_valid(time_now): self.set_queue('default') # check if a timed queue is now valid for qq_name in self.timed_queues_tonight: qq = self.queues[qq_name] if qq.is_valid(time_now) and len(qq.queue): # only switch if we are in the default or fallback queue if self.Q.queue_name in ['default', 'fallback']: self.set_queue(qq_name)
Python
0.000001
@@ -1844,32 +1844,122 @@ n self.queues):%0A + if self.Q.queue_name == queue_name:%0A self.set_queue('default')%0A del
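The diff above guards delete_queue so that removing the currently active queue first switches back to 'default' instead of leaving a dangling reference. A toy sketch of the same guard, independent of the real ztf_sim Scheduler:

class Scheduler:
    def __init__(self):
        self.queues = {'default': [], 'timed': []}
        self.current = 'default'

    def set_queue(self, name):
        if name not in self.queues:
            raise ValueError(f'Requested queue {name} not available!')
        self.current = name

    def delete_queue(self, name):
        if name in self.queues:
            if self.current == name:
                self.set_queue('default')   # never leave current pointing at a dead queue
            del self.queues[name]
        else:
            raise ValueError(f'Queue {name} does not exist!')


s = Scheduler()
s.set_queue('timed')
s.delete_queue('timed')
print(s.current)   # default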
ea07c07604232e59d238ea8f6b1605bad1124b8c
add kwargs to __init__ of CSVData
pylearn2/datasets/csv_dataset.py
pylearn2/datasets/csv_dataset.py
# -*- coding: utf-8 -*- """ A simple general csv dataset wrapper for pylearn2. Can do automatic one-hot encoding based on labels present in a file. """ __authors__ = "Zygmunt Zając" __copyright__ = "Copyright 2013, Zygmunt Zając" __credits__ = ["Zygmunt Zając", "Nicholas Dronen"] __license__ = "3-clause BSD" __maintainer__ = "?" __email__ = "[email protected]" import csv import numpy as np import os from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix from pylearn2.utils import serial from pylearn2.utils.string_utils import preprocess class CSVDataset(DenseDesignMatrix): """A generic class for accessing CSV files labels, if present, should be in the first column if there's no labels, set expect_labels to False if there's no header line in your file, set expect_headers to False Parameters ---------- path : str The path to the CSV file. task : str The type of task in which the dataset will be used -- either "classification" or "regression". The task determines the shape of the target variable. For classification, it is a vector; for regression, a matrix. expect_labels : bool Whether the CSV file contains a target variable in the first column. expect_headers : bool Whether the CSV file contains column headers. delimiter : str The CSV file's delimiter. start : int The first row of the CSV file to load. stop : int The last row of the CSV file to load. start_fraction : float The fraction of rows, starting at the beginning of the file, to load. end_fraction : float The fraction of rows, starting at the end of the file, to load. """ def __init__(self, path='train.csv', task='classification', expect_labels=True, expect_headers=True, delimiter=',', start=None, stop=None, start_fraction=None, end_fraction=None): """ .. todo:: WRITEME """ self.path = path self.task = task self.expect_labels = expect_labels self.expect_headers = expect_headers self.delimiter = delimiter self.start = start self.stop = stop self.start_fraction = start_fraction self.end_fraction = end_fraction self.view_converter = None if task not in ['classification', 'regression']: raise ValueError('task must be either "classification" or ' '"regression"; got ' + str(task)) if start_fraction is not None: if end_fraction is not None: raise ValueError("Use start_fraction or end_fraction, " " not both.") if start_fraction <= 0: raise ValueError("start_fraction should be > 0") if start_fraction >= 1: raise ValueError("start_fraction should be < 1") if end_fraction is not None: if end_fraction <= 0: raise ValueError("end_fraction should be > 0") if end_fraction >= 1: raise ValueError("end_fraction should be < 1") if start is not None: if start_fraction is not None or end_fraction is not None: raise ValueError("Use start, start_fraction, or end_fraction," " just not together.") if stop is not None: if start_fraction is not None or end_fraction is not None: raise ValueError("Use stop, start_fraction, or end_fraction," " just not together.") # and go self.path = preprocess(self.path) X, y = self._load_data() if self.task == 'regression': super(CSVDataset, self).__init__(X=X, y=y) else: super(CSVDataset, self).__init__(X=X, y=y, y_labels=np.max(y) + 1) def _load_data(self): """ .. 
todo:: WRITEME """ assert self.path.endswith('.csv') if self.expect_headers: data = np.loadtxt(self.path, delimiter=self.delimiter, skiprows=1) else: data = np.loadtxt(self.path, delimiter=self.delimiter) def take_subset(X, y): if self.start_fraction is not None: n = X.shape[0] subset_end = int(self.start_fraction * n) X = X[0:subset_end, :] y = y[0:subset_end] elif self.end_fraction is not None: n = X.shape[0] subset_start = int((1 - self.end_fraction) * n) X = X[subset_start:, ] y = y[subset_start:] elif self.start is not None: X = X[self.start:self.stop, ] if y is not None: y = y[self.start:self.stop] return X, y if self.expect_labels: y = data[:, 0] X = data[:, 1:] y = y.reshape((y.shape[0], 1)) else: X = data y = None X, y = take_subset(X, y) return X, y
Python
0.000057
@@ -2046,16 +2046,43 @@ ion=None +,%0A **kwargs ):%0A @@ -3972,16 +3972,26 @@ X=X, y=y +, **kwargs )%0A @@ -4120,16 +4120,26 @@ x(y) + 1 +, **kwargs )%0A%0A d
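Decoded, the patch above threads an extra **kwargs parameter from CSVDataset.__init__ through to both DenseDesignMatrix super() calls. A sketch of the affected lines, with approximate whitespace and the unchanged body elided:

    def __init__(self, path='train.csv', task='classification',
                 expect_labels=True, expect_headers=True, delimiter=',',
                 start=None, stop=None, start_fraction=None, end_fraction=None,
                 **kwargs):
        ...
        if self.task == 'regression':
            super(CSVDataset, self).__init__(X=X, y=y, **kwargs)
        else:
            super(CSVDataset, self).__init__(X=X, y=y,
                                             y_labels=np.max(y) + 1, **kwargs)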
13f26d9007629be019140aa3bedd5f6fbfefe69b
delete all() method when apply document filter
jellyblog/views.py
jellyblog/views.py
# -*- coding: utf-8 -*- from django.shortcuts import render, get_object_or_404 from django.core.paginator import Paginator from .models import Category, Document from htmlmin.decorators import minified_response from .util import get_page_number_range, get_documents, \ categoryList def home(request): Category.init_category() return render(request, 'jellyblog/home.html') def index(request): return index_with_page(request, 1) @minified_response def index_with_page(request, page): document_list = Document.objects.all().filter(public_doc=True).order_by('-id') paginator = Paginator(document_list, 4) documents = get_documents(paginator, page) context = { 'documents': documents, 'category_list': categoryList, 'page_range': get_page_number_range( paginator, documents ) } return render(request, 'jellyblog/index.html', context) def category_detail(request, category_id): return category_with_page(request, category_id, 1) @minified_response def category_with_page(request, category_id, page): selected_category = Category.objects.get(id=category_id) document_list = [] if selected_category.parent.id == 1: # 카테고리가 상위 카테고리인지 아닌지를 판별 후, 상위 카테고리일 경우엔 하위 카테고리의 문서 리스트를 추가함 children = Category.objects.all().filter(parent=selected_category.id) for child in children: document_list += Document.objects.all() \ .filter(category_id=child.id, public_doc=True) document_list += Document.objects.all().filter( category=category_id, public_doc=True) document_list.sort(key=lambda x: x.pk, reverse=True) paginator = Paginator(document_list, 4) documents = get_documents(paginator, page) context = { 'documents': documents, 'category_list': categoryList, 'category_id': category_id, 'page_range': get_page_number_range( paginator, documents), 'category_name': selected_category.name, } return render(request, 'jellyblog/category.html', context) @minified_response def detail(request, document_id): document = get_object_or_404(Document, pk=document_id) document.read() return render(request, 'jellyblog/detail.html', {'document': document, 'category_list': categoryList})
Python
0.000001
@@ -526,38 +526,32 @@ ocument.objects. -all(). filter(public_do
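Decoded, the patch drops the redundant .all() call in index_with_page, so the queryset line becomes:

    document_list = Document.objects.filter(public_doc=True).order_by('-id')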
deaee894589a2247b9322ba5cdb94e4c127c35bd
correct docstring for KeyringLocked class
keyring/errors.py
keyring/errors.py
import sys __metaclass__ = type class KeyringError(Exception): """Base class for exceptions in keyring """ class PasswordSetError(KeyringError): """Raised when the password can't be set. """ class PasswordDeleteError(KeyringError): """Raised when the password can't be deleted. """ class InitError(KeyringError): """Raised when the keyring could not be initialised """ class KeyringLocked(KeyringError): """Raised when the keyring could not be initialised """ class ExceptionRaisedContext: """ An exception-trapping context that indicates whether an exception was raised. """ def __init__(self, ExpectedException=Exception): self.ExpectedException = ExpectedException self.exc_info = None def __enter__(self): self.exc_info = object.__new__(ExceptionInfo) return self.exc_info def __exit__(self, *exc_info): self.exc_info.__init__(*exc_info) return self.exc_info.type and issubclass( self.exc_info.type, self.ExpectedException ) class ExceptionInfo: def __init__(self, *info): if not info: info = sys.exc_info() self.type, self.value, _ = info def __bool__(self): """ Return True if an exception occurred """ return bool(self.type) __nonzero__ = __bool__
Python
0
@@ -476,32 +476,24 @@ ing -could not be initialised +failed unlocking %0A
15f45377dffa2e267464b38f5f87ffe9526fa8f6
Update support to jax (#585)
tensorboardX/x2num.py
tensorboardX/x2num.py
# DO NOT alter/distruct/free input object ! from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import numpy as np import six def check_nan(array): tmp = np.sum(array) if np.isnan(tmp) or np.isinf(tmp): logging.warning('NaN or Inf found in input tensor.') return array def make_np(x): if isinstance(x, list): return check_nan(np.array(x)) if isinstance(x, np.ndarray): return check_nan(x) if isinstance(x, six.string_types): # Caffe2 will pass name of blob(s) to fetch return check_nan(prepare_caffe2(x)) if np.isscalar(x): return check_nan(np.array([x])) if 'torch' in str(type(x)): return check_nan(prepare_pytorch(x)) if 'chainer' in str(type(x)): return check_nan(prepare_chainer(x)) if 'mxnet' in str(type(x)): return check_nan(prepare_mxnet(x)) raise NotImplementedError( 'Got {}, but expected numpy array or torch tensor.'.format(type(x))) def prepare_pytorch(x): import torch if isinstance(x, torch.autograd.Variable): x = x.data x = x.cpu().numpy() return x def prepare_theano(x): import theano pass def prepare_caffe2(x): from caffe2.python import workspace x = workspace.FetchBlob(x) return x def prepare_mxnet(x): x = x.asnumpy() return x def prepare_chainer(x): import chainer x = chainer.cuda.to_cpu(x.data) return x
Python
0
@@ -922,24 +922,92 @@ e_mxnet(x))%0A + if 'jax' in str(type(x)):%0A return check_nan(np.array(x))%0A raise No
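Decoded, make_np gains a JAX branch just before the final raise; JAX arrays are handled by converting them with np.array. Sketch with approximate whitespace:

    if 'mxnet' in str(type(x)):
        return check_nan(prepare_mxnet(x))
    if 'jax' in str(type(x)):
        return check_nan(np.array(x))
    raise NotImplementedError(
        'Got {}, but expected numpy array or torch tensor.'.format(type(x)))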
4e280094687d8c369a1eee3c8b7bb246549898eb
Update utils.py
backend/utils.py
backend/utils.py
from rest_framework.views import exception_handler from rest_framework.exceptions import APIException, AuthenticationFailed checks = ['Username not found', 'Username already exists', 'Authentication failed'] def custom_exception_handler(exc): """ Exception handler called by all raised exceptions during HTTP requests. Return value: { "error":"message body" } """ # Debug exceptions print 'EXCEPTION DEBUG %s' %exc if not isinstance(exc.detail, unicode): try: # original error message is {'detail':[list of messages]} # Get values from dictionary and take first list element msg = exc.detail.values()[0][0] exc = GenericException(msg) except: exc = GenericException() response = exception_handler(exc) if response is not None: # Uncomment to add status code in message body # response.data['status_code'] = response.status_code if response.data['detail']: response.data['error'] = response.data['detail'] del response.data['detail'] return response class GenericException(APIException): status_code = 400 default_detail = 'Error encountered' class UsernameNotFound(APIException): status_code = 400 default_detail = 'Username not found' class UsernameAlreadyExists(APIException): status_code = 400 default_detail = 'Username already exists' class AuthenticationFailure(AuthenticationFailed): status_code = 401 default_detail = 'Authentication failed'
Python
0.000001
@@ -463,16 +463,17 @@ s' %25exc%0A +%0A if n
cd4e7c5bc10c8e946ddf31d99a249a5a97b2dfda
Update get-observations.py
python-files/get-observations.py
python-files/get-observations.py
#!/usr/bin/env python3 """ Utility to get observations from a SatNOGS Network server. Collects the paginated objects into a single JSON list and stores in a file. """ import json import requests OBSERVATIONS_API = 'https://network.satnogs.org/api/observations' OBSERVATIONS_JSON = 'observations.json' def get(url): print(url) return requests.get(url) observations = [] r = get(OBSERVATIONS_API) # r = requests.get(OBSERVATIONS_API) observations.extend(r.json()) nextpage = r.links.get('next') while nextpage: # r = requests.get(nextpage['url']) r = get(nextpage['url']) observations.extend(r.json()) nextpage = r.links.get('next') observations = sorted(observations, key=lambda s: s['id']) with open(OBSERVATIONS_JSON, 'w') as fp: json.dump(observations, fp, sort_keys=True, indent=2)
Python
0
@@ -364,119 +364,729 @@ l)%0A%0A -observations = %5B%5D%0Ar = get(OBSERVATIONS_API)%0A# r = requests.get(OBSERVATIONS_API)%0A%0Aobservations.extend(r.json()) +%0Atry:%0A with open(OBSERVATIONS_JSON) as f:%0A data = json.load(f)%0A # json.dump() coerces to string keys%0A # convert keys back to integers%0A observations = %7B%7D%0A for k,v in data.items():%0A print(k)%0A observations%5Bint(k)%5D = v%0A # observations = %7Bv%5B'id'%5D:v for k,v in data.items()%7D%0Aexcept IOError:%0A observations = %7B%7D%0A%0A%0A%0Adef update(o, observations):%0A o_id = o%5B'id'%5D%0A print(o_id)%0A if o_id not in observations:%0A observations%5Bo_id%5D = o%0A was_new = True%0A else:%0A observations.update(o)%0A was_new = False%0A return was_new%0A%0A%0Ar = get(OBSERVATIONS_API)%0Aupdated = %5Bupdate(o, observations) for o in r.json()%5D%0Aany_updated = any(updated)%0A %0Anex @@ -1117,15 +1117,30 @@ t')%0A -%0A while +any_updated and next @@ -1152,24 +1152,13 @@ %0A - # r = -requests. get( @@ -1182,160 +1182,169 @@ -r = get(nextpage%5B'url'%5D)%0A observations.extend(r.json())%0A nextpage = r.links.get('next')%0A%0Aobservations = sorted(observations, key=lambda s: s%5B'id'%5D +updated = %5Bupdate(o, observations) for o in r.json()%5D%0A print(updated)%0A any_updated = any(updated)%0A if any_updated:%0A nextpage = r.links.get('next' )%0A%0Aw @@ -1441,9 +1441,8 @@ dent=2)%0A -%0A
1b18e4a777b387af072afec4d3df4d3b71b4042b
version bump to 0.1.1
nflcmd/version.py
nflcmd/version.py
__version__ = '0.1.0'

__pdoc__ = {
    '__version__': "The version of the installed nflcmd module.",
}

Python
0.000001
@@ -12,17 +12,17 @@ = '0.1. -0 +1 '%0A%0A__pdo
bf349b5f41e3b7edb4efbe279f79ded856320388
Fix typo
python/xchainer/testing/array.py
python/xchainer/testing/array.py
import numpy.testing import xchainer # NumPy-like assertion functions that accept both NumPy and xChainer arrays def _check_xchainer_array(x): # Checks basic conditions that are assumed to hold true for any given xChainer array passed to assert_array_close and # assert_array_equal. assert isinstance(x, xchainer.Array) assert not x.is_grad_required() def _as_numpy(x): if isinstance(x, xchainer.Array): # TODO(hvy): Use a function that convers an xChainer array to a NumPy array. return x.to_device('native:0') assert isinstance(x, numpy.ndarray) or numpy.isscalar(x) return x def assert_allclose(x, y, rtol=1e-7, atol=0, equal_nan=True, err_msg='', verbose=True): """Raises an AssertionError if two array_like objects are not equal up to a tolerance. Args: x(numpy.ndarray or xchainer.Array): The actual object to check. y(numpy.ndarray or xchainer.Array): The desired, expected object. rtol(float): Relative tolerance. atol(float): Absolute tolerance. equal_nan(bool): Allow NaN values if True. Otherwise, fail the assertion if any NaN is found. err_msg(str): The error message to be printed in case of failure. verbose(bool): If ``True``, the conflicting values are appended to the error message. .. seealso:: :func:`numpy.testing.assert_allclose` """ def check_array(array): if isinstance(array, xchainer.Array): _check_xchainer_array(array) check_array(x) check_array(y) # TODO(sonots): Uncomment after strides compatibility between xChainer and NumPy is implemented. # assert x.strides == y.strides numpy.testing.assert_allclose( _as_numpy(x), _as_numpy(y), rtol=rtol, atol=atol, equal_nan=equal_nan, err_msg=err_msg, verbose=verbose) def assert_array_equal(x, y, err_msg='', verbose=True): """Raises an AssertionError if two array_like objects are not equal. Args: x(numpy.ndarray or xchainer.Array): The actual object to check. y(numpy.ndarray or xchainer.Array): The desired, expected object. err_msg(str): The error message to be printed in case of failure. verbose(bool): If ``True``, the conflicting values are appended to the error message. .. seealso:: :func:`numpy.testing.assert_array_equal` """ def check_array(array): if isinstance(array, xchainer.Array): _check_xchainer_array(array) check_array(x) check_array(y) # TODO(sonots): Uncomment after strides compatibility between xChainer and NumPy is implemented. # assert x.strides == y.strides numpy.testing.assert_array_equal(_as_numpy(x), _as_numpy(y), err_msg=err_msg, verbose=verbose)
Python
0.999999
@@ -470,20 +470,18 @@ t conver +t s - an xChaine @@ -487,22 +487,21 @@ er array +s to -a NumPy ar @@ -503,16 +503,17 @@ Py array +s .%0A
d52bbb41ee6d5760e94b5414b74750cc6c308e0a
Fix cache middleware key prefix
opps/core/middleware.py
opps/core/middleware.py
# -*- coding: utf-8 -*- import re from django.contrib.sites.models import Site from django.contrib.sites.models import get_current_site from django.http import HttpResponseRedirect from django.conf import settings from opps.channels.models import Channel class URLMiddleware(object): def process_request(self, request): """ if the requested site is id 2 it will force the ROOT_URLCONF = 'yourproject.urls_2.py' """ self.request = request site = get_current_site(request) if site.id > 1: prefix = "_{0}".format(site.id) self.request.urlconf = settings.ROOT_URLCONF + prefix class TemplateContextMiddleware(object): """ Include aditional items in response context_data """ def process_template_response(self, request, response): if hasattr(response, 'context_data'): if not 'channel' in response.context_data: site = get_current_site(request) response.context_data['channel'] = Channel.objects\ .get_homepage(site=site or Site.objects.get(pk=1)) return response class DynamicSiteMiddleware(object): def hosting_parse(self, hosting): """ Returns ``(host, port)`` for ``hosting`` of the form ``'host:port'``. If hosting does not have a port number, ``port`` will be None. """ if ':' in hosting: return hosting.rsplit(':', 1) return hosting, None def get_hosting(self, hosting): domain, port = self.hosting_parse(hosting) if domain in settings.OPPS_DEFAULT_URLS: domain = 'example.com' try: return Site.objects.get(domain=domain) except Site.DoesNotExist: return Site.objects.all()[0] def process_request(self, request): hosting = request.get_host().lower() site = self.get_hosting(hosting) settings.SITE_ID = site.id settings.CACHE_MIDDLEWARE_KEY_PREFIX = "opps_site:{}".format(site.id) class MobileDetectionMiddleware(object): u"""Used django-mobile core https://github.com/gregmuellegger/django-mobile/blob/3093a9791e5e812021e49 3226e5393033115c8bf/django_mobile/middleware.py """ user_agents_test_match = ( "w3c ", "acs-", "alav", "alca", "amoi", "audi", "avan", "benq", "bird", "blac", "blaz", "brew", "cell", "cldc", "cmd-", "dang", "doco", "eric", "hipt", "inno", "ipaq", "java", "jigs", "kddi", "keji", "leno", "lg-c", "lg-d", "lg-g", "lge-", "maui", "maxo", "midp", "mits", "mmef", "mobi", "mot-", "moto", "mwbp", "nec-", "newt", "noki", "xda", "palm", "pana", "pant", "phil", "play", "port", "prox", "qwap", "sage", "sams", "sany", "sch-", "sec-", "send", "seri", "sgh-", "shar", "sie-", "siem", "smal", "smar", "sony", "sph-", "symb", "t-mo", "teli", "tim-", "tosh", "tsm-", "upg1", "upsi", "vk-v", "voda", "wap-", "wapa", "wapi", "wapp", "wapr", "webc", "winw", "winw", "xda-",) user_agents_test_search = u"(?:%s)" % u'|'.join(( 'up.browser', 'up.link', 'mmp', 'symbian', 'smartphone', 'midp', 'wap', 'phone', 'windows ce', 'pda', 'mobile', 'mini', 'palm', 'netfront', 'opera mobi',)) user_agents_exception_search = u"(?:%s)" % u'|'.join(('ipad',)) http_accept_regex = re.compile("application/vnd\.wap\.xhtml\+xml", re.IGNORECASE) def __init__(self): user_agents_test_match = r'^(?:%s)' % '|'.join( self.user_agents_test_match) self.user_agents_test_match_regex = re.compile( user_agents_test_match, re.IGNORECASE) self.user_agents_test_search_regex = re.compile( self.user_agents_test_search, re.IGNORECASE) self.user_agents_exception_search_regex = re.compile( self.user_agents_exception_search, re.IGNORECASE) def process_request(self, request): is_mobile = False if 'HTTP_USER_AGENT' in request.META: user_agent = request.META['HTTP_USER_AGENT'] if self.user_agents_test_search_regex.search(user_agent) and \ not 
self.user_agents_exception_search_regex.search(user_agent): is_mobile = True else: if 'HTTP_ACCEPT' in request.META: http_accept = request.META['HTTP_ACCEPT'] if self.http_accept_regex.search(http_accept): is_mobile = True if not is_mobile: if self.user_agents_test_match_regex.match(user_agent): is_mobile = True request.is_mobile = is_mobile settings.TEMPLATE_DIRS = settings.TEMPLATE_DIRS_WEB if is_mobile and settings.OPPS_CHECK_MOBILE: settings.TEMPLATE_DIRS = settings.TEMPLATE_DIRS_MOBILE if settings.OPPS_DOMAIN_MOBILE and \ request.META.get('HTTP_HOST', '') != \ settings.OPPS_DOMAIN_MOBILE: return HttpResponseRedirect(u"{}://{}".format( settings.OPPS_PROTOCOL_MOBILE, settings.OPPS_DOMAIN_MOBILE))
Python
0.00001
@@ -2023,17 +2023,17 @@ pps_site -: +- %7B%7D%22.form
f238a7d227036510b91ea4a7e1e9178ea60b3997
Update imagecodecs/__main__.py
imagecodecs/__main__.py
imagecodecs/__main__.py
# -*- coding: utf-8 -*- # imagecodecs/__main__.py """Imagecodecs package command line script.""" import sys from matplotlib.pyplot import show from tifffile import imshow import imagecodecs def askopenfilename(**kwargs): """Return file name(s) from Tkinter's file open dialog.""" try: from Tkinter import Tk import tkFileDialog as filedialog except ImportError: from tkinter import Tk, filedialog root = Tk() root.withdraw() root.update() filenames = filedialog.askopenfilename(**kwargs) root.destroy() return filenames def main(argv=None, verbose=True, decoders=None): """Imagecodecs command line usage main function.""" if argv is None: argv = sys.argv if len(argv) < 2: fname = askopenfilename(title='Select a. image file') if not fname: print('No file selected') return -1 elif len(argv) == 2: fname = argv[1] else: print('Usage: imagecodecs filename') return -1 with open(fname, 'rb') as fh: data = fh.read() if decoders is None: decoders = [ imagecodecs.png_decode, imagecodecs.jpeg8_decode, imagecodecs.jpeg12_decode, imagecodecs.jpegsof3_decode, imagecodecs.jpegls_decode, imagecodecs.j2k_decode, imagecodecs.jxr_decode, imagecodecs.webp_decode, ] messages = [] image = None for decode in decoders: try: image = decode(data) if image.dtype == 'object': image = None raise ValueError('failed') except Exception as exception: # raise(exception) messages.append('%s: %s' % (decode.__name__.upper(), exception)) continue break if verbose: print() if image is None: print('Could not decode the file\n') if verbose: for message in messages: print(message) return -1 if verbose: print("%s: %s %s" % (decode.__name__.upper(), image.shape, image.dtype)) imshow(image, title=fname) show() return 0 sys.exit(main())
Python
0.000005
@@ -1423,32 +1423,106 @@ cs.webp_decode,%0A + imagecodecs.zfp_decode,%0A imagecodecs.numpy_decode,%0A %5D%0A%0A m
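Decoded, the patch extends the default decoder list in the command line script with two more codecs; the tail of the list becomes (whitespace approximate, earlier entries unchanged):

        decoders = [
            imagecodecs.png_decode,
            # ... existing entries unchanged ...
            imagecodecs.webp_decode,
            imagecodecs.zfp_decode,
            imagecodecs.numpy_decode,
        ]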
2af301cf69cb0a9c3c22d57f290f4962868f7d05
Add liwc entities to ElasticSearch index
folia2es.py
folia2es.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Script to put a folia xml file in ElasticSearch. """ import os import argparse import time from elasticsearch import Elasticsearch from lxml import etree from bs4 import BeautifulSoup from emotools.plays import extract_character_name, xml_id2play_id from emotools.bs4_helpers import sentence, note def create_index(es, index_name, type_name): config = {} config['mappings'] = { type_name: { '_id': { 'path': 'event_id' }, 'properties': { 'event_id': { 'type': 'string', 'include_in_all': 'false', 'index': 'not_analyzed' }, 'text_id': { 'type': 'string', 'include_in_all': 'false', 'index': 'not_analyzed' }, 'event_class': { 'type': 'string', 'include_in_all': 'false', 'index': 'not_analyzed' }, 'speaker': { 'type': 'string', 'include_in_all': 'false', 'index': 'not_analyzed' }, 'order': { 'type': 'integer', }, 'text': { 'type': 'string', 'include_in_all': 'false', 'index': 'analyzed' }, } } } es.indices.create(index=index_name, body=config, ignore=400) # sleep to prevent error message when checking whether document exists time.sleep(2) def event2es(event_xml, event_order, es, index_name, type_name): events = event_xml.find_all('event') event = events[0] event_id = event.attrs.get('xml:id') if not es.exists(index=index_name, doc_type=type_name, id=event_id): play_id = xml_id2play_id(event_id) cls = event.attrs.get('class') if cls == 'speakerturn': actor = extract_character_name(event.attrs.get('actor')) text = [] for elem in event.descendants: if sentence(elem) and not note(elem.parent): text.append(elem.t.string) doc = { 'event_id': event_id, 'text_id': play_id, 'event_class': cls, 'order': event_order, 'text': ' '.join(text) } if cls == 'speakerturn': doc['actor'] = actor es.index(index_name, type_name, doc) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('dir', help='the name of the directory containing the ' 'FoLiA XML files that should be processed') args = parser.parse_args() input_dir = args.dir # TODO: ES host + port as script arguments es = Elasticsearch() # TODO: index name as script argument index_name = 'embodied_emotions' type_name = 'event' create_index(es, index_name, type_name) event_tag = '{http://ilk.uvt.nl/folia}event' os.chdir(input_dir) for file_name in os.listdir(input_dir): print file_name # load document context = etree.iterparse(file_name, events=('start', 'end'), tag=event_tag, huge_tree=True) order = 0 delete = True for event, elem in context: if event == 'start': delete = False if event == 'end': # ignore subevents if not elem.getparent().tag == event_tag: order += 1 event_xml = BeautifulSoup(etree.tostring(elem), 'xml') event2es(event_xml, order, es, index_name, type_name) delete = True if delete: elem.clear() while elem.getprevious() is not None: del elem.getparent()[0] del context
Python
0
@@ -133,16 +133,46 @@ rt time%0A +from datetime import datetime%0A from ela @@ -203,16 +203,16 @@ csearch%0A - from lxm @@ -369,16 +369,24 @@ ce, note +, entity %0A%0A%0Adef c @@ -2641,16 +2641,926 @@ doc)%0A%0A%0A +def entities2es(event_xml, entity_class, timestamp, es, index_name, doc_type):%0A events = event_xml.find_all('event')%0A event = events%5B0%5D%0A event_id = event.attrs.get('xml:id')%0A%0A entities = %7B%7D%0A for elem in event.descendants:%0A if entity(elem) and not note(elem.parent.parent.parent):%0A ent_class = '%7B%7D-'.format(entity_class)%0A if elem.get('class').startswith(ent_class):%0A entity_name = elem.get('class').replace(ent_class, '')%0A if not entities.get(entity_name):%0A entities%5Bentity_name%5D = %5B%5D%0A entities%5Bentity_name%5D.append(elem.wref.get('t'))%0A doc = %7B%0A '%7B%7D-entities'.format(entity_class): %7B%0A 'data': entities,%0A 'timestamp': timestamp%0A %7D%0A %7D%0A%0A es.update(index=index_name,%0A doc_type=type_name,%0A id=event_id,%0A body=%7B'doc': doc%7D)%0A%0A%0A if __nam @@ -4092,16 +4092,59 @@ a%7Devent' +%0A timestamp = datetime.now().isoformat() %0A%0A os @@ -4828,16 +4828,16 @@ 'xml')%0A - @@ -4902,16 +4902,137 @@ e_name)%0A + entities2es(event_xml, 'liwc', timestamp, es, index_name,%0A type_name)%0A
d73804341f19f577ee68a45e38234e9097b827aa
Improve summary
joblib/__init__.py
joblib/__init__.py
""" Joblib is a set of tools to provide **lightweight pipelining in Python**. In particular, joblib offers: 1. transparent disk-caching of the output values and lazy re-evaluation (memoize pattern) 2. easy simple parallel computing 3. logging and tracing of the execution Joblib is optimized to be fast and robust in particular on large, long-running functions and has specific optimizations for `numpy` arrays. ____ * The latest user documentation for `joblib` can be found on http://packages.python.org/joblib/ * The latest packages can be downloaded from http://pypi.python.org/pypi/joblib * Instructions for developpers can be found at: http://github.com/joblib/joblib joblib is **BSD-licensed**. Vision -------- Joblib came out of long-running data-analysis Python scripts. The long term vision is to provide tools for scientists to achieve better reproducibility when running jobs, without changing the way numerical code looks like. However, Joblib can also be used to provide a light-weight make replacement. The main problems identified are: 1) **Lazy evaluation:** People need to rerun over and over the same script as it is tuned, but end up commenting out steps, or uncommenting steps, as they are needed, as they take long to run. 2) **Persistence:** It is difficult to persist in an efficient way arbitrary objects containing large numpy arrays. In addition, hand-written persistence to disk does not link easily the file on disk to the corresponding Python object it was persists from in the script. This leads to people not a having a hard time resuming the job, eg after a crash and persistence getting in the way of work. The approach taken by Joblib to address these problems is not to build a heavy framework and coerce user into using it (e.g. with an explicit pipeline). It strives to leave your code and your flow control as unmodified as possible. Current features ------------------ 1) **Transparent and fast disk-caching of output value:** a make-like functionality for Python functions that works well for arbitrary Python objects, including very large numpy arrays. The goal is to separate operations in a set of steps with well-defined inputs and outputs, that are saved and reran only if necessary, by using standard Python functions:: >>> from joblib import Memory >>> mem = Memory(cachedir='/tmp/joblib') >>> import numpy as np >>> a = np.vander(np.arange(3)) >>> square = mem.cache(np.square) >>> b = square(a) ________________________________________________________________________________ [Memory] Calling square... square(array([[0, 0, 1], [1, 1, 1], [4, 2, 1]])) ___________________________________________________________square - 0.0s, 0.0min >>> c = square(a) >>> # The above call did not trigger an evaluation 2) **Embarrassingly parallel helper:** to make is easy to write readable parallel code and debug it quickly: >>> from joblib import Parallel, delayed >>> from math import sqrt >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10)) [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] 3) **Logging/tracing:** The different functionalities will progressively acquire better logging mechanism to help track what has been ran, and capture I/O easily. In addition, Joblib will provide a few I/O primitives, to easily define define logging and display streams, and provide a way of compiling a report. We want to be able to quickly inspect what has been run. Contributing ------------- The code is `hosted <http://github.com/joblib/joblib>`_ on github. 
It is easy to clone the project and experiment with making your own modifications. If you need extra features, don't hesitate to contribute them. .. >>> import shutil ; shutil.rmtree('/tmp/joblib/') """ __version__ = '0.4.4' from .memory import Memory from .logger import PrintTime, Logger from .hashing import hash from .numpy_pickle import dump, load from .parallel import Parallel, delayed
Python
0
@@ -310,23 +310,31 @@ be +** fast +** and +** robust +** in @@ -433,277 +433,445 @@ s.%0A%0A -____%0A%0A* The latest user documentation for %60joblib%60 can be found on%0A http://packages.python.org/joblib/%0A%0A* The latest packages can be downloaded from%0A http://pypi.python.org/pypi/joblib%0A%0A* Instructions for developpers can be found at:%0A http://github.com/joblib/joblib +%0A ============================== ========================================%0A **User documentation**: http://packages.python.org/joblib%0A %0A **Download packages**: http://pypi.python.org/pypi/joblib%0A %0A **Source code**: http://github.com/joblib/joblib%0A ============================== ======================================== %0A%0Ajo
43e8b090d806d615a8153d1e14063cc6d274bb25
Update issue 130 Now I also applied the fix :)
rdflib/plugins/serializers/nt.py
rdflib/plugins/serializers/nt.py
""" N-Triples RDF graph serializer for RDFLib. See <http://www.w3.org/TR/rdf-testcases/#ntriples> for details about the format. """ from rdflib.serializer import Serializer import warnings class NTSerializer(Serializer): """ Serializes RDF graphs to NTriples format. """ def serialize(self, stream, base=None, encoding=None, **args): if base is not None: warnings.warn("NTSerializer does not support base.") if encoding is not None: warnings.warn("NTSerializer does not use custom encoding.") encoding = self.encoding for triple in self.store: stream.write(_nt_row(triple).encode(encoding, "replace")) stream.write("\n") def _nt_row(triple): return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), _xmlcharref_encode(triple[2].n3())) # from <http://code.activestate.com/recipes/303668/> def _xmlcharref_encode(unicode_data, encoding="ascii"): """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler.""" chars = [] # nothing to do about xmlchars, but replace newlines with escapes: unicode_data=unicode_data.replace("\n","\\n") if unicode_data.startswith('"""'): unicode_data = unicode_data.replace('"""', '"') # Step through the unicode_data string one character at a time in # order to catch unencodable characters: for char in unicode_data: try: chars.append(char.encode(encoding, 'strict')) except UnicodeError: chars.append('\u%04X' % ord(char)) return ''.join(chars)
Python
0
@@ -1563,17 +1563,68 @@ rd(char) -) + if ord(char) %3C= 0xFFFF else '%5CU%2508X' %25 ord(char)) %0A %0A ret
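The escaped patch is hard to read because %5C and %25 encode a backslash and a percent sign. Decoded, the character-reference fallback now emits \U escapes for code points outside the Basic Multilingual Plane; the resulting line (one line in the source, indentation approximate):

            chars.append('\u%04X' % ord(char) if ord(char) <= 0xFFFF else '\U%08X' % ord(char))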
e22a222865f19099e73b1294000adb4dd624d9fe
Use is_callable_object function from kyoto.utils module
kyoto/dispatch.py
kyoto/dispatch.py
import types import gevent import traceback import termformat import kyoto import kyoto.conf import kyoto.utils.modules import kyoto.utils.validation try: # Python 2.x from itertools import izip except ImportError: # Python 3.x izip = zip class Dispatcher(object): __slots__ = ("address", "handlers", "modules") def __init__(self, modules, address): self.address = address self.handlers = { ":call": self.handle_call, ":cast": self.handle_cast, } self.modules = self.transform_modules(modules) def transform_modules(self, modules): """ Creates dispatching dictionary from given list of modules: [kyoto.tests.dummy] => { ":dummy": <module 'kyoto.tests.dummy' object>, } """ def transform(module): name = kyoto.utils.modules.get_module_name(module) return (termformat.binary_to_atom(name), module) return dict((transform(m) for m in modules)) def transform_exceptions(function): """ Catches exceptions and transforms it to BERT response terms. Python: raise ValueError("with message") BERT: {error, {user, 500, "ValueError", "with message", ["Traceback (most recent call last):", ...]}} """ def transform(*args, **kwargs): try: response = function(*args, **kwargs) except Exception as exception: name = exception.__class__.__name__ message = str(exception) trace = traceback.format_exc().splitlines() return (":error", (":user", 500, name, message, trace)) else: return response return transform def transform_response(function): """ Transforms function output to BERT response terms. 1. No response Python: None BERT: {noreply} 2. Has response Python: { "length": 1024, "checksum": "06aef8bb71e72b2abec01d4bd3aa9dda48fd20e6", } BERT: {reply, { "length": 1024, "checksum": "06aef8bb71e72b2abec01d4bd3aa9dda48fd20e6", }} 3. Streaming response Python: {"content-type": "image/png"} { binary data } { binary data } BERT: {info, stream, []} {reply, {"content-type": "image/png"}} { binary data } { binary data } """ def transform(*args, **kwargs): response = function(*args, **kwargs) if response: if isinstance(response, types.GeneratorType): yield (":info", ":stream", []) message = next(response) if message: yield (":reply", message) else: yield (":noreply",) for message in response: yield message else: if kyoto.utils.validation.is_valid_error_response(response): yield response else: yield (":reply", response) else: yield (":noreply",) return transform @transform_response def handle(self, request, **kwargs): rtype, module, function, args = request if module in self.modules: module = self.modules.get(module) name = termformat.atom_to_binary(function) function = getattr(module, name, None) if function and isinstance(function, (types.FunctionType, types.MethodType, types.BuiltinFunctionType, types.BuiltinMethodType)): if kyoto.is_blocking(function): future = kyoto.conf.settings.BLOCKING_POOL.submit(self.handle_call, function, args, **kwargs) if rtype == ":call": response = future.result() else: response = None else: response = self.handlers[rtype](function, args, **kwargs) return response else: function = termformat.binary_to_atom(name) return (":error", (":server", 2, "NameError", "No such function: '{0}'".format(function), [])) else: return (":error", (":server", 1, "NameError", "No such module: '{0}'".format(module), [])) @transform_exceptions def handle_call(self, function, args, **kwargs): return function(*args, **kwargs) @transform_exceptions def handle_cast(self, function, args, **kwargs): gevent.spawn(function, *args, **kwargs)
Python
0.000004
@@ -3834,169 +3834,55 @@ and -isinstance(function, (types.FunctionType, types.MethodType,%0A types.BuiltinFunctionType, types.BuiltinMethodType) +kyoto.utils.modules.is_callable_object(function ):%0A
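Decoded, the long isinstance check against the four function and method types is replaced by the helper named in the subject line, so the condition in handle() becomes:

        if function and kyoto.utils.modules.is_callable_object(function):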
318457d727eece813cc1dd0b5037b6df89734ec5
Use Domain objects for CNAME symlinking
readthedocs/projects/symlinks.py
readthedocs/projects/symlinks.py
import os import logging from django.conf import settings import redis from readthedocs.core.utils import run_on_app_servers from readthedocs.projects.constants import LOG_TEMPLATE from readthedocs.restapi.client import api log = logging.getLogger(__name__) def symlink_cnames(version): """ OLD Link from HOME/user_builds/cnames/<cname> -> HOME/user_builds/<project>/rtd-builds/ NEW Link from HOME/user_builds/cnametoproject/<cname> -> HOME/user_builds/<project>/ """ try: redis_conn = redis.Redis(**settings.REDIS) cnames = redis_conn.smembers('rtd_slug:v1:%s' % version.project.slug) except redis.ConnectionError: log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Failed to symlink cnames, Redis error.'), exc_info=True) return for cname in cnames: log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg="Symlinking CNAME: %s" % cname)) docs_dir = version.project.rtd_build_path(version.slug) # Chop off the version from the end. docs_dir = '/'.join(docs_dir.split('/')[:-1]) # Old symlink location -- Keep this here til we change nginx over symlink = version.project.cnames_symlink_path(cname) run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1])) run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink)) # New symlink location new_docs_dir = version.project.doc_path new_cname_symlink = os.path.join(getattr(settings, 'SITE_ROOT'), 'cnametoproject', cname) run_on_app_servers('mkdir -p %s' % '/'.join(new_cname_symlink.split('/')[:-1])) run_on_app_servers('ln -nsf %s %s' % (new_docs_dir, new_cname_symlink)) def symlink_subprojects(version): """ Link from HOME/user_builds/project/subprojects/<project> -> HOME/user_builds/<project>/rtd-builds/ """ # Subprojects if getattr(settings, 'DONT_HIT_DB', True): subproject_slugs = [data['slug'] for data in api.project(version.project.pk).subprojects.get()['subprojects']] else: rels = version.project.subprojects.all() subproject_slugs = [rel.child.slug for rel in rels] for slug in subproject_slugs: slugs = [slug] if '_' in slugs[0]: slugs.append(slugs[0].replace('_', '-')) for subproject_slug in slugs: log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg="Symlinking subproject: %s" % subproject_slug)) # The directory for this specific subproject symlink = version.project.subprojects_symlink_path(subproject_slug) run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1])) # Where the actual docs live docs_dir = os.path.join(settings.DOCROOT, subproject_slug, 'rtd-builds') run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink)) def symlink_translations(version): """ Link from HOME/user_builds/project/translations/<lang> -> HOME/user_builds/<project>/rtd-builds/ """ translations = {} if getattr(settings, 'DONT_HIT_DB', True): for trans in (api .project(version.project.pk) .translations.get()['translations']): translations[trans['language']] = trans['slug'] else: for trans in version.project.translations.all(): translations[trans.language] = trans.slug # Default language, and pointer for 'en' version_slug = version.project.slug.replace('_', '-') translations[version.project.language] = version_slug if not translations.has_key('en'): translations['en'] = version_slug run_on_app_servers( 'mkdir -p {0}' .format(os.path.join(version.project.doc_path, 'translations'))) for (language, slug) in translations.items(): log.debug(LOG_TEMPLATE.format( project=version.project.slug, version=version.slug, msg="Symlinking translation: %s->%s" % (language, slug) 
)) # The directory for this specific translation symlink = version.project.translations_symlink_path(language) translation_path = os.path.join(settings.DOCROOT, slug, 'rtd-builds') run_on_app_servers('ln -nsf {0} {1}'.format(translation_path, symlink)) def symlink_single_version(version): """ Link from HOME/user_builds/<project>/single_version -> HOME/user_builds/<project>/rtd-builds/<default_version>/ """ default_version = version.project.get_default_version() log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=default_version, msg="Symlinking single_version")) # The single_version directory symlink = version.project.single_version_symlink_path() run_on_app_servers('mkdir -p %s' % '/'.join(symlink.split('/')[:-1])) # Where the actual docs live docs_dir = os.path.join(settings.DOCROOT, version.project.slug, 'rtd-builds', default_version) run_on_app_servers('ln -nsf %s %s' % (docs_dir, symlink)) def remove_symlink_single_version(version): """Remove single_version symlink""" log.debug(LOG_TEMPLATE.format( project=version.project.slug, version=version.project.get_default_version(), msg="Removing symlink for single_version") ) symlink = version.project.single_version_symlink_path() run_on_app_servers('rm -f %s' % symlink)
Python
0
@@ -176,16 +176,63 @@ EMPLATE%0A +from readthedocs.projects.models import Domain%0A from rea @@ -266,16 +266,16 @@ ort api%0A - %0Alog = l @@ -573,365 +573,102 @@ -try:%0A redis_conn = redis.Redis(**settings.REDIS)%0A cnames = redis_conn.smembers('rtd_slug:v1:%25s' %25 version.project.slug)%0A except redis.ConnectionError:%0A log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Failed to symlink cnames, Redis error.'), exc_info=True)%0A return%0A for cname in cname +domains = Domain.objects.filter(project=version.project, active=True)%0A for domain in domain s:%0A @@ -785,21 +785,26 @@ : %25s%22 %25 -cname +domain.url ))%0A @@ -1089,21 +1089,26 @@ nk_path( -cname +domain.url )%0A @@ -1415,21 +1415,26 @@ oject', -cname +domain.url )%0A
0035200543a7b226a095d2fb4ec880e0dd8732fd
Rearrange test data
make_test_data.py
make_test_data.py
import sqlite3


INSERT_SONG = '''
INSERT INTO jukebox_song_queue VALUES (?)
'''
TEST_URIS = [
    'spotify:track:7udJ4LFSIrRnySD3eI8lad',
    'spotify:track:0p1VSXFdkr71f0nO21IEyq',
    'spotify:track:68MToCqJRJvNW8tYoxDl5p'
]


if __name__ == '__main__':
    conn = sqlite3.connect('jukebox.db')
    cursor = conn.cursor()
    for uri in TEST_URIS:
        uri = (uri,)
        cursor.execute(INSERT_SONG, uri)
    conn.commit()
    conn.close()
Python
0.000026
@@ -112,33 +112,32 @@ ack: -7udJ4LFSIrRnySD3eI8lad +68MToCqJRJvNW8tYoxDl5p ', - %0A @@ -196,38 +196,38 @@ y:track: -68MToCqJRJvNW8tYoxDl5p +7udJ4LFSIrRnySD3eI8lad '%0A%5D%0A%0A%0Aif
95ea1d7d6564bcbb2e3b8d2ba254ccd2c1c38436
Add import for focused stuff
mamba/__init__.py
mamba/__init__.py
__version__ = '0.9.2'


def description(message):
    pass


def _description(message):
    pass


def it(message):
    pass


def _it(message):
    pass


def context(message):
    pass


def _context(message):
    pass


def before():
    pass


def after():
    pass
Python
0
@@ -88,24 +88,62 @@ %0A pass%0A%0A%0A +def fdescription(message):%0A pass%0A%0A%0A def it(messa @@ -183,24 +183,53 @@ %0A pass%0A%0A%0A +def fit(message):%0A pass%0A%0A%0A def context( @@ -279,24 +279,58 @@ %0A pass%0A%0A%0A +def fcontext(message):%0A pass%0A%0A%0A def before()
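Decoded, the patch adds three focused variants alongside the existing no-op hooks, each inserted after its underscore-prefixed counterpart:

def fdescription(message):
    pass


def fit(message):
    pass


def fcontext(message):
    pass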
b98429f96e228eab675a97cd5f15f473360a7155
fix typo
parsers/pywwa/common.py
parsers/pywwa/common.py
"""Support lib for the parser scripts found in this directory""" import os import inspect import logging import pwd import datetime from io import StringIO import socket import sys import traceback from email.mime.text import MIMEText from syslog import LOG_LOCAL2 # 3rd party from twisted.python import log as tplog from twisted.logger import formatEvent from twisted.python import syslog from twisted.python import failure from twisted.internet import reactor from twisted.mail import smtp import pyiem from pyiem.util import LOG, utc # Local Be careful of circeref here import pywwa from pywwa.cmdline import parse_cmdline from pywwa.database import get_sync_dbconn from pywwa.xmpp import make_jabber_client # http://bugs.python.org/issue7980 datetime.datetime.strptime("2013", "%Y") SETTINGS = pywwa.SETTINGS EMAIL_TIMESTAMPS = [] class CustomFormatter(logging.Formatter): """A custom log formatter class.""" def format(self, record): """Return a string!""" return ( f"[{record.filename}:{record.lineno} {record.funcName}] " f"{record.getMessage()}" ) def shutdown(default=5): """Shutdown method in given number of seconds.""" # Careful, default could have been passed in as an error if not isinstance(default, int): LOG.error(default) delay = 5 else: delay = ( pywwa.CTX.shutdown_delay if pywwa.CTX.shutdown_delay is not None else default ) LOG.info("Shutting down in %s seconds...", delay) reactor.callLater(delay, reactor.callFromThread, reactor.stop) def utcnow(): """Return what utcnow is based on command line.""" return utc() if pywwa.CTX.utcnow is None else pywwa.CTX.utcnow def dbwrite_enabled(): """Is database writing not-disabled as per command line.""" return not pywwa.CTX.disable_dbwrite def replace_enabled(): """Is -r --replace enabled.""" return pywwa.CTX.replace def setup_syslog(): """Setup how we want syslogging to work""" # https://stackoverflow.com/questions/13699283 frame = inspect.stack()[-1] module = inspect.getmodule(frame[0]) filename = "None" if module is None else os.path.basename(module.__file__) syslog.startLogging( prefix=f"pyWWA/{filename}", facility=LOG_LOCAL2, setStdout=not pywwa.CTX.stdout_logging, ) # pyIEM does logging via python stdlib logging, so we need to patch those # messages into twisted's logger. sh = logging.StreamHandler(stream=tplog.logfile) sh.setFormatter(CustomFormatter()) LOG.addHandler(sh) # Log stuff to stdout if we are running from command line. if pywwa.CTX.stdout_logging: tplog.addObserver(lambda x: print(formatEvent(x))) # Allow for more verbosity when we are running this manually. LOG.setLevel(logging.DEBUG if sys.stdout.isatty() else logging.INFO) def load_settings(): """Load database properties.""" with get_sync_dbconn("mesosite") as dbconn: cursor = dbconn.cursor() cursor.execute("SELECT propname, propvalue from properties") for row in cursor: SETTINGS[row[0]] = row[1] LOG.info("Loaded %s settings from database", len(SETTINGS)) cursor.close() def should_email(): """Prevent email bombs Use the setting `pywwa_email_limit` to threshold the number of emails permitted within the past hour @return boolean if we should email or not """ EMAIL_TIMESTAMPS.insert(0, utc()) delta = EMAIL_TIMESTAMPS[0] - EMAIL_TIMESTAMPS[-1] email_limit = int(SETTINGS.get("pywwa_email_limit", 10)) if len(EMAIL_TIMESTAMPS) < email_limit: return True while len(EMAIL_TIMESTAMPS) > email_limit: EMAIL_TIMESTAMPS.pop() return delta > datetime.timedelta(hours=1) def email_error(exp, message, trimstr=100): """ Helper function to generate error emails when necessary and hopefully not flood! 
@param exp A string or perhaps a twisted python failure object @param message A string of more information to pass along in the email @return boolean If an email was sent or not... """ # Always log a message about our fun cstr = StringIO() if isinstance(exp, failure.Failure): exp.printTraceback(file=cstr) LOG.error(exp) elif isinstance(exp, Exception): traceback.print_exc(file=cstr) LOG.error(exp) else: LOG.info(exp) cstr.seek(0) if isinstance(message, str): LOG.info(message[:trimstr]) else: LOG.info(message) # Logic to prevent email bombs if not should_email(): LOG.info( "Email threshold of %s exceeded, so no email sent!", SETTINGS.get("pywwa_email_limit", 10), ) return False hn = socket.gethostname() hh = f"{pwd.getpwuid(os.getuid())[0]}@{hn}" la = " ".join(["{a:.2f}" for a in os.getloadavg()]) txt = ( f"System : {hh} [CWD: {os.getcwd()}]\n" f"pyiem.version : {pyiem.__version__}\n" f"System UTC date : {utc()}\n" f"pyWWA UTC date : {utcnow()}\n" f"process id : {os.getpid()}\n" f"system load : {la}\n" f"Exception : {exp}\n" f"Message:\n{message}\n" ) # prevent any noaaport text from making ugly emails msg = MIMEText(txt.replace("\r\r\n", "\n"), "plain", "utf-8") # Send the email already! msg["subject"] = ( f"[pyWWA] {sys.argv[0].split('/')[-1]} Traceback -- {hn}" ) msg["From"] = SETTINGS.get("pywwa_errors_from", "ldm@localhost") msg["To"] = SETTINGS.get("pywwa_errors_to", "ldm@localhost") if not pywwa.CTX.disable_email: df = smtp.sendmail( SETTINGS.get("pywwa_smtp", "smtp"), msg["From"], msg["To"], msg ) df.addErrback(LOG.error) else: LOG.info("Sending email disabled by command line `-e` flag.") return True def send_message(plain, text, extra): """Helper to connect with running JABBER instance.""" if pywwa.JABBER is None: LOG.info("failed to send as pywwa.JABBER is None, not setup?") return pywwa.JABBER.send_message(plain, text, extra) def main(with_jabber=True): """Standard workflow from our parsers. Args: with_jabber(bool): Should we setup a jabber instance? """ # This is blocking, but necessary to make sure settings are loaded before # we go on our merry way pywwa.CTX = parse_cmdline(sys.argv) setup_syslog() load_settings() if with_jabber: make_jabber_client()
Python
0.999991
@@ -4918,16 +4918,17 @@ %22.join(%5B +f %22%7Ba:.2f%7D
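Decoded (%22 is a double quote), the typo fix turns the load-average format string into an actual f-string, so the values are interpolated instead of printing the literal placeholder:

    la = " ".join([f"{a:.2f}" for a in os.getloadavg()])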
d3b3e9af722ac00b21bf36706f4e0ab7cf94af00
bump to v0.6.4
mando/__init__.py
mando/__init__.py
__version__ = '0.5'

try:
    from mando.core import Program
except ImportError as e:  # pragma: no cover
    # unfortunately the only workaround for Python2.6, argparse and setup.py
    e.version = __version__
    raise e

main = Program()
command = main.command
arg = main.arg
parse = main.parse
execute = main.execute
Python
0
@@ -14,9 +14,11 @@ '0. -5 +6.4 '%0A%0At
59400100aa2f35bfea52b3cf049ef8d0f958527d
Fix error when reaching a dead end in the markov chain
markov/markov2.py
markov/markov2.py
#!python3 import string import random import time import sys ''' This is an implementation of a markov chain used for text generation. Just pass a file name as an argument and it should load it up, build a markov chain with a state for each word(s), and start walking through the chain, writing incoherent text to the terminal. ''' asciiset = set(string.ascii_letters) asciiset.add(' ') asciiset.add('.') def strip2ascii(txt): return ''.join([ch for ch in txt if ch in asciiset]) def tokenize(fname): ''' Generate tokens defined by - Sequences of characters that aren't spaces - Periods For example, 'This is a test. Ok.' => ('This', 'is', 'a', 'test', '.', 'Ok, '.') ''' with open(fname, 'r') as f: for line in f: stripped = strip2ascii(line) for word in stripped.split(): if word[-1] == '.': yield word[:-1] yield '.' else: yield word def buildtransitionmap(tokens, order): dct = {} prev = ('',)*order for token in tokens: if prev in dct: dct[prev].append(token) else: dct[prev] = [token] prev = prev[1:]+(token,) return dct def transition(word, transmap): return random.choice(transmap[word]) def eternalramble(fname, order): ''' Walk through the markov chain printing out words to the terminal one at a time ''' transmap = buildtransitionmap(tokenize(fname), order) prev = random.choice(list(transmap.keys())) while True: word = transition(prev, transmap) print(word, end=' ') prev = prev[1:]+(word,) sys.stdout.flush() time.sleep(0.25) def printusage(): print('Usage: markov filename order') print(' filename: the filename of the text to base the markov chain on.') print(' order: how many consecutive words make up each state (2 works well)') def launch(): if len(sys.argv) != 3: printusage() return try: order = int(sys.argv[2]) except: printusage() return eternalramble(sys.argv[1], order) if __name__ == '__main__': launch()
Python
0.000006
@@ -1288,24 +1288,147 @@ def -transition(word, +walk(transmap, prev=None):%0A if prev == None:%0A prev = random.choice(list(transmap.keys()))%0A%0A while True:%0A if not prev in tra @@ -1436,50 +1436,161 @@ smap -) :%0A -return random.choice(transmap%5Bword%5D + prev = random.choice(list(transmap.keys()))%0A%0A word = random.choice(transmap%5Bprev%5D)%0A yield word%0A prev = prev%5B1:%5D+(word, )%0A%0A%0A @@ -1787,100 +1787,25 @@ -prev = random.choice(list(transmap.keys()))%0A while True:%0A word = transition(prev, +for word in walk( tran @@ -1809,16 +1809,17 @@ ransmap) +: %0A @@ -1844,41 +1844,8 @@ ')%0A - prev = prev%5B1:%5D+(word,)%0A
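Decoded, the patch replaces transition() with a walk() generator that re-seeds the state whenever the chain reaches a key with no outgoing transitions, which is the dead end named in the subject. Sketch with approximate whitespace:

def walk(transmap, prev=None):
    if prev == None:
        prev = random.choice(list(transmap.keys()))

    while True:
        if not prev in transmap:
            # dead end: re-seed from a random state
            prev = random.choice(list(transmap.keys()))

        word = random.choice(transmap[prev])
        yield word
        prev = prev[1:]+(word,)

eternalramble() then simply iterates "for word in walk(transmap):" and drops its own state bookkeeping.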
7ccb52897e82629e4bbb0298dba4de76bc6a63db
Add a deprecation warning
pathvalidate/_symbol.py
pathvalidate/_symbol.py
""" .. codeauthor:: Tsuyoshi Hombashi <[email protected]> """ import re from typing import Sequence from ._common import ascii_symbols, preprocess, unprintable_ascii_chars from .error import InvalidCharError __RE_UNPRINTABLE = re.compile( "[{}]".format(re.escape("".join(unprintable_ascii_chars))), re.UNICODE ) __RE_SYMBOL = re.compile( "[{}]".format(re.escape("".join(ascii_symbols + unprintable_ascii_chars))), re.UNICODE ) def validate_unprintable(text: str) -> None: # deprecated match_list = __RE_UNPRINTABLE.findall(preprocess(text)) if match_list: raise InvalidCharError("unprintable character found: {}".format(match_list)) def replace_unprintable(text: str, replacement_text: str = "") -> str: # deprecated try: return __RE_UNPRINTABLE.sub(replacement_text, preprocess(text)) except (TypeError, AttributeError): raise TypeError("text must be a string") def validate_symbol(text: str) -> None: """ Verifying whether symbol(s) included in the ``text`` or not. Args: text: Input text to validate. Raises: ValidationError (ErrorReason.INVALID_CHARACTER): If symbol(s) included in the ``text``. """ match_list = __RE_SYMBOL.findall(preprocess(text)) if match_list: raise InvalidCharError("invalid symbols found: {}".format(match_list)) def replace_symbol( text: str, replacement_text: str = "", exclude_symbols: Sequence[str] = [], is_replace_consecutive_chars: bool = False, is_strip: bool = False, ) -> str: """ Replace all of the symbols in the ``text``. Args: text: Input text. replacement_text: Replacement text. exclude_symbols: Symbols that exclude from the replacement. is_replace_consecutive_chars: If |True|, replace consecutive multiple ``replacement_text`` characters to a single character. is_strip: If |True|, strip ``replacement_text`` from the beginning/end of the replacement text. Returns: A replacement string. Example: :ref:`example-sanitize-symbol` """ if exclude_symbols: regexp = re.compile( "[{}]".format( re.escape( "".join(set(ascii_symbols + unprintable_ascii_chars) - set(exclude_symbols)) ) ), re.UNICODE, ) else: regexp = __RE_SYMBOL try: new_text = regexp.sub(replacement_text, preprocess(text)) except TypeError: raise TypeError("text must be a string") if not replacement_text: return new_text if is_replace_consecutive_chars: new_text = re.sub("{}+".format(re.escape(replacement_text)), replacement_text, new_text) if is_strip: new_text = new_text.strip(replacement_text) return new_text
Python
0.001163
@@ -76,16 +76,32 @@ port re%0A +import warnings%0A from typ @@ -759,36 +759,137 @@ -%3E str:%0A -# deprecated +warnings.warn(%0A %22'replace_unprintable' has moved to 'replace_unprintable_char'%22, DeprecationWarning%0A )%0A %0A try:%0A
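Decoded, the patch imports warnings and replaces the bare "# deprecated" comment in replace_unprintable with an explicit DeprecationWarning. Sketch, whitespace approximate:

import warnings

def replace_unprintable(text: str, replacement_text: str = "") -> str:
    warnings.warn(
        "'replace_unprintable' has moved to 'replace_unprintable_char'", DeprecationWarning
    )

    try:
        return __RE_UNPRINTABLE.sub(replacement_text, preprocess(text))
    except (TypeError, AttributeError):
        raise TypeError("text must be a string")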
12dac769152ebd074ed2b415d3980729bdbe3e46
Make nonexistent warning more obvious
regparser/commands/compare_to.py
regparser/commands/compare_to.py
import json import os import click from json_delta import udiff import requests import requests_cache def local_and_remote_generator(api_base, paths): """Find all local files in `paths` and pair them with the appropriate remote file (prefixing with api_base). As the local files could be at any position in the file system, we back out directories until we hit one of the four root resource types (diff, layer, notice, regulation)""" local_names = [path for path in paths if os.path.isfile(path)] # this won't duplicate the previous line as it'll only add files in dirs local_names.extend(os.path.join(dirpath, filename) for path in paths for dirpath, _, filenames in os.walk(path) for filename in filenames) for local_name in local_names: dirname, basename = os.path.split(local_name) reversed_suffix = [basename] # these are the four root resource types while basename not in ('diff', 'layer', 'notice', 'regulation'): dirname, basename = os.path.split(dirname) reversed_suffix.append(basename) remote_name = api_base + '/'.join(reversed(reversed_suffix)) yield (local_name, remote_name) def compare(local_path, remote_url): """Downloads and compares a local JSON file with a remote one. If there is a difference, notifies the user and prompts them if they want to see the diff""" remote_response = requests.get(remote_url) if remote_response.status_code == 404: click.echo("Nonexistent: " + remote_url) else: remote = remote_response.json() with open(local_path) as f: local = json.load(f) if remote != local: click.echo("Content differs: {} {}".format(local_path, remote_url)) if click.confirm("Show diff?"): diffs_str = '\n'.join(udiff(remote, local)) click.echo_via_pager(diffs_str) @click.command() @click.argument('api_base') @click.argument('paths', nargs=-1, required=True, type=click.Path(exists=True, resolve_path=True)) @click.pass_context def compare_to(ctx, api_base, paths): """Compare local JSON to a remote server. This is useful for verifying changes to the parser. API_BASE is the uri of the root of the API. Use what would be the last parameter in the `write_to` command. PATH parameters indicate specific files or directories to use when comparing. For example, use `/some/path/to/regulation/555` to compare all versions of 555. Glob syntax works if your shell supports it""" if not api_base.endswith("/"): api_base += "/" # @todo: ugly to uninstall the cache after installing it in eregs.py. # Remove the globalness requests_cache.uninstall_cache() for local, remote in local_and_remote_generator(api_base, paths): compare(local, remote)
Python
0
@@ -5,16 +5,31 @@ rt json%0A +import logging%0A import o @@ -1579,26 +1579,28 @@ -click.echo +logging.warn (%22Nonexi @@ -1610,11 +1610,12 @@ nt: -%22 + +%25s%22, rem
cfb09353b02dd230546775d18dadb1ba7ed2acc6
Refactor submit_comment tests
regulations/tests/tasks_tests.py
regulations/tests/tasks_tests.py
import json import mock import six from celery.exceptions import Retry, MaxRetriesExceededError from requests.exceptions import RequestException from django.test import SimpleTestCase, override_settings from regulations.tasks import submit_comment @mock.patch('regulations.tasks.save_failed_submission') @mock.patch('regulations.tasks.submit_comment.retry') @mock.patch('requests.post') @mock.patch('regulations.tasks.html_to_pdf') @override_settings( ATTACHMENT_BUCKET='test-bucket', ATTACHMENT_ACCESS_KEY_ID='test-access-key', ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key', ATTACHMENT_MAX_SIZE=42, REGS_GOV_API_URL='test-url', REGS_GOV_API_KEY='test-key', ) class TestSubmitComment(SimpleTestCase): def test_submit_comment(self, html_to_pdf, post, retry, save_failed_submission): file_handle = six.BytesIO("foobar") html_to_pdf.return_value.__enter__ = mock.Mock( return_value=file_handle) expected_result = {'tracking_number': '133321'} post.return_value.status_code = 201 post.return_value.json.return_value = expected_result body = {'assembled_comment': {'sections': []}} result = submit_comment(body) self.assertEqual(result, expected_result) def test_failed_submit_raises_retry(self, html_to_pdf, post, retry, save_failed_submission): file_handle = six.BytesIO("foobar") html_to_pdf.return_value.__enter__ = mock.Mock( return_value=file_handle) post.side_effect = [RequestException] retry.return_value = Retry() body = {'assembled_comment': {'sections': []}} with self.assertRaises(Retry): submit_comment(body) def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry, save_failed_submission): file_handle = six.BytesIO("foobar") html_to_pdf.return_value.__enter__ = mock.Mock( return_value=file_handle) post.side_effect = [RequestException] retry.return_value = MaxRetriesExceededError() body = {'assembled_comment': {'sections': []}} submit_comment(body) save_failed_submission.assert_called_with(json.dumps(body))
Python
0
@@ -724,16 +724,292 @@ Case):%0A%0A + def setUp(self):%0A self.file_handle = six.BytesIO(%22some-content%22)%0A self.submission = %7B'assembled_comment': %5B%0A %7B%22id%22: %22A1%22, %22comment%22: %22A simple comment%22, %22files%22: %5B%5D%7D,%0A %7B%22id%22: %22A5%22, %22comment%22: %22Another comment%22, %22files%22: %5B%5D%7D%0A %5D%7D%0A%0A def @@ -1117,52 +1117,8 @@ n):%0A - file_handle = six.BytesIO(%22foobar%22)%0A @@ -1186,32 +1186,37 @@ return_value= +self. file_handle)%0A%0A @@ -1264,14 +1264,28 @@ ': ' -133321 +some-tracking-number '%7D%0A @@ -1394,63 +1394,8 @@ lt%0A%0A - body = %7B'assembled_comment': %7B'sections': %5B%5D%7D%7D%0A @@ -1418,28 +1418,39 @@ mit_comment( -body +self.submission )%0A%0A s @@ -1632,52 +1632,8 @@ n):%0A - file_handle = six.BytesIO(%22foobar%22)%0A @@ -1701,32 +1701,37 @@ return_value= +self. file_handle)%0A%0A @@ -1817,63 +1817,8 @@ ()%0A%0A - body = %7B'assembled_comment': %7B'sections': %5B%5D%7D%7D%0A @@ -1879,20 +1879,31 @@ comment( -body +self.submission )%0A%0A d @@ -2044,52 +2044,8 @@ n):%0A - file_handle = six.BytesIO(%22foobar%22)%0A @@ -2121,16 +2121,21 @@ n_value= +self. file_han @@ -2255,82 +2255,38 @@ -body = %7B'assembled_comment': %7B'sections': %5B%5D%7D%7D%0A submit_comment(body +submit_comment(self.submission )%0A @@ -2348,11 +2348,22 @@ mps( -body +self.submission ))%0A
29e491c5505d2068b46eb489044455968e53ab70
Add tests for strait and fjord
test/400-bay-water.py
test/400-bay-water.py
assert_has_feature( 14, 2623, 6318, 'water', { 'kind': 'bay', 'label_placement': 'yes' })
Python
0.000007
@@ -1,8 +1,47 @@ +# osm_id: 43950409 name: San Pablo Bay%0A assert_h @@ -130,8 +130,290 @@ yes' %7D)%0A +%0A# osm_id: 360566115 name: Byron strait%0Aassert_has_feature(%0A 14, 15043, 8311, 'water',%0A %7B 'kind': 'strait', 'label_placement': 'yes' %7D)%0A%0A# osm_id: -1451065 name: Horsens Fjord%0Aassert_has_feature(%0A 14, 8645, 5114, 'water',%0A %7B 'kind': 'fjord', 'label_placement': 'yes' %7D)%0A
83781f3b2f1cde0aab913ff4d64de45cf9b798be
Update snooper for multi-spline qp controller inputs
software/control/src/qp_controller_input_snooper.py
software/control/src/qp_controller_input_snooper.py
#!/usr/bin/python ''' Listens to QP Controller Inputs and draws, in different but order-consistent colors, the cubic splines being followed by each body motion block. ''' import lcm import drc from drake import lcmt_qp_controller_input, lcmt_body_motion_data import sys import time from bot_lcmgl import lcmgl, GL_LINES import numpy as np color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]]; def pval(coefs, t_off): out = np.array([0.0]*6) for j in range(0, 6): out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3] return out def handle_qp_controller_input_msg(channel, data): msg = lcmt_qp_controller_input.decode(data) #print("received") # draw spline segment for each tracked body for i in range(0, msg.num_tracked_bodies): bmd = msg.body_motion_data[i] ts = bmd.ts; tsdense = np.linspace(ts[0], ts[-1], 20); coefs = np.array(bmd.coefs); color = color_order[i%len(color_order)]; gl.glColor3f(color[0], color[1], color[2]); gl.glLineWidth(5); gl.glBegin(GL_LINES); ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]); for j in range(0,tsdense.size-1): gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]); gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]); gl.glEnd(); gl.switch_buffer() lc = lcm.LCM() gl = lcmgl('qp input bmd snoop', lc); subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg) subscription.set_queue_capacity(1); try: while True: lc.handle() except KeyboardInterrupt: pass
Python
0
@@ -902,16 +902,128 @@ bmd.ts;%0A + color = color_order%5Bi%25len(color_order)%5D;%0A for j in range(0, msg.body_motion_data%5Bi%5D.num_spline_coefs):%0A tsde @@ -1047,16 +1047,17 @@ (ts%5B -0 +j %5D, ts%5B -- +j+ 1%5D, @@ -1061,16 +1061,18 @@ %5D, 20);%0A + coef @@ -1097,56 +1097,22 @@ oefs -);%0A color = color_order%5Bi%25len(color_order)%5D;%0A +%5Bj%5D.coefs);%0A @@ -1155,24 +1155,26 @@ or%5B2%5D);%0A + gl.glLineWid @@ -1180,16 +1180,18 @@ dth(5);%0A + gl.g @@ -1212,16 +1212,18 @@ S);%0A + + ps = np. @@ -1246,17 +1246,17 @@ s, t-ts%5B -0 +j %5D) for t @@ -1270,24 +1270,26 @@ ense%5D);%0A + for j in ran @@ -1312,24 +1312,26 @@ e-1):%0A + + gl.glVertex3 @@ -1356,24 +1356,26 @@ , ps%5Bj,2%5D);%0A + gl.glV @@ -1412,24 +1412,26 @@ ps%5Bj+1,2%5D);%0A + gl.glEnd
cdf545cf9385a0490590cd0162141025a1301c09
Use argparse formatter RawDescriptionHelpFormatter, maybe temporarily
track/config.py
track/config.py
import configargparse DEFAULT_CONFIG_FILES=[ './track.cfg', '~/.track.cfg', ] # Bit of a cheat... not actually an object constructor, just a 'make me an object' method def ArgParser(): return configargparse.ArgParser( ignore_unknown_config_file_keys =True, allow_abbrev =True, default_config_files =DEFAULT_CONFIG_FILES, formatter_class =configargparse.ArgumentDefaultsHelpFormatter, config_file_parser_class =configargparse.DefaultConfigFileParser, # INI format args_for_setting_config_path =['-c', '--cfg'], args_for_writing_out_config_file=['-w', '--cfg-write'], )
Python
0
@@ -384,16 +384,17 @@ _FILES,%0A +# @@ -472,16 +472,101 @@ matter,%0A + formatter_class =configargparse.RawDescriptionHelpFormatter,%0A
148d4c44a9eb63016b469c6bf317a3dbe9ed7918
Add documentation for Permutations class
permuta/permutations.py
permuta/permutations.py
from .misc import DancingLinks from .permutation import Permutation import random class Permutations(object): def __init__(self, n): assert 0 <= n self.n = n def __iter__(self): left = DancingLinks(range(1, self.n+1)) res = [] def gen(): if len(left) == 0: yield Permutation(list(res)) else: cur = left.front while cur is not None: left.erase(cur) res.append(cur.value) for p in gen(): yield p res.pop() left.restore(cur) cur = cur.next return gen() def random_element(self): p = [ i+1 for i in range(self.n) ] for i in range(self.n-1, -1, -1): j = random.randint(0, i) p[i],p[j] = p[j],p[i] return Permutation(p) def __str__(self): return 'The set of Permutations of length %d' % self.n def __repr__(self): return 'Permutations(%d)' % self.n
Python
0
@@ -76,16 +76,17 @@ random%0A%0A +%0A class Pe @@ -113,97 +113,305 @@ -def __init__(self, n):%0A assert 0 %3C= n%0A self.n = n%0A%0A def __iter__(self):%0A +%22%22%22Class for iterating through all Permutations of length n%22%22%22%0A%0A def __init__(self, n):%0A %22%22%22Returns an object giving all permutations of length n%22%22%22%0A assert 0 %3C= n%0A self.n = n%0A%0A def __iter__(self):%0A %22%22%22Iterates through permutations of length n in lexical order%22%22%22 %0A @@ -472,16 +472,17 @@ es = %5B%5D%0A +%0A @@ -952,32 +952,87 @@ _element(self):%0A + %22%22%22Returns a random permutation of length n%22%22%22%0A p = %5B i+ @@ -1028,17 +1028,16 @@ p = %5B - i+1 for @@ -1054,17 +1054,16 @@ (self.n) - %5D%0A @@ -1152,16 +1152,17 @@ p%5Bi%5D, + p%5Bj%5D = p @@ -1165,16 +1165,17 @@ = p%5Bj%5D, + p%5Bi%5D%0A @@ -1348,17 +1348,16 @@ s(%25d)' %25 self.n%0A -%0A
f2e14108374d0b6afe67cd6c310804144016af5e
Fix numba.test() after rebase
numba/__init__.py
numba/__init__.py
# Import all special functions before registering the Numba module # type inferer from ._version import get_versions __version__ = get_versions()['version'] del get_versions from numba.special import * import os import sys import logging from numba import typesystem def get_include(): numba_root = os.path.dirname(os.path.abspath(__file__)) return os.path.join(numba_root, "include") # NOTE: Be sure to keep the logging level commented out before commiting. See: # https://github.com/numba/numba/issues/31 # A good work around is to make your tests handle a debug flag, per # numba.tests.test_support.main(). class _RedirectingHandler(logging.Handler): ''' A log hanlder that applies its formatter and redirect the emission to a parent handler. ''' def set_handler(self, handler): self.handler = handler def emit(self, record): # apply our own formatting record.msg = self.format(record) record.args = [] # clear the args # use parent handler to emit record self.handler.emit(record) def _config_logger(): root = logging.getLogger(__name__) format = "\n\033[1m%(levelname)s -- "\ "%(module)s:%(lineno)d:%(funcName)s\033[0m\n%(message)s" try: parent_hldr = root.parent.handlers[0] except IndexError: # parent handler is not initialized? # build our own handler --- uses sys.stderr by default. parent_hldr = logging.StreamHandler() hldr = _RedirectingHandler() hldr.set_handler(parent_hldr) fmt = logging.Formatter(format) hldr.setFormatter(fmt) root.addHandler(hldr) root.propagate = False # do not propagate to the root logger _config_logger() from . import special from numba.typesystem import * from . import decorators from numba.minivect.minitypes import FunctionType from .decorators import * from numba.error import * # doctest compatible for jit or autojit numba functions from numba.tests.test_support import testmod EXCLUDE_TEST_PACKAGES = ["bytecode"] def split_path(path): return path.split(os.sep) def exclude_package_dirs(dirs): for exclude_pkg in EXCLUDE_TEST_PACKAGES: if exclude_pkg in dirs: dirs.remove(exclude_pkg) def qualified_test_name(root): qname = root.replace("/", ".").replace("\\", ".").replace(os.sep, ".") + "." 
offset = qname.rindex('numba.tests.') return qname[offset:] def whitelist_match(whitelist, modname): if whitelist: return any(item in modname for item in whitelist) return True def map_returncode_to_message(retcode): if retcode < 0: retcode = -retcode return signal_to_name.get(retcode, "Signal %d" % retcode) return "" try: import signal except ImportError: signal_to_name = {} else: signal_to_name = dict((signal_code, signal_name) for signal_name, signal_code in vars(signal).items() if signal_name.startswith("SIG")) def test(whitelist=None, blacklist=None): import os from os.path import dirname, join import subprocess run = failed = 0 for root, dirs, files in os.walk(join(dirname(__file__), 'tests')): qname = qualified_test_name(root) exclude_package_dirs(dirs) for fn in files: if fn.startswith('test_') and fn.endswith('.py'): modname, ext = os.path.splitext(fn) modname = qname + modname if not whitelist_match(whitelist, modname): continue if blacklist and whitelist_match(blacklist, modname): continue run += 1 print "running %-60s" % (modname,), process = subprocess.Popen([sys.executable, '-m', modname], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, err = process.communicate() if process.returncode == 0: print "SUCCESS" else: print "FAILED: %s" % map_returncode_to_message( process.returncode) print out, err print "-" * 80 failed += 1 print "ran test files: failed: (%d/%d)" % (failed, run) return failed def nose_run(module=None): import nose.config import __main__ #os.environ["NOSE_EXCLUDE"] = "(test_all|test_all_noskip|.*compile_with_pycc.*|bytecode)" #os.environ["NOSE_VERBOSE"] = "4" result = nose.main() return len(result.errors), len(result.failures) __all__ = typesystem.__all__ + decorators.__all__ + special.__all__ from numba.typesystem import map_dtype from numba.type_inference.module_type_inference import (is_registered, register, register_inferer, get_inferer, register_unbound, register_callable) from numba.typesystem.typeset import * __all__.extend(["numeric", "floating", "complextypes"])
Python
0.000022
@@ -2036,61 +2036,8 @@ %22%5D%0A%0A -def split_path(path):%0A return path.split(os.sep)%0A%0A def @@ -4243,16 +4243,139 @@ %22-%22 * 80 +%0A%0A run += 1%0A res = call(%5Bsys.executable, '-m', qname + modname%5D)%0A if res != 0: %0A
8b9f68514d78851f3b445f996f3eaf607831d352
Add more descriptive names to variables and functions
raspisump/checkpid.py
raspisump/checkpid.py
#!/usr/bin/python # Check to make sure process raspi-sump is running and restart if required. import subprocess import time def check_pid(): '''Check status of raspisump.py process.''' cmdp1 = "ps aux" cmdp2 = "grep -v grep" cmdp3 = "grep -v sudo" cmdp4 = "grep -c /home/pi/raspi-sump/raspisump.py" cmdp1list = cmdp1.split(' ') cmdp2list = cmdp2.split(' ') cmdp3list = cmdp3.split(' ') cmdp4list = cmdp4.split(' ') part1 = subprocess.Popen(cmdp1list, stdout=subprocess.PIPE) part2 = subprocess.Popen(cmdp2list, stdin=part1.stdout, stdout=subprocess.PIPE) part1.stdout.close() part3 = subprocess.Popen(cmdp3list, stdin=part2.stdout,stdout=subprocess.PIPE) part2.stdout.close() part4 = subprocess.Popen(cmdp4list, stdin=part3.stdout,stdout=subprocess.PIPE) part3.stdout.close() x = int(part4.communicate()[0]) if x == 0: log_check("Process stopped, restarting") restart() elif x == 1: exit(0) else: log_check("Multiple Processes...Killing and Restarting") kill_start() def restart(): '''Restart raspisump.py process.''' restart_cmd = "/home/pi/raspi-sump/raspisump.py &" restart_now = restart_cmd.split(' ') subprocess.Popen(restart_now) exit(0) def kill_start(): '''Kill all instances of raspisump.py process.''' kill_cmd = "killall 09 raspisump.py" kill_it = kill_cmd.split(' ') subprocess.call(kill_it) restart() def log_check(reason): logfile = open("/home/pi/raspi-sump/logs/process_log", 'a') logfile.write(time.strftime("%Y-%m-%d %H:%M:%S,")), logfile.write(reason), logfile.write("\n") logfile.close if __name__ == "__main__": check_pid()
Python
0.000001
@@ -839,17 +839,35 @@ e()%0A -x +number_of_processes = int(p @@ -896,17 +896,35 @@ %0A if -x +number_of_processes == 0:%0A @@ -934,21 +934,24 @@ log_ -check +restarts (%22Proces @@ -1003,17 +1003,35 @@ elif -x +number_of_processes == 1:%0A @@ -1067,21 +1067,24 @@ log_ -check +restarts (%22Multip @@ -1086,17 +1086,17 @@ ultiple -P +p rocesses @@ -1098,17 +1098,17 @@ esses... -K +k illing a @@ -1110,17 +1110,17 @@ ing and -R +r estartin @@ -1554,21 +1554,24 @@ def log_ -check +restarts (reason) @@ -1572,16 +1572,51 @@ eason):%0A + '''Log all process restarts'''%0A logf
51373b776403b94cf0b72b43952013f3b4ecdb2d
Remove useless codes
holosocket/encrypt.py
holosocket/encrypt.py
import struct from Cryptodome.Cipher import AES from Cryptodome.Hash import SHA256 from Cryptodome.Random import get_random_bytes #Cipher_Tag = {'aes-256-gcm': 16} #Nonce_Len = 8 # fuck you 12 bytes class aes_gcm: def __init__(self, key, salt=None): """Create a new AES-GCM cipher. key: Your password like: passw0rd salt: a 16 bytes length byte string, if not provided a random salt will be used nonce: a 8 bytes length byte string, if not provided a random nonce will be used""" self.raw_key = key.encode() if not salt: self._salt = get_random_bytes(16) else: if len(salt) != 16: error_msg = 'salt length should be 16, not {}' raise ValueError(error_msg.format(len(salt))) else: self._salt = salt self.key = SHA256.new(self.raw_key + self._salt).digest() # generate a 256 bytes key self.nonce = 0 def _new(self): nonce = struct.pack('>Q', self.nonce) self.cipher = AES.new(self.key, AES.MODE_GCM, nonce) self.nonce += 1 def encrypt(self, data): """Encrypt data return cipher. data: raw data""" self._new() #Return (cpiher, MAC) return self.cipher.encrypt_and_digest(data) def decrypt(self, data, mac): """Decrypt data. data: cipher mac: gmac""" self._new() #Verify MAC, if matching, will return plain text or raise ValueError plain = self.cipher.decrypt_and_verify(data, mac) return plain @property def salt(self): return self._salt def test(): # AES-GCM print('AES-256-GCM') gen = aes_gcm('test') salt = gen.salt gcipher = gen.encrypt(b'holo') gde = aes_gcm('test', salt) print(gde.decrypt(*gcipher)) if __name__ == '__main__': test()
Python
0.000221
@@ -128,79 +128,8 @@ es%0A%0A -#Cipher_Tag = %7B'aes-256-gcm': 16%7D%0A#Nonce_Len = 8 # fuck you 12 bytes%0A%0A %0Acla @@ -1191,38 +1191,8 @@ w()%0A - #Return (cpiher, MAC)%0A @@ -1371,16 +1371,17 @@ # + Verify M
9e450865a92e21ba1a40c494575b0205ed2c14fa
Fix bug in CommentSecureRedirectToMixin
pinax/comments/views.py
pinax/comments/views.py
from django.contrib.auth.decorators import login_required from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ImproperlyConfigured from django.http import JsonResponse, HttpResponseRedirect from django.shortcuts import get_object_or_404 from django.template import RequestContext from django.template.loader import render_to_string from django.utils.decorators import method_decorator from django.views.generic import CreateView, UpdateView, DeleteView from .authorization import load_can_delete, load_can_edit from .forms import CommentForm from .models import Comment from .signals import commented, comment_updated can_delete = load_can_delete() can_edit = load_can_edit() class CommentSecureRedirectToMixin(object): def get_secure_redirect_to(self, object=None): redirect_to = self.request.POST.get("next") # light security check -- make sure redirect_to isn't garbage. if not redirect_to or " " in redirect_to or redirect_to.startswith("http"): try: if object is not None: url = object.get_absolute_url() elif self.object is not None: url = self.object.content_object.get_absolute_url() except AttributeError: raise ImproperlyConfigured( "No URL to redirect to. Either provide a url or define" " a get_absolute_url method on the Model.") return redirect_to class CommentCreateView(CommentSecureRedirectToMixin, CreateView): form_class = CommentForm content_object = None def get_form_kwargs(self): kwargs = super(CommentCreateView, self).get_form_kwargs() kwargs.update({ "request": self.request, "obj": self.content_object, "user": self.request.user, }) return kwargs def post(self, request, *args, **kwargs): content_type = get_object_or_404(ContentType, pk=self.kwargs.get("content_type_id")) self.content_object = content_type.get_object_for_this_type(pk=self.kwargs.get("object_id")) return super(CommentCreateView, self).post(request, *args, **kwargs) def form_valid(self, form): self.object = form.save() commented.send(sender=self.content_object, comment=self.object, request=self.request) if self.request.is_ajax(): data = { "status": "OK", "comment": self.object.data, "html": render_to_string("pinax/comments/_comment.html", { "comment": self.object }, context_instance=RequestContext(self.request)) } return JsonResponse(data) return HttpResponseRedirect(self.get_secure_redirect_to(self.content_object)) def form_invalid(self, form): if self.request.is_ajax(): data = { "status": "ERROR", "errors": form.errors, "html": render_to_string("pinax/comments/_form.html", { "form": form, "obj": self.content_object }, context_instance=RequestContext(self.request)) } return JsonResponse(data) return HttpResponseRedirect(self.get_secure_redirect_to(self.content_object)) @method_decorator(login_required, name="dispatch") class CommentUpdateView(CommentSecureRedirectToMixin, UpdateView): model = Comment form_class = CommentForm def get_form_kwargs(self): kwargs = super(CommentUpdateView, self).get_form_kwargs() kwargs.update({ "request": self.request, "obj": self.object.content_object, "user": self.request.user, }) return kwargs def form_valid(self, form): self.object = form.save() comment_updated.send(sender=self.object.content_object, comment=self.object, request=self.request) if self.request.is_ajax(): data = { "status": "OK", "comment": self.object.data } return JsonResponse(data) return HttpResponseRedirect(self.get_secure_redirect_to()) def form_invalid(self, form): if self.request.is_ajax(): data = { "status": "ERROR", "errors": form.errors } return JsonResponse(data) return 
HttpResponseRedirect(self.get_secure_redirect_to()) @method_decorator(login_required, name="dispatch") class CommentDeleteView(CommentSecureRedirectToMixin, DeleteView): model = Comment def post(self, request, *args, **kwargs): self.object = self.get_object() success_url = self.get_secure_redirect_to() if can_delete(request.user, self.object): self.object.delete() if request.is_ajax(): return JsonResponse({"status": "OK"}) else: if request.is_ajax(): return JsonResponse({"status": "ERROR", "errors": "You do not have permission to delete this comment."}) return HttpResponseRedirect(success_url)
Python
0
@@ -1094,19 +1094,27 @@ -url +redirect_to = objec @@ -1200,19 +1200,27 @@ -url +redirect_to = self.
53d34397a8df598378820a9115a94097a1e92bce
use val as the value for the update
repository/netrepos/instances.py
repository/netrepos/instances.py
# # Copyright (c) 2004 Specifix, Inc. # All rights reserved # import idtable import sqlite class InstanceTable: """ Generic table for assigning id's to a 3-tuple of IDs. """ def __init__(self, db): self.db = db cu = self.db.cursor() cu.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'") tables = [ x[0] for x in cu ] if "Instances" not in tables: cu.execute("""CREATE TABLE Instances( instanceId INTEGER PRIMARY KEY, itemId INT, versionId INT, flavorId INT, silentRedirect INT, isPresent INT); CREATE UNIQUE INDEX InstancesIdx ON Instances(itemId, versionId, flavorId); """) def addId(self, itemId, versionId, flavorId): cu = self.db.cursor() cu.execute("INSERT INTO Instances VALUES (NULL, %d, %d, %d, 0, 1)", (itemId, versionId, flavorId)) return cu.lastrowid def addRedirect(self, itemId, versionId, redirectId): cu = self.db.cursor() cu.execute("INSERT INTO Instances VALUES (NULL, %d, %d, -1, %d, 1)", (itemId, versionId, redirectId)) return cu.lastrowid def delId(self, theId): assert(type(theId) is int) cu = self.db.cursor() cu.execute("DELETE FROM Instances WHERE instanceId=%d", theId) def getId(self, theId): cu = self.db.cursor() cu.execute("SELECT itemId, versionId, flavorId, isPresent " "FROM Instances WHERE instanceId=%d", theId) try: return cu.next() except StopIteration: raise KeyError, theId def isPresent(self, item): cu = self.db.cursor() cu.execute("SELECT isPresent FROM Instances WHERE " "itemId=%d AND versionId=%d AND flavorId=%d", item) val = cu.fetchone() if not val: return 0 return val[0] def setPresent(self, theId, val): cu = self.db.cursor() cu.execute("UPDATE Instances SET isPresent=0 WHERE instanceId=%d" % theId) def has_key(self, item): cu = self.db.cursor() cu.execute("SELECT instanceId FROM Instances WHERE " "itemId=%d AND versionId=%d AND flavorId=%d", item) return not(cu.fetchone() == None) def __delitem__(self, item): cu = self.db.cursor() cu.execute("DELETE FROM Instances WHERE " "itemId=%d AND versionId=%d AND flavorId=%d", item) def __getitem__(self, item): cu = self.db.cursor() cu.execute("SELECT instanceId FROM Instances WHERE " "itemId=%d AND versionId=%d AND flavorId=%d", item) try: return cu.next()[0] except StopIteration: raise KeyError, item def get(self, item, defValue): cu = self.db.cursor() cu.execute("SELECT instanceId FROM Instances WHERE " "itemId=%d AND versionId=%d AND flavorId=%d", item) item = cu.fetchone() if not item: return defValue return item[0] def removeUnused(self): cu = self.db.cursor() cu.execute(""" DELETE from instances WHERE instanceId IN (SELECT Instances.instanceId from Instances LEFT OUTER JOIN TroveTroves ON Instances.instanceId = TroveTroves.includedId WHERE TroveTroves.includedId is NULL and Instances.isPresent = 0 );""") class FileStreams: def __init__(self, db): self.db = db cu = self.db.cursor() cu.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'") tables = [ x[0] for x in cu ] if 'FileStreams' not in tables: cu.execute("""CREATE TABLE FileStreams(streamId INTEGER PRIMARY KEY, fileId STR, versionId INT, flavorId INT, stream BINARY);""") cu.execute("""CREATE UNIQUE INDEX FileStreamsIdx ON FileStreams(fileId, versionId)""") cu.execute("""CREATE INDEX FileStreamsVersionIdx ON FileStreams(versionId)""") #cu.execute(""" #CREATE TRIGGER FileStreamsDel AFTER DELETE ON TroveFiles #FOR EACH ROW #BEGIN #DELETE FROM FileStreams WHERE streamId = OLD.streamId; #END; #""") def _rowGenerator(self, cu): for row in cu: yield row[0] def addStream(self, key, stream): (fileId, versionId, flavorId) = key cu = 
self.db.cursor() cu.execute("INSERT INTO FileStreams VALUES (NULL, %s, %d, %d, %s)", (fileId, versionId, flavorId, sqlite.encode(stream))) return cu.lastrowid def __delitem__(self, key): (fileId, versionId) = key cu = self.db.cursor() cu.execute("DELETE FROM FileStreams WHERE " "fileId=%s and versionId=%d", (fileId, versionId)) def has_key(self, key): (fileId, versionId) = key cu = self.db.cursor() cu.execute("SELECT stream from FileStreams WHERE " "fileId=%s and versionId=%d", (fileId, versionId)) row = cu.fetchone() return row is not None def __getitem__(self, key): (fileId, versionId) = key cu = self.db.cursor() cu.execute("SELECT stream from FileStreams WHERE " "fileId=%s and versionId=%d", (fileId, versionId)) row = cu.fetchone() if row is None: raise KeyError, key return row[0] def getStreamId(self, key): (fileId, versionId) = key cu = self.db.cursor() cu.execute("SELECT streamId from FileStreams WHERE " "fileId=%s and versionId=%d", (fileId, versionId)) row = cu.fetchone() if row is None: raise KeyError, key return row[0] def removeUnusedStreams(self): cu = self.db.cursor() cu.execute(""" DELETE from fileStreams WHERE streamId in (SELECT streamId FROM (SELECT fileStreams.streamId, troveFiles.instanceId from FileStreams LEFT OUTER JOIN TroveFiles ON FileStreams.streamId = trovefiles.streamId) WHERE instanceId is NULL) """)
Python
0
@@ -1963,17 +1963,18 @@ Present= -0 +%25d WHERE i @@ -1990,21 +1990,42 @@ =%25d%22 - %0A%09%09%09%25 +,%0A (val, theId) +) %0A%0A
6288caa954c8834ef6fec0bf24c62a1c8265e302
Use InstanceProfileName value to remove
pipes/iam/create_iam.py
pipes/iam/create_iam.py
"""Create IAM Instance Profiles, Roles, Users, and Groups.""" import logging import boto3 from boto3.exceptions import botocore from .utils import get_details, get_template LOG = logging.getLogger(__name__) def create_iam_resources(env='dev', app=''): """Create the IAM Resources for the application. Args: env (str): Deployment environment, i.e. dev, stage, prod. app (str): Spinnaker Application name. Returns: True upon successful completion. """ session = boto3.session.Session(profile_name=env) client = session.client('iam') details = get_details(env=env, app=app) resource_action( client, action='create_role', log_format='Role: %(RoleName)s', RoleName=details.role, AssumeRolePolicyDocument=get_template('iam_role_policy.json')) resource_action(client, action='create_instance_profile', log_format='Instance Profile: %(InstanceProfileName)s', InstanceProfileName=details.profile) attach_profile_to_role(client, role_name=details.role, profile_name=details.profile) resource_action(client, action='create_user', log_format='User: %(UserName)s', UserName=details.user) resource_action(client, action='create_group', log_format='Group: %(GroupName)s', GroupName=details.group) resource_action(client, action='add_user_to_group', log_format='User to Group: %(UserName)s -> %(GroupName)s', log_failure=True, GroupName=details.group, UserName=details.user) return True def attach_profile_to_role(client, role_name='forrest_unicorn_role', profile_name='forrest_unicorn_profile'): """Attach an IAM Instance Profile _profile_name_ to Role _role_name_. Args: role_name (str): Name of Role. profile_name (str): Name of Instance Profile. Returns: True upon successful completion. """ current_instance_profiles = client.list_instance_profiles_for_role( RoleName=role_name)['InstanceProfiles'] for profile in current_instance_profiles: if profile['InstanceProfileName'] == profile_name: LOG.info('Found Instance Profile attached to Role: %s -> %s', profile_name, role_name) break else: for remove_profile in current_instance_profiles: client.remove_role_from_instance_profile( InstanceProfileName=remove_profile, RoleName=role_name) LOG.info('Removed Instance Profile from Role: %s -> %s', remove_profile, role_name) client.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) LOG.info('Added Instance Profile to Role: %s -> %s', profile_name, role_name) return True def resource_action(client, action='', log_format='item: %(key)s', log_failure=False, **kwargs): """Call _action_ using boto3 _client_ with _kwargs_. This is meant for _action_ methods that will create or implicitely prove a given Resource exists. The _log_failure_ flag is available for methods that should always succeed, but will occasionally fail due to unknown AWS issues. Args: client (botocore.client.IAM): boto3 client object. action (str): Client method to call. log_format (str): Generic log message format, 'Added' or 'Found' will be prepended depending on the scenario. log_failure (bool): Will log WARNING level 'Failed' instead of 'Found' message. **kwargs: Keyword arguments to pass to _action_ method. Returns: True upon successful completion. """ try: getattr(client, action)(**kwargs) LOG.info(' '.join(('Added', log_format)), kwargs) except botocore.exceptions.ClientError: if not log_failure: LOG.info(' '.join(('Found', log_format)), kwargs) else: LOG.warning(' '.join(('Failed', log_format)), kwargs) return True
Python
0
@@ -2771,16 +2771,39 @@ _profile +%5B'InstanceProfileName'%5D ,%0A
069ef4868e1015dda609c0747b988ec99e461f92
Tweak Badge PDF
registration/admin.py
registration/admin.py
from registration.models import School, Teacher, Participant, Team from django.contrib import admin from reportlab.pdfgen import canvas from django.http import HttpResponse from django.contrib.admin.views.decorators import staff_member_required from reportlab.lib.pagesizes import letter from reportlab.lib.units import inch from math import ceil import time class TeamAdmin(admin.ModelAdmin): list_filter = ('school', 'division',) list_display = ('name', 'school', 'division', 'id') search_fields = ('name', 'id') class SchoolAdmin(admin.ModelAdmin): list_filter = ('school_type',) list_display = ('name', 'school_type',) search_fields = ('name',) class ParticipantAdmin(admin.ModelAdmin): list_display = ('name',) list_filter = ('teacher',) search_fields = ('teacher', 'name',) admin.site.register(Team, TeamAdmin) admin.site.register(School, SchoolAdmin) admin.site.register(Teacher) admin.site.register(Participant, ParticipantAdmin) # # Define an inline admin descriptor for UserProfile model # # which acts a bit like a singleton # class UserProfileInline(admin.StackedInline): # model = TeacherProfile # can_delete = False # verbose_name_plural = 'TeacherProfile' # # # Define a new User admin # class UserAdmin(UserAdmin): # inlines = (UserProfileInline, ) # # # Re-register UserAdmin # admin.site.unregister(User) # admin.site.register(User, UserAdmin) # ## def GetParticipantLabels(request): # Create the HttpResponse object with the appropriate PDF headers. response = HttpResponse(mimetype='application/pdf') response['Content-Disposition'] = 'attachment; filename="participant_labels_' + time.strftime("%b_%d_%Y_%H_%M", time.gmtime()) + '.pdf"' # Create the PDF object, using the response object as its "file." p = canvas.Canvas(response, pagesize=letter, pageCompression=1,) p.setFont('Helvetica', 12) width, height = letter LABELW = 8 * inch LABELSEP = 1 * inch LABELH = 3 * inch data = [] students = Participant.objects.all() for student in students: team_names = [] for team in student.teams.all(): obj = {'team_id':team.id, 'team_name':team.name} team_names.append(obj) label = {'name':student.name, 'teams':team_names} data.append(label) def LabelPosition(ordinal): y, x = divmod(ordinal, 1) x = 14 + x * LABELSEP y = 756 - y * LABELH return x, y total_labels = len(data) sheets = int(ceil(total_labels / 3.0)) for i in range(0, sheets): for pos in range(0, 3): if data: participant = data.pop() x, y = LabelPosition(pos) p.rect(x, y, LABELW, -LABELH) p.rect(x, y, LABELW / 2, -LABELH) tx = p.beginText(x + 25, y - 50) tx.setFont('Helvetica', 36, 36) name_parts = participant['name'].split() name_string = '' for name_part in name_parts: name_string = name_string + '\n' + name_part tx.textLines(name_string) p.drawText(tx) team_string = '' for team in participant['teams']: team_string = team_string + '\n' + team['team_name'] + ' : ' + str(team['team_id']) tx = p.beginText(x + 375, y - 50) tx.setFont('Helvetica', 12, 12) tx.textLines(team_string) p.drawText(tx) p.addPageLabel(p.getPageNumber(),style=' DECIMAL_ARABIC_NUMERALS' ) p.showPage() # Close the PDF object cleanly, and we're done. p.save() return response GetParticipantLabels = staff_member_required(GetParticipantLabels) admin.site.register_view('labels/GetAllParticipantLabels', GetParticipantLabels)
Python
0
@@ -340,16 +340,59 @@ rt ceil%0A +from scoring.models import PreRegistration%0A import t @@ -836,26 +836,16 @@ elds = ( -'teacher', 'name', @@ -3421,16 +3421,199 @@ m_id'%5D)%0A + for event_team in PreRegistration.objects.filter(teams__name=team%5B'team_name'%5D):%0A team_string = team_string + '%5Cn' + event_team.event.name%0A %0A @@ -3638,17 +3638,17 @@ xt(x + 3 -7 +2 5, y - 5
f21204c8828e840dc54c6822348fa9a47bc8964e
Add model's to_dict method.
opensrs/models.py
opensrs/models.py
from dateutil.parser import parse class Domain(object): def __init__(self, data): self.name = data['name'] self.auto_renew = (data['f_auto_renew'] == 'Y') self.expiry_date = parse(data['expiredate']).date() @property def tld(self): return self.name.split('.')[-1]
Python
0
@@ -304,8 +304,177 @@ .')%5B-1%5D%0A +%0A def to_dict(self):%0A return %7B%0A 'name': self.name,%0A 'auto_renew': self.auto_renew,%0A 'expiry_date': self.expiry_date%0A %7D%0A
fa82883576a659d9cd9d830919e744299ac14ac7
improve show command to show target types, build phase types and filter other build phases.
pbxproj/pbxcli/pbxproj_show.py
pbxproj/pbxcli/pbxproj_show.py
""" usage: pbxproj show [options] <project> pbxproj show [options] (--target <target>...) <project> [(-s | --source-files) | (-H | --header-files) | (-r | --resource-files) | (-f | --framework-files)] positional arguments: <project> Project path to the .xcodeproj folder. generic options: -h, --help This message. -t, --target <target> Target name to be modified. If there is no target specified, all targets are used. -b, --backup Creates a backup before start processing the command. target options: -s, --source-files Show the source files attached to the target -r, --resource-files Show the resource files attached to the target -f, --framework-files Show the library files attached to the target -H, --header-files Show the header files attached to the target -c, --configurations Show the configurations attached to the target """ from pbxproj.pbxcli import * def execute(project, args): # make a decision of what function to call based on the -D flag if args[u'--target']: return _target_info(project, args[u'--target'], args) else: return _summary(project, args) def _summary(project, args): info = '' for target in project.objects.get_targets(): info += "{name}:\n" \ "\tProduct name: {productName}\n" \ "\tConfigurations: {configs}\n" \ .format(name=target.name, productName=target.productName, configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)]), ) for build_phase_id in target.buildPhases: build_phase = project.objects[build_phase_id] info += "\t{name} count: {count}\n"\ .format(name=build_phase._get_comment(), count=build_phase.files.__len__()) info += "\n" return info def _target_info(project, target_name, args): build_phases = [] if args[u'--source-files']: build_phases += [u'PBXSourcesBuildPhase'] elif args[u'--header-files']: build_phases += [u'PBXHeadersBuildPhase'] elif args[u'--resource-files']: build_phases += [u'PBXResourcesBuildPhase'] elif args[u'--framework-files']: build_phases += [u'PBXFrameworksBuildPhase'] info = '' for target in project.objects.get_targets(target_name): info += "{name}:\n" \ "\tProduct name: {productName}\n" \ .format(name=target.name, productName=target.productName, configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)]), ) if args[u'--configurations']: info += "\tConfigurations: {configs}\n" \ .format(configs=', '.join([c.name for c in project.objects.get_configurations_on_targets(target.name)])) for build_phase_id in target.buildPhases: build_phase = project.objects[build_phase_id] if build_phase.isa in build_phases: info += "\t{name}: \n\t\t".format(name=build_phase._get_comment()) files = [] for build_file_id in build_phase.files: build_file = project.objects[build_file_id] files.append(project.objects[build_file.fileRef]._get_comment()) info += '{files}\n'.format(files="\n\t\t".join(sorted(files))) info += '\n' return info
Python
0
@@ -129,82 +129,369 @@ s) %7C - (-H %7C --header-files) %7C (-r %7C --resource-files) %7C (-f %7C --framework-files +%0A (-H %7C --header-files) %7C%0A (-r %7C --resource-files) %7C%0A (-f %7C --framework-files) %7C%0A (--build-phase-files %3Cbuild_phase_type%3E )%5D%0A%0A @@ -1290,16 +1290,116 @@ target%0A + -B, --build-phase-files %3Ctype%3E Show the files associated to the build phase of the given type.%0A %22%22%22%0A%0Afro @@ -1425,16 +1425,17 @@ port *%0A%0A +%0A def exec @@ -1775,32 +1775,76 @@ = %22%7Bname%7D:%5Cn%22 %5C%0A + %22%5CtTarget type: %7Btype%7D%5Cn%22 %5C%0A @@ -2011,32 +2011,69 @@ et.productName,%0A + type=target.isa,%0A @@ -2333,16 +2333,30 @@ %5Ct%7Bname%7D + (%7Btype%7D) file count: @@ -2423,16 +2423,38 @@ mment(), + type=build_phase.isa, count=b @@ -2872,32 +2872,32 @@ mework-files'%5D:%0A - build_ph @@ -2932,16 +2932,110 @@ dPhase'%5D +%0A elif args%5Bu'--build-phase-files'%5D:%0A build_phases += %5Bargs%5Bu'--build-phase-files'%5D%5D %0A%0A in
93c0deffe1d8edc9a3508a623a4f412261127702
Remove new internal field.
pdcupdater/handlers/compose.py
pdcupdater/handlers/compose.py
# -*- coding: utf-8 -*- import copy import logging import requests import pdcupdater.handlers import pdcupdater.services import pdcupdater.utils log = logging.getLogger(__name__) session = requests.Session() # These are the states of a pungi4 compose that we care about. # There are other states that we don't care about.. like DOOMED, etc.. final = [ 'FINISHED', 'FINISHED_INCOMPLETE', ] class NewComposeHandler(pdcupdater.handlers.BaseHandler): """ When pungi-koji finishes a new compose. """ def __init__(self, *args, **kwargs): super(NewComposeHandler, self).__init__(*args, **kwargs) self.old_composes_url = self.config['pdcupdater.old_composes_url'] @property def topic_suffixes(self): return [ 'pungi.compose.status.change', ] def can_handle(self, pdc, msg): if not msg['topic'].endswith('pungi.compose.status.change'): return False if not msg['msg']['status'] in final: return False return True def handle(self, pdc, msg): # This is something like Fedora-24-20151130.n.2 or Fedora-Rawhide-201.. compose_id = msg['msg']['compose_id'] # The URL given looks like # http://kojipkgs.fedoraproject.org/compose/rawhide/COMPOSEID/compose # but we want # http://kojipkgs.fedoraproject.org/compose/rawhide/COMPOSEID # So handle it carefully, like this compose_url = msg['msg']['location']\ .strip('/')\ .strip('compose')\ .strip('/') self._import_compose(pdc, compose_id, compose_url) def audit(self, pdc): # Query the data sources old_composes = pdcupdater.services.old_composes(self.old_composes_url) pdc_composes = pdc.get_paged(pdc['composes']._) # normalize the two lists old_composes = set([idx for branch, idx, url in old_composes]) pdc_composes = set([c['compose_id'] for c in pdc_composes]) # use set operators to determine the difference present = pdc_composes - old_composes absent = old_composes - pdc_composes # XXX - HACK - we're only interested (really) in things that exist on # kojipkgs but which do not exist in PDC. We are not interested in # things which appears in PDC but do not appear on kojipkgs. releng # will periodically clean up the old unused composes from kojipkgs.. so # we'll just silence ourselves here on that matter. present = set() # This is fine ☕ return present, absent def initialize(self, pdc): old_composes = pdcupdater.services.old_composes(self.old_composes_url) for _, compose_id, url in old_composes: try: self._import_compose(pdc, compose_id, url) except Exception as e: if getattr(e, 'response', None): log.exception("Failed to import %r - %r %r" % ( url, e.response.url, e.response.text)) else: log.exception("Failed to import %r" % url) @pdcupdater.utils.with_ridiculous_timeout def _import_compose(self, pdc, compose_id, compose_url): base = compose_url + "/compose/metadata" url = base + '/composeinfo.json' response = session.get(url) if not bool(response): raise IOError("Failed to get %r: %r" % (url, response)) composeinfo = response.json() # Before we waste any more time pulling down 100MB files from koji and # POSTing them back to PDC, let's check to see if we already know about # this compose. compose_id = composeinfo['payload']['compose']['id'] log.info("Importing compose %r" % compose_id) if pdcupdater.utils.compose_exists(pdc, compose_id): log.warn("%r already exists in PDC." % compose_id) return # OK, go ahead and pull down these gigantic files. url = base + '/images.json' response = session.get(url) if not bool(response): raise IOError("Failed to get %r: %r" % (url, response)) images = response.json() url = base + '/rpms.json' response = session.get(url) # Check first for a 404... 
if response.status_code == 404: # Not all composes have rpms. In particular, atomic ones. # https://github.com/fedora-infra/pdc-updater/issues/11 log.warn('Found no rpms.json file at %r' % r) rpms = None elif not bool(response): # Something other than a 404 means real failure, so complain. raise IOError("Failed to get %r: %r" % (url, response)) else: rpms = response.json() # PDC demands lowercase composeinfo['payload']['release']['short'] = \ composeinfo['payload']['release']['short'].lower() release = copy.copy(composeinfo['payload']['release']) release['release_type'] = release.pop('type', 'ga') release_id = "{short}-{version}".format(**release) pdcupdater.utils.ensure_release_exists(pdc, release_id, release) # https://github.com/product-definition-center/product-definition-center/issues/228 # https://pdc.fedoraproject.org/rest_api/v1/compose-images/ pdc['compose-images']._(dict( release_id=release_id, composeinfo=composeinfo, image_manifest=images, )) # https://pdc.fedoraproject.org/rest_api/v1/compose-rpms/ if rpms: pdc['compose-rpms']._(dict( release_id=release_id, composeinfo=composeinfo, rpm_manifest=rpms, ))
Python
0
@@ -5063,16 +5063,163 @@ , 'ga')%0A +%0A # PDC doesn't know about this field which showed up recently in pungi%0A # productmd metadata here.%0A release.pop('internal')%0A%0A
da12bb0058cb48d3262eb70469aa30cdb8312ee2
fix typos/bugs/indexing in block dicing
Control/dice_block.py
Control/dice_block.py
import os import sys import subprocess import h5py def check_file(filename): # verify the file has the expected data f = h5py.File(filename, 'r') if set(f.keys()) != set(['segmentations', 'probabilities']): os.unlink(filename) return False return True try: args = sys.argv[1:] i_min = int(args.pop()) j_min = int(args.pop()) i_max = int(args.pop()) j_max = int(args.pop()) output = args.pop() input_slices = args if os.path.exists(segmentations_file): print segmentations_file, "already exists" if check_file(segmentations_file): sys.exit(0) else: os.unlink(output) # Write to a temporary location to avoid partial files temp_file_path = output + '_partial' out_f = h5py.File(temp_file_path, 'classify') num_slices = len(input_slices) for slice_idx, slice in enumerate(input_slices): in_f = h5py.File(slice, 'r') segs = in_f['segmentations'][i_min:i_max, j_min:j_max, :] probs = in_f['segmentations'][i_min:i_max, j_min:j_max] if not 'segmentations' in out_f.keys(): outsegs = out_f.create_dataset('segmentations', tuple(list(segs.shape) + [num_slices]), dtype=segs.dtype, chunks=(64, 64, segs.shape[2], 1)) outprobs = out_f.create_dataset('probabilities', dtype=probabilities.dtype, chunks=(64, 64, num_slices) chunks=(64, 64, 1)) outsegs[:, :, :, slice_idx] = segs outprobs[:, :, slice_idx] = probs outf.close() # move to final location os.rename(output + '_partial', output) print "Successfully wrote", output except KeyboardInterrupt: pass
Python
0.000017
@@ -325,32 +325,33 @@ = int(args.pop( +0 ))%0A j_min = i @@ -358,24 +358,25 @@ nt(args.pop( +0 ))%0A i_max @@ -387,24 +387,25 @@ nt(args.pop( +0 ))%0A j_max @@ -420,16 +420,17 @@ rgs.pop( +0 ))%0A o @@ -446,16 +446,17 @@ rgs.pop( +0 )%0A in @@ -500,61 +500,37 @@ sts( -segmentations_file):%0A print segmentations_file +output):%0A print output , %22a @@ -570,26 +570,14 @@ ile( -segmentations_file +output ):%0A @@ -787,16 +787,9 @@ h, ' -classify +w ')%0A%0A @@ -997,36 +997,36 @@ obs = in_f%5B' -segmentation +probabilitie s'%5D%5Bi_min:i_ @@ -1482,33 +1482,47 @@ -dtype=probabilities.dtype +tuple(list(probs.shape) + %5Bnum_slices%5D) ,%0A @@ -1567,35 +1567,26 @@ -chunks=(64, 64, num_slices) +dtype=probs.dtype, %0A @@ -1739,16 +1739,17 @@ %0A out +_ f.close(
2443c891e5f9cccb5c36b02303a3b9b7a94a4c45
Change Jinja escape sequences.
generate.py
generate.py
#!/usr/bin/env python3 import os import shutil from jinja2 import Environment,FileSystemLoader from pygments import highlight from pygments.lexers import TexLexer from pygments.formatters import HtmlFormatter from subprocess import Popen,PIPE env = Environment(loader=FileSystemLoader("tmpl")) snippets_dir = "snippets" dist_dir = "dist" html_index = "/index.html" gen_snippets_dir = "/gen_snippets" static_dir = "static" shutil.rmtree(dist_dir, ignore_errors=True) shutil.copytree(static_dir, dist_dir) os.makedirs(dist_dir+"/"+gen_snippets_dir) snippets = [] for subdir, dirs, files in os.walk(snippets_dir): for fname in files: trimmedName, ext = os.path.splitext(fname) full_path = subdir + "/" + fname if ext == '.tex': with open(full_path, "r") as snippet_f: gen_tex_name = gen_snippets_dir+"/"+fname gen_pdf_name = gen_snippets_dir+"/"+trimmedName+".pdf" gen_png_name = gen_snippets_dir+"/"+trimmedName+".png" snippet_content = snippet_f.read().strip() with open(dist_dir+"/"+gen_tex_name, "w") as f: f.write(env.get_template("base.jinja.tex").render( content=snippet_content )) snippets.append({ 'fname': trimmedName, 'pdf': gen_pdf_name, 'png': gen_png_name, 'content': highlight(snippet_content, TexLexer(), HtmlFormatter()) }) p = Popen(['make', "-f", "../../Makefile.slides", "-C", dist_dir+"/"+gen_snippets_dir], stdout=PIPE, stderr=PIPE) out = p.communicate() if out[1]: print("Warning: Make stderr non-empty.") print("===Stdout:") print(out[0].decode()) print("===Stderr:") print(out[1].decode()) with open("tmpl/preamble.tex", "r") as f: preamble = f.read() with open(dist_dir+"/"+html_index, "w") as idx_f: idx_f.write(env.get_template("index.jinja.html").render( snippets=snippets, base=highlight( env.get_template("base.jinja.tex").render( content="Start content here." ), TexLexer(), HtmlFormatter() ) ))
Python
0
@@ -288,16 +288,123 @@ (%22tmpl%22) +,%0A block_start_string='~%7B',block_end_string='%7D~',%0A variable_start_string='~%7B%7B', variable_end_string='%7D%7D~' )%0A%0Asnipp
18aa5e20a5dbc931f48774c4bf034e6efe022923
Implement 'force' and 'directory' options
generate.py
generate.py
#!/usr/bin/env python import sys import os import re from optparse import OptionParser from crontab import CronTab from jinja2 import FileSystemLoader, Environment from yaml import dump def remove_user_from_command(command_with_user): match = re.search(r'\{{0,2}\s?\w+\s?\}{0,2}\s(.*)', command_with_user) return match.group(1) if match else command_with_user def replace_template_variables(command): config_vars = [] def replace(input_string): config_vars.append(input_string.group(1)) return '{{{}}}'.format(input_string.group(1)) formatted_string = re.sub(r'\{{2}\s*(\w+)\s*\}{2}', replace, command) formatted_args = ', '.join( ['{0}=task_config[\'{0}\']'.format(var) for var in config_vars]) if config_vars: result_string = '\'{0}\'.format({1})'.format( formatted_string, formatted_args) else: result_string = '\'{0}\''.format(formatted_string) return result_string, config_vars def task_name(shell_command): match = re.search(r'\/(.*)\.', shell_command) task_name = match.group(1) if match else '' task_name = task_name.replace('-', '_') return task_name def main(): parser = OptionParser() parser.add_option("-d", "--directory", dest="directory", help="directory for output files") parser.add_option("-f", "--force", action="store_true", dest="force", default=False, help="force file overwrite") (options, args) = parser.parse_args() env = Environment(loader=FileSystemLoader('.')) for cron in [CronTab(tabfile=os.path.abspath(arg)) for arg in args]: for job in cron: test_template = env.get_template('workflow-test-template.jj2') workflow_template = env.get_template('workflow-template.jj2') task = task_name(job.command) command = remove_user_from_command(job.command) command, vars = replace_template_variables(command) values = { 'hour': job.hour, 'minute': job.minute, 'task_config_filename': task + '.yaml', 'dag_id': task, 'task_id': task, 'command': command } with open(task + '.py', 'w') as wfile: wfile.write(workflow_template.render(**values)) with open('test_' + task + '.py', 'w') as tfile: tfile.write(test_template.render(workflow_module_name=task)) with open(task + '.yaml', 'w') as cfile: dump({var: '' for var in vars}, cfile) return 0 if __name__ == '__main__': sys.exit(main())
Python
0.999999
@@ -1172,16 +1172,152 @@ _name%0A%0A%0A +def render_to_file(filename, template, **kwargs):%0A with open(filename, 'w') as ofile:%0A ofile.write(template.render(kwargs))%0A%0A%0A def main @@ -1464,16 +1464,28 @@ t files%22 +, default='' )%0A pa @@ -2435,75 +2435,333 @@ -with open(task + '.py', 'w') as wfile:%0A wfile.write( +if options.directory and not os.path.exists(options.directory):%0A os.mkdir(options.directory)%0A%0A workflow_filename = os.path.join(%0A options.directory, task + '.py')%0A if not os.path.exists(workflow_filename) or options.force:%0A render_to_file(workflow_filename, work @@ -2773,24 +2773,18 @@ template -.render( +, **values @@ -2784,17 +2784,16 @@ *values) -) %0A%0A @@ -2802,106 +2802,256 @@ -with open('test_' + task + '.py', 'w') as tfile:%0A tfile.write(test_template.render( +test_filename = os.path.join(%0A options.directory, 'test_' + task + '.py')%0A if not os.path.exists(test_filename) or options.force:%0A render_to_file(test_filename, test_template,%0A work @@ -3072,17 +3072,16 @@ me=task) -) %0A%0A @@ -3090,32 +3090,201 @@ -with open(task + '.yaml' +config_filename = os.path.join(%0A options.directory, task + '.yaml')%0A if not os.path.exists(config_filename) or options.force:%0A with open(config_filename , 'w @@ -3292,24 +3292,28 @@ ) as cfile:%0A +
6da8bcbf2946b35188e99474b87d2c79856895a1
fix an import
source/jormungandr/jormungandr/scripts/qualifier.py
source/jormungandr/jormungandr/scripts/qualifier.py
# coding=utf-8 from ANSI import term import navitiacommon.response_pb2 as response_pb2 from functools import partial from datetime import datetime, timedelta import logging #compute the duration to get to the transport plus que transfert def get_nontransport_duration(journey): sections = journey.sections current_duration = 0 for section in sections: if section.type == response_pb2.STREET_NETWORK \ or section.type == response_pb2.TRANSFER: current_duration += section.duration return current_duration def has_car(journey): for section in journey.sections: if section.type == response_pb2.STREET_NETWORK: if section.street_network.mode == response_pb2.Car: return True return False def min_from_criteria(journey_list, criteria): best = None for journey in journey_list: if best is None: best = journey continue for crit in criteria: val = crit(journey, best) #we stop at the first criterion that answers if val == 0: # 0 means the criterion cannot decide continue if val > 0: best = journey break return best # The caracteristic consists in 2 things : # - a list of constraints # the constraints filter a sublist of journey # - a list of optimisation critera # the criteria select the best journey in the sublist class trip_carac: def __init__(self, constraints, criteria): self.constraints = constraints self.criteria = criteria class and_filters: def __init__(self, filters): self.filters = filters def __call__(self, value): for f in self.filters: if not f(value): return False return True def get_arrival_datetime(journey): return datetime.strptime(journey.arrival_date_time, "%Y%m%dT%H%M%S") def choose_standard(journeys): standard = None for journey in journeys: car = has_car(journey) if standard is None or has_car(standard) and not car: standard = journey # the standard shouldnt use the car if possible continue if not car and standard.arrival_date_time > journey.arrival_date_time: standard = journey return standard #comparison of 2 fields. 
0=>equality, 1=>1 better than 2 def compare(field_1, field_2): if field_1 == field_2: return 0 elif field_1 < field_2: return 1 else: return -1 #criteria transfers_crit = lambda j_1, j_2: compare(j_1.nb_transfers, j_2.nb_transfers) def arrival_crit(j_1, j_2): return compare(j_1.arrival_date_time, j_2.arrival_date_time) def nonTC_crit(j_1, j_2): duration1 = get_nontransport_duration(j_1) duration2 = get_nontransport_duration(j_2) return compare(duration1, duration2) def qualifier_one(journeys): if not journeys: logging.info("no journeys to qualify") return standard = choose_standard(journeys) assert standard is not None #constraints def journey_length_constraint(journey, max_evolution): max_allow_duration = standard.duration * (1 + max_evolution) return journey.duration <= max_allow_duration def journey_arrival_constraint(journey, max_mn_shift): arrival_date_time = get_arrival_datetime(standard) max_date_time = arrival_date_time + timedelta(minutes=max_mn_shift) return get_arrival_datetime(journey) <= max_date_time def nonTC_relative_constraint(journey, evol): transport_duration = get_nontransport_duration(standard) max_allow_duration = transport_duration * (1 + evol) return get_nontransport_duration(journey) <= max_allow_duration def nonTC_abs_constraint(journey, max_mn_shift): transport_duration = get_nontransport_duration(standard) max_allow_duration = transport_duration + max_mn_shift return get_nontransport_duration(journey) <= max_allow_duration def no_train(journey): ter_uris = ["network:TER", "network:SNCF"] #TODO share this list has_train = any(section.pt_display_informations.uris.network in ter_uris for section in journey.sections) return not has_train #definition of the journeys to qualify trip_caracs = [ #the cheap journey, is the fastest one without train ("cheap", trip_carac([ partial(no_train), #partial(journey_length_constraint, max_evolution=.50), #partial(journey_arrival_constraint, max_mn_shift=40), ], [ transfers_crit, arrival_crit, nonTC_crit ] )), ("healthy", trip_carac([ partial(journey_length_constraint, max_evolution=.20), partial(journey_arrival_constraint, max_mn_shift=20), partial(nonTC_abs_constraint, max_mn_shift=20 * 60), ], [ transfers_crit, arrival_crit, nonTC_crit ] )), ("comfort", trip_carac([ partial(journey_length_constraint, max_evolution=.40), partial(journey_arrival_constraint, max_mn_shift=40), partial(nonTC_relative_constraint, evol=-.1), ], [ transfers_crit, nonTC_crit, arrival_crit, ] )), ("rapid", trip_carac([ partial(journey_length_constraint, max_evolution=.10), partial(journey_arrival_constraint, max_mn_shift=10), ], [ transfers_crit, arrival_crit, nonTC_crit ] )), ] for name, carac in trip_caracs: sublist = filter(and_filters(carac.constraints), journeys) best = min_from_criteria(sublist, carac.criteria) if best is None: continue best.type = name
Python
0.004115
@@ -12,30 +12,8 @@ f-8%0A -from ANSI import term%0A impo
54e2359ed2cd75b87dc4a8007df6b252af3a3765
fix typo
HARK/ConsumptionSaving/tests/test_ConsLaborModel.py
HARK/ConsumptionSaving/tests/test_ConsLaborModel.py
from HARK.ConsumptionSaving.ConsLaborModel import ( LaborIntMargConsumerType, init_labor_lifecycle, ) import unittest class test_LaborIntMargConsumerType(unittest.TestCase): def setUp(self): self.model = LaborIntMargConsumerType() self.model_finte_lifecycle = LaborIntMargConsumerType(**init_labor_lifecycle) self.model_finte_lifecycle.cycles = 1 def test_solution(self): self.model.solve() self.model_finte_lifecycle.solve() self.model.T_sim = 120 self.model.track_vars = ["bNrmNow", "cNrmNow"] self.model.initializeSim() self.model.simulate()
Python
0.999991
@@ -260,32 +260,33 @@ self.model_fin +i te_lifecycle = L @@ -347,32 +347,33 @@ self.model_fin +i te_lifecycle.cyc @@ -459,16 +459,17 @@ odel_fin +i te_lifec