Dataset schema (column, type, observed range):

commit        stringlengths   40 to 40 characters
subject       stringlengths   1 to 3.25k characters
old_file      stringlengths   4 to 311 characters
new_file      stringlengths   4 to 311 characters
old_contents  stringlengths   0 to 26.3k characters
lang          stringclasses   3 values
proba         float64         0 to 1
diff          stringlengths   0 to 7.82k characters
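Each record below is one row of this table: a commit hash, the commit subject, the old and new file paths, the pre-commit file contents, the language, a probability score, and the diff. A minimal sketch of how such rows might be loaded and decoded follows; it assumes the table is published as a Hugging Face dataset (the dataset name below is hypothetical) and that the diff column is Google diff-match-patch patch text, which its @@ -start,length +start,length @@ headers and percent-encoded newlines suggest but do not prove.

# Usage sketch only: the dataset path is an assumption, and the diff format
# is assumed (not verified) to be diff-match-patch patch text.
from datasets import load_dataset                # pip install datasets
from diff_match_patch import diff_match_patch    # pip install diff-match-patch

ds = load_dataset("some-org/commit-diffs", split="train")  # hypothetical name
row = ds[0]
print(row["commit"], "-", row["subject"])

# Applying the patch to old_contents should reconstruct the post-commit file.
dmp = diff_match_patch()
patches = dmp.patch_fromText(row["diff"])
new_contents, hunk_results = dmp.patch_apply(patches, row["old_contents"])
assert all(hunk_results), "some hunks failed to apply"

Note that patch_fromText expects each hunk header and each context/insert/delete run on its own line, so rows whose diff field has been flattened onto a single line need their newlines restored before parsing.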
ca2a6d06f09f5f2d511d6cf676fdd9a8f6c411cf
remove cruft, bump heroku
src/settings/production.py
src/settings/production.py
from base import *

DEBUG = False

ALLOWED_HOSTS = ["*"]

SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "afaefawe23af")
assert SECRET_KEY, "Set your DJANGO_SECRET_KEY env var"

# Celery
BROKER_URL = os.environ.get('CLOUDAMQP_URL', None)
# BROKER_URL = os.environ.get("RABBITMQ_BIGWIG_URL", None)

#assert BROKER_URL, "Celery BROKER_URL env var missing!"

# Memcached
CACHES = {
    'default': {
        'BACKEND': 'django_bmemcached.memcached.BMemcached',
        'LOCATION': os.environ.get('MEMCACHEDCLOUD_SERVERS', '').split(','),
        'OPTIONS': {
            'username': os.environ.get('MEMCACHEDCLOUD_USERNAME'),
            'password': os.environ.get('MEMCACHEDCLOUD_PASSWORD')
        }
    }
}
Python
0
@@ -237,67 +237,8 @@
 one) -%0A# BROKER_URL = os.environ.get(%22RABBITMQ_BIGWIG_URL%22, None) %0A%0A#a
5526f8e3dca2f84fce34df5a134bada8479a2f69
Fix dumpdata ordering for VRFs
netbox/ipam/models/__init__.py
netbox/ipam/models/__init__.py
from .fhrp import *
from .ip import *
from .services import *
from .vlans import *
from .vrfs import *

__all__ = (
    'ASN',
    'Aggregate',
    'IPAddress',
    'IPRange',
    'FHRPGroup',
    'FHRPGroupAssignment',
    'Prefix',
    'RIR',
    'Role',
    'RouteTarget',
    'Service',
    'ServiceTemplate',
    'VLAN',
    'VLANGroup',
    'VRF',
)
Python
0
@@ -1,14 +1,124 @@
 -from .fhrp +# Ensure that VRFs are imported before IPs/prefixes so dumpdata & loaddata work correctly%0Afrom .fhrp import *%0Afrom .vrfs imp
@@ -185,36 +185,16 @@
 import * -%0Afrom .vrfs import * %0A%0A__all_
e0c046abe14d7666d9fea54dc0339579f2b0ba98
Fix indentation
neuralmonkey/runners/runner.py
neuralmonkey/runners/runner.py
from typing import Callable, Dict, List

import numpy as np
import tensorflow as tf

from neuralmonkey.runners.base_runner import (BaseRunner,
                                              Executable,
                                              ExecutionResult,
                                              NextExecute)

# tests: mypy,pylint
# pylint: disable=too-few-public-methods


class GreedyRunner(BaseRunner):

    def __init__(self,
                 output_series: str,
                 decoder,
                 postprocess: Callable[[List[str]], List[str]]=None) -> None:
        super(GreedyRunner, self).__init__(output_series, decoder)
        self._postprocess = postprocess
        self.image_summaries = tf.merge_summary(
            tf.get_collection("summary_val_plots"))

    def get_executable(self, train=False, summaries=True):
        if train:
            fecthes = {"train_xent": self._decoder.train_loss,
                       "runtime_xent": self._decoder.runtime_loss}
        else:
            fecthes = {"train_xent": tf.zeros([]),
                       "runtime_xent": tf.zeros([])}
        fecthes["decoded_logprobs"] = self._decoder.runtime_logprobs

        if summaries:
            fecthes['image_summaries'] = self.image_summaries

        return GreedyRunExecutable(self.all_coders, fecthes,
                                   self._decoder.vocabulary,
                                   self._postprocess)

    @property
    def loss_names(self) -> List[str]:
        return ["train_xent", "runtime_xent"]


class GreedyRunExecutable(Executable):

    def __init__(self, all_coders, fecthes, vocabulary, postprocess):
        self.all_coders = all_coders
        self._fetches = fecthes
        self._vocabulary = vocabulary
        self._postprocess = postprocess

        self.decoded_sentences = []
        self.result = None  # type: Option[ExecutionResult]

    def next_to_execute(self) -> NextExecute:
        """Get the feedables and tensors to run."""
        return self.all_coders, self._fetches, {}

    def collect_results(self, results: List[Dict]) -> None:
        train_loss = 0.
        runtime_loss = 0.
        summed_logprobs = [-np.inf for _ in self._fetches["decoded_logprobs"]]

        for sess_result in results:
            train_loss += sess_result["train_xent"]
            runtime_loss += sess_result["runtime_xent"]

            for i, logprob in enumerate(sess_result["decoded_logprobs"]):
                summed_logprobs[i] = np.logaddexp(summed_logprobs[i], logprob)

        argmaxes = [np.argmax(l, axis=1) for l in summed_logprobs]
        decoded_tokens = self._vocabulary.vectors_to_sentences(argmaxes)

        if self._postprocess is not None:
            decoded_tokens = [self._postprocess(seq) for seq in decoded_tokens]

        image_summaries = results[0].get('image_summaries')

        self.result = ExecutionResult(
            outputs=decoded_tokens,
            losses=[train_loss, runtime_loss],
            scalar_summaries=None,
            histogram_summaries=None,
            image_summaries=image_summaries
        )
Python
0.017244
@@ -654,20 +654,16 @@
 - tf.get_c
29c7e28e56e6affb60a71364a0e55ad47baa0952
Add dependency for number of posts on archive pages.
nikola/plugins/task/archive.py
nikola/plugins/task/archive.py
# -*- coding: utf-8 -*-

# Copyright © 2012-2013 Roberto Alsina and others.

# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import os

# for tearDown with _reload we cannot use 'import from' to access LocaleBorg
import nikola.utils
from nikola.plugin_categories import Task
from nikola.utils import config_changed


class Archive(Task):
    """Render the post archives."""

    name = "render_archive"

    def set_site(self, site):
        site.register_path_handler('archive', self.archive_path)
        return super(Archive, self).set_site(site)

    def gen_tasks(self):
        kw = {
            "messages": self.site.MESSAGES,
            "translations": self.site.config['TRANSLATIONS'],
            "output_folder": self.site.config['OUTPUT_FOLDER'],
            "filters": self.site.config['FILTERS'],
            "create_monthly_archive": self.site.config['CREATE_MONTHLY_ARCHIVE'],
            "create_single_archive": self.site.config['CREATE_SINGLE_ARCHIVE'],
        }
        self.site.scan_posts()
        yield self.group_task()
        # TODO add next/prev links for years
        if kw['create_monthly_archive'] and kw['create_single_archive']:
            raise Exception('Cannot create monthly and single archives at the same time.')
        for lang in kw["translations"]:
            archdata = self.site.posts_per_year  # A bit of a hack.
            if kw['create_single_archive']:
                archdata = {None: self.site.posts}

            for year, posts in archdata.items():
                output_name = os.path.join(
                    kw['output_folder'], self.site.path("archive", year, lang))
                context = {}
                context["lang"] = lang
                if year:
                    context["title"] = kw["messages"][lang]["Posts for year %s"] % year
                else:
                    context["title"] = kw["messages"][lang]["Archive"]
                context["permalink"] = self.site.link("archive", year, lang)
                if not kw["create_monthly_archive"]:
                    template_name = "list_post.tmpl"
                    post_list = [self.site.global_data[post] for post in posts]
                    post_list.sort(key=lambda a: a.date)
                    post_list.reverse()
                    context["posts"] = post_list
                else:  # Monthly archives, just list the months
                    months = set([m.split('/')[1] for m in self.site.posts_per_month.keys() if m.startswith(str(year))])
                    months = sorted(list(months))
                    months.reverse()
                    template_name = "list.tmpl"
                    context["items"] = [[nikola.utils.LocaleBorg().get_month_name(int(month), lang), month] for month in months]
                    post_list = []

                task = self.site.generic_post_list_renderer(
                    lang,
                    [],
                    output_name,
                    template_name,
                    kw['filters'],
                    context,
                )
                task_cfg = {1: task['uptodate'][0].config, 2: kw}
                task['uptodate'] = [config_changed(task_cfg)]
                task['basename'] = self.name
                yield task

            if not kw["create_monthly_archive"]:
                continue  # Just to avoid nesting the other loop in this if
            template_name = "list_post.tmpl"
            for yearmonth, posts in self.site.posts_per_month.items():
                output_name = os.path.join(
                    kw['output_folder'], self.site.path("archive", yearmonth, lang))
                year, month = yearmonth.split('/')
                post_list = [self.site.global_data[post] for post in posts]
                post_list.sort(key=lambda a: a.date)
                post_list.reverse()
                context = {}
                context["lang"] = lang
                context["posts"] = post_list
                context["permalink"] = self.site.link("archive", year, lang)
                context["title"] = kw["messages"][lang]["Posts for {month} {year}"].format(
                    year=year, month=nikola.utils.LocaleBorg().get_month_name(int(month), lang))

                task = self.site.generic_post_list_renderer(
                    lang,
                    post_list,
                    output_name,
                    template_name,
                    kw['filters'],
                    context,
                )
                task_cfg = {1: task['uptodate'][0].config, 2: kw}
                task['uptodate'] = [config_changed(task_cfg)]
                task['basename'] = self.name
                yield task

        if not kw['create_single_archive']:
            # And an "all your years" page for yearly and monthly archives
            years = list(self.site.posts_per_year.keys())
            years.sort(reverse=True)
            template_name = "list.tmpl"
            kw['years'] = years
            for lang in kw["translations"]:
                context = {}
                output_name = os.path.join(
                    kw['output_folder'], self.site.path("archive", None, lang))
                context["title"] = kw["messages"][lang]["Archive"]
                context["items"] = [(year, self.site.link("archive", year, lang)) for year in years]
                context["permalink"] = self.site.link("archive", None, lang)
                task = self.site.generic_post_list_renderer(
                    lang,
                    [],
                    output_name,
                    template_name,
                    kw['filters'],
                    context,
                )
                task_cfg = {1: task['uptodate'][0].config, 2: kw}
                task['uptodate'] = [config_changed(task_cfg)]
                task['basename'] = self.name
                yield task

    def archive_path(self, name, lang):
        if name:
            return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
                                  self.site.config['ARCHIVE_PATH'], name,
                                  self.site.config['INDEX_FILE']] if _f]
        else:
            return [_f for _f in [self.site.config['TRANSLATIONS'][lang],
                                  self.site.config['ARCHIVE_PATH'],
                                  self.site.config['ARCHIVE_FILENAME']] if _f]
Python
0
@@ -4074,32 +4074,106 @@
 )%0A + n = len(post_list) if 'posts' in context else len(months)%0A
@@ -4212,32 +4212,38 @@
 0%5D.config, 2: kw +, 3: n %7D%0A
@@ -5709,32 +5709,51 @@
 0%5D.config, 2: kw +, 3: len(post_list) %7D%0A
@@ -7018,16 +7018,31 @@
 g, 2: kw +, 3: len(years) %7D%0A
fe0691595eea7197db07f3505446e1553df3d188
Bump version number after merging pull request.
src/openvr/version.py
src/openvr/version.py
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
__version__ = '1.0.0601'
Python
0
@@ -300,11 +300,12 @@
 '1.0.060 -1 +2a '%0A
4e74ba40f442dd27ddd29464b518c2a06ad1019a
Bump version
src/oscar/__init__.py
src/oscar/__init__.py
import os


# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 21)


def get_short_version():
    return '%s.%s' % (VERSION[0], VERSION[1])


def get_version():
    return '{}.{}.{}-{}-{}'.format(*VERSION)


# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')

OSCAR_CORE_APPS = [
    'oscar',
    'oscar.apps.analytics',
    'oscar.apps.checkout',
    'oscar.apps.address',
    'oscar.apps.shipping',
    'oscar.apps.catalogue',
    'oscar.apps.catalogue.reviews',
    'oscar.apps.partner',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.order',
    'oscar.apps.customer',
    'oscar.apps.promotions',
    'oscar.apps.voucher',
    'oscar.apps.wishlists',
    'oscar.apps.dashboard',
    'oscar.apps.dashboard.reports',
    'oscar.apps.dashboard.users',
    'oscar.apps.dashboard.orders',
    'oscar.apps.dashboard.promotions',
    'oscar.apps.dashboard.catalogue',
    'oscar.apps.dashboard.offers',
    'oscar.apps.dashboard.partners',
    'oscar.apps.dashboard.pages',
    'oscar.apps.dashboard.ranges',
    'oscar.apps.dashboard.reviews',
    'oscar.apps.dashboard.vouchers',
    'oscar.apps.dashboard.communications',
    # 3rd-party apps that oscar depends on
    'treebeard',
    'sorl.thumbnail',
    'django_tables2',
]


def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides
    """
    if not overrides:
        return OSCAR_CORE_APPS

    def get_app_label(app_label, overrides):
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if override.endswith(pattern):
                if 'dashboard' in override and 'dashboard' not in pattern:
                    continue
                return override
        return app_label

    apps = []
    for app_label in OSCAR_CORE_APPS:
        apps.append(get_app_label(app_label, overrides))
    return apps
Python
0
@@ -115,17 +115,17 @@
 tfit', 2 -1 +2 )%0A%0A%0Adef
bbf5ba5b87acb4a2dc858b15030eef8abae6b52a
Can't raise a string
src/parsy/__init__.py
src/parsy/__init__.py
# -*- coding: utf-8 -*- #

import re
from .version import __version__
from functools import wraps
from collections import namedtuple


def line_info_at(stream, index):
    if index > len(stream):
        raise "invalid index"
    prefix = stream[0:index]
    line = prefix.count("\n")
    last_nl = prefix.rfind("\n")
    col = index - 1 - last_nl if last_nl >= 0 else index
    return (line, col)


class ParseError(RuntimeError):
    def __init__(self, expected, stream, index):
        self.expected = expected
        self.stream = stream
        self.index = index

    def line_info(self):
        return line_info_at(self.stream, self.index)

    def __str__(self):
        (line, col) = self.line_info()
        return 'parse error: expected {!s} at {!r}:{!r}'.format(self.expected, line, col)


class Parser(object):
    """
    A Parser is an object that wraps a function whose arguments are
    a string to be parsed and the index on which to begin parsing.
    The function returns a 3-tuple of (status, next_index, value),
    where the status is True if the parse was successful and False
    otherwise, the next_index is where to begin the next parse (or
    where to report a failure), and the value is the yielded value
    (or an error message).
    """

    def __init__(self, wrapped_fn):
        self.wrapped_fn = wrapped_fn

    def __call__(self, stream, index):
        return self.wrapped_fn(stream, index)

    def parse(self, string):
        """Parse a string and return the result or raise a ParseError."""
        (result, _) = (self << eof).parse_partial(string)
        return result

    def parse_partial(self, string):
        """
        Parse the longest possible prefix of a given string.
        Return a tuple of the result and the rest of the string,
        or raise a ParseError.
        """
        (status, index, value) = self(string, 0)
        if status:
            return (value, string[index:])
        else:
            raise ParseError(value, string, index)

    def bind(self, bind_fn):
        @Parser
        def bound_parser(stream, index):
            (success, new_index, value) = self(stream, index)
            if success:
                next_parser = bind_fn(value)
                return next_parser(stream, new_index)
            else:
                return (False, index, value)
        return bound_parser

    def map(self, map_fn):
        return self.bind(lambda res: success(map_fn(res)))

    def then(self, other):
        return self.bind(lambda _: other)

    def skip(self, other):
        return self.bind(lambda res: other.result(res))

    def result(self, res):
        return self >> success(res)

    def many(self):
        @Parser
        def many_parser(stream, index):
            aggregate = []
            next_index = index

            while True:
                (status, next_index, value) = self(stream, index)
                if status:
                    aggregate.append(value)
                    index = next_index
                else:
                    break

            return (True, index, aggregate)
        return many_parser

    def times(self, min, max=None):
        if max is None:
            max = min

        @Parser
        def times_parser(stream, index):
            aggregate = []
            next_index = index

            for times in range(0, min):
                (status, next_index, value) = self(stream, index)
                index = next_index
                if status:
                    aggregate.append(value)
                else:
                    return (False, index, value)

            for times in range(min, max):
                (status, next_index, value) = self(stream, index)
                if status:
                    index = next_index
                    aggregate.append(value)
                else:
                    break

            return (True, index, aggregate)
        return times_parser

    def at_most(self, n):
        return self.times(0, n)

    def at_least(self, n):
        @generate
        def at_least_parser():
            start = yield self.times(n)
            end = yield self.many()
            return start + end
        return at_least_parser

    def desc(self, description):
        return self | fail(description)

    def mark(self):
        @generate
        def marked():
            start = yield line_info
            body = yield self
            end = yield line_info
            return (start, body, end)
        return marked

    def __or__(self, other):
        if not isinstance(other, Parser):
            raise TypeError('{!r} is not a parser!'.format(other))

        @Parser
        def or_parser(stream, index):
            def failure(new_index, message):
                # we use the closured index here so it backtracks
                return other(stream, index)

            (status, next_index, value) = self(stream, index)
            if status:
                return (True, next_index, value)
            else:
                return other(stream, index)

        return or_parser

    # haskelley operators, for fun #

    # >>
    def __rshift__(self, other):
        return self.then(other)

    # <<
    def __lshift__(self, other):
        return self.skip(other)


# combinator syntax
def generate(fn):
    if isinstance(fn, str):
        return lambda f: generate(f).desc(fn)

    @wraps(fn)
    @Parser
    def generated(stream, index):
        iterator = fn()
        value = None
        try:
            while True:
                next_parser = iterator.send(value)
                (status, index, value) = next_parser(stream, index)
                if not status:
                    return (False, index, value)
        except StopIteration as result:
            returnVal = result.value
            if isinstance(returnVal, Parser):
                return returnVal(stream, index)

            return (True, index, returnVal)

    return generated.desc(fn.__name__)


@Parser
def index(stream, index):
    return (True, index, index)


@Parser
def line_info(stream, index):
    return (True, index, line_info_at(stream, index))


def success(val):
    return Parser(lambda _, index: (True, index, val))


def fail(message):
    return Parser(lambda _, index: (False, index, message))


def string(s):
    slen = len(s)

    @Parser
    def string_parser(stream, index):
        if stream[index:index+slen] == s:
            return (True, index+slen, s)
        else:
            return (False, index, s)

    string_parser.__name__ = 'string_parser<%s>' % s

    return string_parser


def regex(exp, flags=0):
    if isinstance(exp, str):
        exp = re.compile(exp, flags)

    @Parser
    def regex_parser(stream, index):
        match = exp.match(stream, index)
        if match:
            return (True, match.end(), match.group(0))
        else:
            return (False, index, exp.pattern)

    regex_parser.__name__ = 'regex_parser<%s>' % exp.pattern

    return regex_parser


whitespace = regex(r'\s+')


@Parser
def letter(stream, index):
    if index < len(stream):
        if stream[index].isalpha():
            return (True, index+1, stream[index])

    return (False, index, 'a letter')


@Parser
def digit(stream, index):
    if index < len(stream):
        if stream[index].isdigit():
            return (True, index+1, stream[index])

    return (False, index, 'a digit')


@Parser
def eof(stream, index):
    if index < len(stream):
        return (False, index, 'EOF')

    return (True, index, None)
Python
0.999999
@@ -187,23 +187,42 @@
 stream): - raise +%0A raise ValueError( %22invalid
@@ -228,17 +228,17 @@
 d index%22 -%0A +) %0A pre
6011cf6d892d4ca941c47b578fdaebc80672f532
Raise an error if the run was cancelled.
api/kiveapi/runstatus.py
api/kiveapi/runstatus.py
""" This module defines a class that keeps track of a run in Kive. """ from . import KiveRunFailedException from .dataset import Dataset class RunStatus(object): """ This keeps track of a run in Kive. There isn't a direct analogue in Kive for this, but it represents a part of Run's functionality. """ def __init__(self, obj, api): self.run_id = obj['id'] self.pipeline_id = obj['pipeline'] self.url = obj['run_status'] self.results_url = obj['run_outputs'] self.api = api self.raw = obj def _grab_stats(self): data = self.api.get(self.url).json() if "!" in data["status"]: raise KiveRunFailedException("Run %s failed" % self.run_id) return data def get_status(self): """ Queries the server for the status of a run :return: A description string of the status """ # TODO: Make change kive to return sane overall statuses status = self._grab_stats()['status'] if status == '?': return "Waiting to start..." if '!' in status: raise KiveRunFailedException("Run %s failed" % self.run_id) if '*' in status and '.' not in status: return 'Complete.' return 'Running...' def is_waiting(self): """ Returns whether or not the run is queued on the server for processing. :return: """ status = self._grab_stats()['status'] return status == '?' def is_running(self): """ Returns whether or not the run is running on the server :return: """ status = self._grab_stats() return status.get('start', False) and not status.get('end', False) def is_complete(self): """ Returns whether or not the run has completed. :return: """ status = self._grab_stats() return status.get('end', None) is not None def is_successful(self): """ Returns whether the run was successful, provided that it's also complete :return: """ return self.is_complete() def get_progress(self): """ Gets the current run's progress bar :return: """ return self._grab_stats()['status'] def get_progress_percent(self): """ Gets the current progress as a percentage. :return: """ status = self._grab_stats()['status'] return 100*float(status.count('*'))/float(len(status) - status.count('-')) def get_inputs(self): """ Gets all the datasets that fed this pipeline. :return: A list of Dataset objects. """ datasets = self.api.get(self.results_url).json()['input_summary'] return [Dataset(d, self.api) for d in datasets] def get_results(self): """ Gets all the datasets that resulted from this pipeline. Includes pipeline outputs and intermediate results. If the run is still active, return any outputs that are ready. :return: A dictionary of Dataset objects, keyed by name. """ datasets = self.api.get(self.results_url).json()['output_summary'] return {d['name']: Dataset(d, self.api) for d in datasets}
Python
0
@@ -736,16 +736,125 @@
 run_id)%0A + if %22x%22 in data%5B%22status%22%5D:%0A raise KiveRunFailedException(%22Run %25s cancelled%22 %25 self.run_id)%0A
@@ -1204,107 +1204,8 @@
 .%22%0A%0A - if '!' in status:%0A raise KiveRunFailedException(%22Run %25s failed%22 %25 self.run_id)%0A%0A
74d877abfad21b8d2865ac2491cbd1babb5fd82b
Make sure if this pattern needs repeating that we do it properly
api/nodes/serializers.py
api/nodes/serializers.py
from rest_framework import serializers as ser

from api.base.serializers import JSONAPISerializer, LinksField, Link, WaterbutlerLink
from website.models import Node
from framework.auth.core import Auth


class NodeSerializer(JSONAPISerializer):
    category_choices = Node.CATEGORY_MAP.keys()
    category_choices_string = ', '.join(["'{}'".format(choice) for choice in category_choices])
    filterable_fields = frozenset(['title', 'description', 'public'])

    id = ser.CharField(read_only=True, source='_id')
    title = ser.CharField(required=True)
    description = ser.CharField(required=False, allow_blank=True, allow_null=True)
    category = ser.ChoiceField(choices=category_choices, help_text="Choices: " + category_choices_string)
    date_created = ser.DateTimeField(read_only=True)
    date_modified = ser.DateTimeField(read_only=True)
    tags = ser.SerializerMethodField(help_text='A dictionary that contains two lists of tags: '
                                               'user and system. Any tag that a user will define in the UI will be '
                                               'a user tag')

    links = LinksField({
        'html': 'get_absolute_url',
        'children': {
            'related': Link('nodes:node-children', kwargs={'pk': '<pk>'}),
            'count': 'get_node_count',
        },
        'contributors': {
            'related': Link('nodes:node-contributors', kwargs={'pk': '<pk>'}),
            'count': 'get_contrib_count',
        },
        'pointers': {
            'related': Link('nodes:node-pointers', kwargs={'pk': '<pk>'}),
            'count': 'get_pointers_count',
        },
        'registrations': {
            'related': Link('nodes:node-registrations', kwargs={'pk': '<pk>'}),
            'count': 'get_registration_count',
        },
        'files': {
            'related': Link('nodes:node-files', kwargs={'pk': '<pk>'})
        },
    })
    properties = ser.SerializerMethodField(help_text='A dictionary of read-only booleans: registration, collection,'
                                                     'and dashboard. Collections are special nodes used by the Project '
                                                     'Organizer to, as you would imagine, organize projects. '
                                                     'A dashboard is a collection node that serves as the root of '
                                                     'Project Organizer collections. Every user will always have '
                                                     'one Dashboard')
    public = ser.BooleanField(source='is_public', help_text='Nodes that are made public will give read-only access '
                                                            'to everyone. Private nodes require explicit read '
                                                            'permission. Write and admin access are the same for '
                                                            'public and private nodes. Administrators on a parent '
                                                            'node have implicit read permissions for all child nodes')
    # TODO: finish me

    class Meta:
        type_ = 'nodes'

    def get_absolute_url(self, obj):
        return obj.absolute_url

    def get_node_count(self, obj):
        request = self.context['request']
        user = request.user
        if user.is_anonymous():
            auth = Auth(None)
        else:
            auth = Auth(user)
        nodes = [node for node in obj.nodes if node.can_view(auth) and node.primary]
        return len(nodes)

    def get_contrib_count(self, obj):
        return len(obj.contributors)

    def get_registration_count(self, obj):
        return len(obj.node__registrations)

    def get_pointers_count(self, obj):
        return len(obj.nodes_pointer)

    @staticmethod
    def get_properties(obj):
        ret = {
            'registration': obj.is_registration,
            'collection': obj.is_folder,
            'dashboard': obj.is_dashboard,
        }
        return ret

    @staticmethod
    def get_tags(obj):
        ret = {
            'system': [tag._id for tag in obj.system_tags],
            'user': [tag._id for tag in obj.tags],
        }
        return ret

    def create(self, validated_data):
        node = Node(**validated_data)
        node.save()
        return node

    def update(self, instance, validated_data):
        """Update instance with the validated data. Requires
        the request to be in the serializer context.
        """
        assert isinstance(instance, Node), 'instance must be a Node'
        if 'is_public' in validated_data:
            is_public = validated_data.pop('is_public')
        else:
            is_public = instance.is_public
        for attr, value in validated_data.items():
            setattr(instance, attr, value)
        request = self.context['request']
        user = request.user
        auth = Auth(user)
        if is_public != instance.is_public:
            privacy = 'public' if is_public else 'private'
            instance.set_privacy(privacy, auth)
        instance.save()
        return instance


class NodePointersSerializer(JSONAPISerializer):

    id = ser.CharField(read_only=True, source='_id')
    node_id = ser.CharField(source='node._id', help_text='The ID of the node that this pointer points to')
    title = ser.CharField(read_only=True, source='node.title', help_text='The title of the node that this pointer '
                                                                         'points to')

    class Meta:
        type_ = 'pointers'

    links = LinksField({
        'html': 'get_absolute_url',
    })

    def get_absolute_url(self, obj):
        pointer_node = Node.load(obj.node._id)
        return pointer_node.absolute_url

    def create(self, validated_data):
        request = self.context['request']
        user = request.user
        auth = Auth(user)
        node = self.context['view'].get_node()
        pointer_node = Node.load(validated_data['node']['_id'])
        pointer = node.add_pointer(pointer_node, auth, save=True)
        return pointer

    def update(self, instance, validated_data):
        pass


class NodeFilesSerializer(JSONAPISerializer):

    id = ser.CharField(read_only=True, source='_id')
    provider = ser.CharField(read_only=True)
    path = ser.CharField(read_only=True)
    item_type = ser.CharField(read_only=True)
    name = ser.CharField(read_only=True)
    metadata = ser.DictField(read_only=True)

    class Meta:
        type_ = 'files'

    links = LinksField({
        'self': WaterbutlerLink(kwargs={'node_id': '<node_id>'}),
        'self_methods': 'valid_self_link_methods',
        'related': Link('nodes:node-files', kwargs={'pk': '<node_id>'},
                        query_kwargs={'path': '<path>', 'provider': '<provider>'}),
    })

    @staticmethod
    def valid_self_link_methods(obj):
        return obj['valid_self_link_methods']

    def create(self, validated_data):
        # TODO
        pass

    def update(self, instance, validated_data):
        # TODO
        pass
Python
0
@@ -234,24 +234,268 @@
 erializer):%0A + # TODO: If we have to redo this implementation in any of the other serializers, subclass ChoiceField and make it%0A # handle blank choices properly. Currently DRF ChoiceFields ignore blank options, which is incorrect in this%0A # instance %0A categor
580c133c09758050aae30ae3aa453ce3c5b22e56
refactor python
Python/stack.py
Python/stack.py
__author__ = 'Daniel'


class Stack():
    def __init__(self):
        self.items = []

    def push(self, item):
        self.items.append(item)

    def is_empty(self):
        return self.items == []

    def size(self):
        return len(self.items)

    def pop(self):
        return self.items.pop()

    def peek(self):
        return self.items[len(self.items) - 1]


def check_parentheses(inp):
    stack = Stack()
    for c in inp:
        if c == ')' or c == ']' or c == '}' or c == '>':
            if stack.is_empty() or stack.pop() != c:
                return False
        if c == '(':
            stack.push(')')
        if c == '[':
            stack.push(']')
        if c == '{':
            stack.push('}')
        if c == '<':
            stack.push('>')
    return stack.is_empty()


def to_base(num, base):
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    stack = Stack()
    while num > 0:
        stack.push(digits[num % base])
        num //= base
    res = ""
    while not stack.is_empty():
        res += stack.pop()
    return res


def to_binary(num):
    return to_base(num, 2)


def to_postfix(string):
    tokens = string.split()
    prec = {"+": 1, "-": 1, "*": 2, "/": 2, "(": 0, ")": 0}
    operators = Stack()
    res = []
    for token in tokens:
        if token == "(":
            operators.push("(")
        elif token == ")":
            op = operators.pop()
            while op != "(":
                res.append(op)
                op = operators.pop()
        elif token in "+-*/":
            while not operators.is_empty() and prec[token] <= prec[operators.peek()]:
                res.append(operators.pop())
            operators.push(token)
        else:
            res.append(token)
    while not operators.is_empty():
        res.append(operators.pop())
    return " ".join(res)


def eval_postfix(string):
    tokens = string.split()
    evalStack = Stack()
    for token in tokens:
        if token == "+":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left + right)
        elif token == "-":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left - right)
        elif token == "*":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left * right)
        elif token == "/":
            right = evalStack.pop()
            left = evalStack.pop()
            evalStack.push(left / right)
        else:
            evalStack.push(int(token))
    return evalStack.pop()
Python
0.999983
@@ -1902,86 +1902,29 @@
 t()%0A +%0A -evalStack = Stack()%0A for token in tokens:%0A if token == %22+%22:%0A +def eval_op(f):%0A
@@ -1931,33 +1931,34 @@
 right = eval -S +_s tack.pop()%0A
@@ -1952,36 +1952,32 @@
 k.pop()%0A - left = evalStack
@@ -1963,33 +1963,34 @@
 left = eval -S +_s tack.pop()%0A
@@ -1988,33 +1988,30 @@
 p()%0A - eval -S +_s tack.push(le
@@ -2012,160 +2012,124 @@
 ush( +f( left - + +, right) -%0A elif token == %22-%22:%0A right = eval +)%0A%0A eval_stack = Stack -.pop ()%0A - left = evalStack.pop()%0A evalStack.push(left - right) +from operator import add, floordiv, mul, sub%0A%0A for token in tokens: %0A
@@ -2125,34 +2125,32 @@
 tokens:%0A -el if token == %22*%22:
@@ -2146,17 +2146,17 @@
 ken == %22 -* ++ %22:%0A
@@ -2166,106 +2166,71 @@
 -right = evalStack.pop()%0A left = evalStack.pop()%0A evalStack.push(left * right +eval_op(add)%0A elif token == %22-%22:%0A eval_op(sub )%0A
@@ -2250,17 +2250,17 @@
 ken == %22 -/ +* %22:%0A
@@ -2270,106 +2270,76 @@
 -right = evalStack.pop()%0A left = evalStack.pop()%0A evalStack.push(left / right +eval_op(mul)%0A elif token == %22/%22:%0A eval_op(floordiv )%0A
@@ -2362,25 +2362,26 @@
 eval -S +_s tack.push(in
@@ -2406,17 +2406,18 @@
 urn eval -S +_s tack.pop
343fa1849457202a393ccfdc5b86075cc1b0b88c
add observables
plugins/feeds/public/hybdrid_analysis.py
plugins/feeds/public/hybdrid_analysis.py
import logging
from datetime import timedelta

from core.errors import ObservableValidationError
from core.feed import Feed
from core.observables import Hash, Hostname


class Hybrid_Analysis(Feed):

    default_values = {
        "frequency": timedelta(minutes=5),
        "name": "Hybdrid-Analysis",
        "source": "https://www.hybrid-analysis.com/feed?json",
        "description": "Hybrid Analysis Public Feeds",
    }

    def update(self):
        for item in self.update_json(headers={'User-agent': 'VxApi Connector'})['data']:
            self.analyze(item)
        pass

    def analyze(self, item):
        sha256 = Hash.get_or_create(value=item['sha256'])

        tags = []
        context = {'source': self.name}

        if 'vxfamily' in item:
            tags.append(' '.join(item['vxfamily'].split('.')))

        if 'tags' in item:
            tags.extend(item['tags'])

        if 'threatlevel_human' in item:
            context['threatlevel_human'] = item['threatlevel_human']

        if 'threatlevel' in item:
            context['threatlevel'] = item['threatlevel']

        if 'type' in item:
            context['type'] = item['type']

        if 'size' in item:
            context['size'] = item['size']

        if 'vt_detect' in item:
            context['virustotal_score'] = item['vt_detect']

        if 'et_alerts_total' in item:
            context['et_alerts_total'] = item['et_alerts_total']

        if 'process_list' in item:
            context['count process spawn'] = len(item['process_list'])

        context['url'] = 'https://www.hybrid-analysis.com' + item['reporturl']

        sha256.add_context(context)
        sha256.tag(tags)

        md5 = Hash.get_or_create(value=item['md5'])
        md5.tag(tags)
        md5.add_context(context)

        sha1 = Hash.get_or_create(value=item['sha1'])
        sha1.tag(tags)
        sha1.add_context(context)

        sha256.active_link_to(md5, 'md5', self.name)
        sha256.active_link_to(sha1, 'sha1', self.name)

        if 'domains' in item:
            for domain in item['domains']:
                try:
                    new_host = Hostname.get_or_create(value=domain)
                    sha256.active_link_to(new_host, 'C2', self.name)
                    sha1.active_link_to(new_host, 'C2', self.name)
                    md5.active_link_to(new_host, 'C2', self.name)
                    new_host.add_context({'source':self.name, 'contacted by': sha256})
                except ObservableValidationError as e:
                    logging.error(e)
Python
0.00209
@@ -2466,16 +2466,17 @@
 source': + self.nam
@@ -2594,8 +2594,1297 @@
 error(e) +%0A%0A if 'extracted_files' in item:%0A for extracted_file in item%5B'extracted_files'%5D:%0A context_file_dropped = %7B'source': self.name%7D%0A%0A if not 'sha256' in extracted_file:%0A logging.error(extracted_file)%0A continue%0A%0A new_file = Hash.get_or_create(value=extracted_file%5B'sha256'%5D)%0A context_file_dropped%5B'virustotal_score'%5D = 0%0A context_file_dropped%5B'size'%5D = extracted_file%5B'file_size'%5D%0A%0A if 'av_matched' in extracted_file:%0A context_file_dropped%5B'virustotal_score'%5D = extracted_file%5B'av_matched'%5D%0A%0A if 'threatlevel_readable' in extracted_file:%0A context_file_dropped%5B'threatlevel'%5D = extracted_file%5B'threatlevel_readable'%5D%0A%0A if 'av_label' in extracted_file:%0A new_file.tag(extracted_file%5B'av_label'%5D)%0A%0A if 'type_tags' in extracted_file:%0A new_file.tag(extracted_file%5B'type_tags'%5D)%0A%0A new_file.add_context(context_file_dropped)%0A%0A new_file.active_link_to(sha256, 'drop', self.name)%0A new_file.active_link_to(md5, 'drop', self.name)%0A new_file.active_link_to(sha1, 'drop', self.name)
aa7bbd84fa16105417ceb7f9e06d392a4e54fdc6
Remove unused import
salt/beacons/twilio_txt_msg.py
salt/beacons/twilio_txt_msg.py
# -*- coding: utf-8 -*-
'''
Beacon to emit Twilio text messages
'''

# Import Python libs
from __future__ import absolute_import
from datetime import datetime
import logging

# Import 3rd Party libs
try:
    from twilio.rest import TwilioRestClient
    HAS_TWILIO = True
except ImportError:
    HAS_TWILIO = False

log = logging.getLogger(__name__)

__virtualname__ = 'twilio_txt_msg'


def __virtual__():
    if HAS_TWILIO:
        return __virtualname__
    else:
        return False


def beacon(config):
    '''
    Emit a dict name "texts" whose value is a list of texts.

    .. code-block:: yaml

        beacons:
          twilio_txt_msg:
            account_sid: "<account sid>"
            auth_token: "<auth token>"
            twilio_number: "+15555555555"
            interval: 10
    '''
    log.trace('twilio_txt_msg beacon starting')
    ret = []
    if not all([config['account_sid'], config['auth_token'], config['twilio_number']]):
        return ret
    output = {}
    output['texts'] = []
    client = TwilioRestClient(config['account_sid'], config['auth_token'])
    messages = client.messages.list(to=config['twilio_number'])
    log.trace('Num messages: {0}'.format(len(messages)))
    if len(messages) < 1:
        log.trace('Twilio beacon has no texts')
        return ret

    for message in messages:
        item = {}
        item['id'] = str(message.sid)
        item['body'] = str(message.body)
        item['from'] = str(message.from_)
        item['sent'] = str(message.date_sent)
        item['images'] = []

        if int(message.num_media):
            media = client.media(message.sid).list()
            if len(media):
                for pic in media:
                    item['images'].append(str(pic.uri))
        output['texts'].append(item)
        message.delete()
    ret.append(output)
    return ret
Python
0.000001
@@ -126,38 +126,8 @@
 ort%0A -from datetime import datetime%0A impo
f59da23fc66c24759d202ca30d6c407954db757c
fix openbsdservice
salt/modules/openbsdservice.py
salt/modules/openbsdservice.py
# -*- coding: utf-8 -*-
'''
The service module for OpenBSD
'''

# Import python libs
import os
import logging

log = logging.getLogger(__name__)

# XXX enable/disable support would be nice

# Define the module's virtual name
__virtualname__ = 'service'

__func_alias__ = {
    'reload_': 'reload'
}


def __virtual__():
    '''
    Only work on OpenBSD
    '''
    if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
        krel = map(int, __grains__['kernelrelease'].split('.'))
        # The -f flag, used to force a script to run even if disabled,
        # was added after the 5.0 release.
        if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0):
            return __virtualname__
    return False


def start(name):
    '''
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    cmd = '/etc/rc.d/{0} -f start'.format(name)
    return not __salt__['cmd.retcode'](cmd)


def stop(name):
    '''
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    cmd = '/etc/rc.d/{0} -f stop'.format(name)
    return not __salt__['cmd.retcode'](cmd)


def restart(name):
    '''
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    cmd = '/etc/rc.d/{0} -f restart'.format(name)
    return not __salt__['cmd.retcode'](cmd)


def status(name, sig=None):
    '''
    Return the status for a service, returns a bool whether the service is
    running.

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    if sig:
        return bool(__salt__['status.pid'](sig))
    cmd = '/etc/rc.d/{0} -f check'.format(name)
    return not __salt__['cmd.retcode'](cmd)


def reload_(name):
    '''
    .. versionadded:: 2014.7.0

    Reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.reload <service name>
    '''
    cmd = '/etc/rc.d/{0} -f reload'.format(name)
    return not __salt__['cmd.retcode'](cmd)


import re
service_flags_regex = re.compile(r'^\s*(\w[\d\w]*)_flags=(?:(NO)|.*)$')
pkg_scripts_regex = re.compile(r'^\s*pkg_scripts=\'(.*)\'$')
start_daemon_call_regex = re.compile(r'(\s*start_daemon(?!\(\)))')
start_daemon_parameter_regex = re.compile(r'(?:\s+(\w[\w\d]*))')


def _get_rc():
    '''
    Returns a dict where the key is the daemon's name and
    the value a boolean indicating its status (True: enabled or False: disabled).
    Check the daemons started by the system in /etc/rc and
    configured in /etc/rc.conf and /etc/rc.conf.local.
    Also add to the dict all the localy enabled daemons via $pkg_scripts.
    '''
    daemons_flags = {}

    try:
        # now read the system startup script /etc/rc
        # to know what are the system enabled daemons
        with open('/etc/rc', 'r') as handle:
            lines = handle.readlines()
    except IOError:
        log.error('Unable to read /etc/rc')
    else:
        for line in lines:
            match = start_daemon_call_regex.match(line)
            if match:
                # the matched line is a call to start_daemon()
                # we remove the function name
                line = line[len(match.group(1)):]
                # we retrieve each daemon name from the parameters of start_daemon()
                for daemon in start_daemon_parameter_regex.findall(line):
                    # mark it as enabled
                    daemons_flags[daemon] = True

    # this will execute rc.conf and rc.conf.local
    # used in /etc/rc at boot to start the daemons
    variables = __salt__['cmd.run']('(. /etc/rc.conf && set)', clean_env=True, output_loglevel='quiet').split('\n')
    for var in variables:
        match = service_flags_regex.match(var)
        if match:
            # the matched var look like daemon_name_flags=, we test its assigned value
            # NO: disabled, everything else: enabled
            # do not create a new key if the service hasn't been found in /etc/rc, see $pkg_scripts
            if match.group(2) == 'NO':
                daemons_flags[match.group(1)] = False
        else:
            match = pkg_scripts_regex.match(var)
            if match:
                # the matched var is pkg_scripts
                # we can retrieve the name of each localy enabled daemon that wasn't hand started via /etc/rc
                for daemon in match.group(1).split():
                    # create a new key and mark it as enabled
                    daemons_flags[daemon] = True

    return daemons_flags


def available(name):
    '''
    .. versionadded:: 2014.7.0

    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available sshd
    '''
    path = '/etc/rc.d/{0}'.format(name)
    return os.path.isfile(path) and os.access(path, os.X_OK)


def missing(name):
    '''
    .. versionadded:: 2014.7.0

    The inverse of service.available.
    Returns ``True`` if the specified service is not available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing sshd
    '''
    return not available(name)


def get_all():
    '''
    .. versionadded:: 2014.7.0

    Return all available boot services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    services = []
    if not os.path.isdir('/etc/rc.d'):
        return services
    for service in os.listdir('/etc/rc.d'):
        # this will remove rc.subr and all non executable files
        if available(service):
            services.append(service)
    return sorted(services)


def get_enabled():
    '''
    .. versionadded:: 2014.7.0

    Return a list of service that are enabled on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    services = []
    for daemon, is_enabled in _get_rc().items():
        if is_enabled:
            services.append(daemon)
    return sorted(set(get_all()) & set(services))


def enabled(name):
    '''
    .. versionadded:: 2014.7.0

    Return True if the named service is enabled, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    return name in get_enabled()


def get_disabled():
    '''
    .. versionadded:: 2014.7.0

    Return a set of services that are installed but disabled

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    '''
    services = []
    for daemon, is_enabled in _get_rc().items():
        if not is_enabled:
            services.append(daemon)
    return sorted(set(get_all()) & set(services))


def disabled(name):
    '''
    .. versionadded:: 2014.7.0

    Return True if the named service is disabled, false otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    '''
    return name in get_disabled()
Python
0.000041
@@ -3749,48 +3749,175 @@
 t)', - clean_env=True, output_loglevel='quiet' +%0A clean_env=True,%0A output_loglevel='quiet',%0A python_shell=True ).sp
c340c1b92a3d82a25ce2e43b19603ee58de0b146
Improve celery logging
home/core/async.py
home/core/async.py
""" async.py ~~~~~~~~ Handles running of tasks in an asynchronous fashion. Not explicitly tied to Celery. The `run` method simply must exist here and handle the execution of whatever task is passed to it, whether or not it is handled asynchronously. """ from apscheduler.schedulers.background import BackgroundScheduler from celery import Celery from celery.security import setup_security setup_security(allowed_serializers=['pickle', 'json'], serializer='pickle') queue = Celery('home', broker='redis://', backend='redis://', serializer='pickle') queue.conf.update( CELERY_TASK_SERIALIZER='pickle', CELERY_ACCEPT_CONTENT=['pickle', 'json'], ) scheduler = BackgroundScheduler() scheduler.start() @queue.task def _run(method, **kwargs) -> None: """ Run the configured actions in multiple processes. """ method(**kwargs) def run(method, delay=0, **kwargs): return _run.apply_async(args=[method], kwargs=kwargs, countdown=float(delay))
Python
0.000005
@@ -382,16 +382,61 @@
 security +%0Afrom celery.utils.log import get_task_logger %0A%0Asetup_
@@ -807,16 +807,52 @@
 tart()%0A%0A +logger = get_task_logger(__name__)%0A%0A %0A@queue.
@@ -958,24 +958,102 @@
 es.%0A %22%22%22%0A + logger.info('Running %7B%7D with config: %7B%7D'.format(method.__name__, kwargs))%0A method(*
d4a89c2a2400e984bd74c27cc9bf5cb5222f8226
Adding a waiting time
src/snakefiles/assembly.py
src/snakefiles/assembly.py
rule assembly_split_pe_files:
    """
    Split pe_pe files into _1 and _2.
    """
    input:
        fastq_pe = norm + "{sample}.final.pe_pe.fq.gz"
    output:
        left = assembly + "{sample}_1.fq.gz",
        right = assembly + "{sample}_2.fq.gz"
    threads:
        1
    priority:
        20
    params:
        left = "{sample}.final.pe_pe.fq.gz.1",
        right = "{sample}.final.pe_pe.fq.gz.2"
    log:
        assembly + "split_pe_files_{sample}.log"
    benchmark:
        assembly + "split_pe_files_{sample}.json"
    shell:
        "split-paired-reads.py "
        "--output-first >(pigz --best > {output.left}) "
        "--output-second >(pigz --best > {output.right}) "
        "{input.fastq_pe} "
        "> {log} 2>&1"


rule assembly_merge_right_and_left:
    """
    Generate the left.fq and right.fq
    left.fq = /1 reads from PE + all SE reads
    right.fq = /2 reads form PE
    Forced the use of gzip because pigz doesn't have --force option.
    --force is required because /dev/null isn't a file, doesn't even have an
    end, and isn't compressed.
    Test if SAMPLES_PE is empty because gzip/pigz may because otherwise it may
    be waiting something from stdout.
    """
    input:
        forward = expand(
            assembly + "{sample}_1.fq.gz",
            sample=SAMPLES_PE
        ) if SAMPLES_PE else ["/dev/null"],
        reverse = expand(
            assembly + "{sample}_2.fq.gz",
            sample=SAMPLES_PE
        ) if SAMPLES_PE else ["/dev/null"],
        single = expand(  # pe_se
            norm + "{sample}.final.pe_se.fq.gz",
            sample=SAMPLES_PE
        ) + expand(  # se
            norm + "{sample}.final.se.fq.gz",
            sample=SAMPLES_SE
        )
    output:
        left = assembly + "left.fq",
        right = assembly + "right.fq",
        single = assembly + "single.fq"
    threads:
        1
    priority:
        50
    log:
        assembly + "merge_right_and_left.log"
    benchmark:
        assembly + "merge_right_and_left.json"
    shell:
        "gzip --decompress --stdout "
        "{input.forward} "
        "> {output.left} 2> {log}; "
        "gzip --decompress --stdout "
        "{input.reverse} "
        "> {output.right} 2>> {log}; "
        "gzip --decompress --stdout "
        "{input.single} "
        "> {output.single} 2>> {log}"


rule assembly_run_trinity:
    """
    Assembly reads with Trinity.
    Notes on hardcoded settings:
    - Runs on paired end mode
    - Expect fastq files as inputs (left and right)
    - Does the full cleanup so it only remains a fasta file.
    """
    input:
        left = assembly + "left.fq",
        right = assembly + "right.fq",
        #single = assembly + "single.fq"
    output:
        fasta = protected(assembly + "Trinity.fasta")
    threads:
        ALL_THREADS
    priority:
        50
    params:
        memory= config["trinity_params"]["memory"],
        outdir= assembly + "trinity_out_dir"
    log:
        assembly + "run_trinity.log"
    benchmark:
        assembly + "run_trinity.json"
    shell:
        "Trinity "
        "--seqType fq "
        "--no_normalize_reads "
        "--max_memory {params.memory} "
        "--left {input.left} "
        "--right {input.right} "
        "--CPU {threads} "
        "--full_cleanup "
        "--output {params.outdir} "
        "> {log} ; "
        "mv {params.outdir}.Trinity.fasta {output.fasta}"


rule assembly_gene_to_trans_map:
    """
    Create the gene_id TAB transcript_id file
    """
    input:
        fasta = assembly + "Trinity.fasta"
    output:
        tsv = assembly + "Trinity_gene_to_trans.tsv"
    log:
        assembly + "gene_to_trans_map.log"
    benchmark:
        assembly + "gene_to_trans_map.json"
    shell:
        "get_Trinity_gene_to_trans_map.pl "
        "< {input.fasta} "
        "> {output.tsv} "
        "2> {log}"


rule assembly_index_trinity:
    """
    Create a samtool index for the assembly
    """
    input:
        fasta = assembly + "Trinity.fasta"
    output:
        fai = assembly + "Trinity.fasta.fai"
    log:
        assembly + "index.log"
    benchmark:
        assembly + "index.json"
    shell:
        "samtools faidx {input.fasta} "
        "2> {log} 1>&2"
Python
0.999953
@@ -747,16 +747,26 @@
 og%7D 2%3E&1 + ; sleep 5 %22%0A%0Arule
8414668c97c359a39cf96a37819cb7e37b54c670
Fix new Pylint
ixdjango/utils.py
ixdjango/utils.py
""" Utility classes/functions """ import os from random import choice import re from subprocess import PIPE, Popen def random_string( length=10, chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789' ): """ Generates a random string of length specified and using supplied chars. Useful for salting hashing functions """ return ''.join([choice(chars) for _ in range(length)]) def querydict_to_dict(querydict): """ Converts a QueryDict instance (i.e.request params) into a plain dictionary """ pure_dict = {} for item_key in querydict.keys(): item_val_list = querydict.getlist(item_key) if item_val_list: if len(item_val_list) == 0: pure_dict[item_key] = None if len(item_val_list) == 1: pure_dict[item_key] = item_val_list[0] else: pure_dict[item_key] = item_val_list else: pure_dict[item_key] = None return pure_dict def remote_addr_from_request(request): """ Returns the correct remote address from the request object. If the request was proxied, this correct information is in HTTP_X_FORWARDED_FOR """ if not request: raise TypeError("No request passed to function") if 'HTTP_X_FORWARDED_FOR' in request.META: return request.META['HTTP_X_FORWARDED_FOR'] else: return request.META['REMOTE_ADDR'] def flatten_request_header(header): """ Transform a dict representing header parameters into a flat string of comma separated parameters suitable for inserting into the actual headers """ flattened_header = '' if isinstance(header, dict): contents = [] for content_key, content_val in header.items(): contents.append('%s="%s"' % (content_key, content_val)) flattened_header = ','.join(contents) else: flattened_header = str(header) return flattened_header def flatten_auth_header(headers_dict, auth_type): """ Auth headers have auth type at the start of the string """ return "%s %s" % (auth_type, flatten_request_header(headers_dict)) def flat_header_val_to_dict(header_val): """ Transform a header string of comma separated parameters into a dict """ val_dict = {} val_comps = header_val.rsplit(',') if len(val_comps): for val_comp in val_comps: key, sep, val = val_comp.partition("=") if sep != "=": return {} key = key.strip() val = val.strip() val = val.strip('"') if key in val_dict: if isinstance(val_dict[key], list): val_dict[key].append(val) else: val_dict[key] = [val_dict[key], val] else: val_dict[key] = val return val_dict def flat_auth_header_val_to_data(header_val): """ Capture auth type from the string and then remove it before passing on to flat_header_val_to_dict """ match = re.match(r'^([\S]+[\s]+)?(.*)$', header_val) if match and match.group(1): return (flat_header_val_to_dict(match.group(2).strip()), match.group(1).strip()) return (flat_header_val_to_dict(header_val), None) def get_npm_module(module): """ Return the path of an npm module binary Example: get_npm_module('lessc') """ proc = Popen(['npm', 'bin'], stdout=PIPE) proc.wait() path = proc.stdout.read().strip() return os.path.join(path, module)
Python
0.000025
@@ -114,52 +114,23 @@
 n%0A%0A%0A -def random_string(%0A length=10,%0A chars= +ALPHANUMERIC = 'abc
@@ -183,16 +183,65 @@
 456789'%0A +%0A%0Adef random_string(length=10, chars=ALPHANUMERIC ):%0A %22
5859e7102654e016dd836e2cb9428f5b2daac35c
Support mixing generic and non generic inlines
genericadmin/admin.py
genericadmin/admin.py
import json
from functools import update_wrapper
from django.contrib import admin
from django.conf.urls import patterns, url
from django.conf import settings
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType

try:
    from django.utils.encoding import force_text
except ImportError:
    from django.utils.encoding import force_unicode as force_text

from django.utils.text import capfirst
from django.contrib.admin.widgets import url_params_from_lookup_dict
from django.http import HttpResponse, HttpResponseNotAllowed, Http404

try:
    from django.contrib.admin.views.main import IS_POPUP_VAR
except ImportError:
    from django.contrib.admin.options import IS_POPUP_VAR

from django.core.exceptions import ObjectDoesNotExist

JS_PATH = getattr(settings, 'GENERICADMIN_JS', 'genericadmin/js/')


class BaseGenericModelAdmin(object):
    class Media:
        js = ()

    content_type_lookups = {}
    generic_fk_fields = []
    content_type_blacklist = []
    content_type_whitelist = []

    def __init__(self, model, admin_site):
        try:
            media = list(self.Media.js)
        except:
            media = []
        media.append(JS_PATH + 'genericadmin.js')
        self.Media.js = tuple(media)
        super(BaseGenericModelAdmin, self).__init__(model, admin_site)

    def get_generic_field_list(self, request, prefix=''):
        if hasattr(self, 'ct_field') and hasattr(self, 'ct_fk_field'):
            exclude = [self.ct_field, self.ct_fk_field]
        else:
            exclude = []

        field_list = []
        if hasattr(self, 'generic_fk_fields') and self.generic_fk_fields:
            for fields in self.generic_fk_fields:
                if fields['ct_field'] not in exclude and \
                        fields['fk_field'] not in exclude:
                    fields['inline'] = prefix != ''
                    fields['prefix'] = prefix
                    field_list.append(fields)
        else:
            for field in self.model._meta.virtual_fields:
                if isinstance(field, generic.GenericForeignKey) and \
                        field.ct_field not in exclude and field.fk_field not in exclude:
                    field_list.append({
                        'ct_field': field.ct_field,
                        'fk_field': field.fk_field,
                        'inline': prefix != '',
                        'prefix': prefix,
                    })

        if hasattr(self, 'inlines') and len(self.inlines) > 0:
            for FormSet, inline in zip(self.get_formsets(request), self.get_inline_instances(request)):
                prefix = FormSet.get_default_prefix()
                field_list = field_list + inline.get_generic_field_list(request, prefix)

        return field_list

    def get_urls(self):
        def wrap(view):
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        custom_urls = patterns('',
            url(r'^obj-data/$', wrap(self.generic_lookup), name='admin_genericadmin_obj_lookup'),
            url(r'^genericadmin-init/$', wrap(self.genericadmin_js_init), name='admin_genericadmin_init'),
        )
        return custom_urls + super(BaseGenericModelAdmin, self).get_urls()

    def genericadmin_js_init(self, request):
        if request.method == 'GET':
            obj_dict = {}
            for c in ContentType.objects.all():
                val = force_text('%s/%s' % (c.app_label, c.model))
                params = self.content_type_lookups.get('%s.%s' % (c.app_label, c.model), {})
                params = url_params_from_lookup_dict(params)
                if self.content_type_whitelist:
                    if val in self.content_type_whitelist:
                        obj_dict[c.id] = (val, params)
                elif val not in self.content_type_blacklist:
                    obj_dict[c.id] = (val, params)

            data = {
                'url_array': obj_dict,
                'fields': self.get_generic_field_list(request),
                'popup_var': IS_POPUP_VAR,
            }
            resp = json.dumps(data, ensure_ascii=False)
            return HttpResponse(resp, mimetype='application/json')
        return HttpResponseNotAllowed(['GET'])

    def generic_lookup(self, request):
        if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])

        if 'content_type' in request.GET and 'object_id' in request.GET:
            content_type_id = request.GET['content_type']
            object_id = request.GET['object_id']

            obj_dict = {
                'content_type_id': content_type_id,
                'object_id': object_id,
            }

            content_type = ContentType.objects.get(pk=content_type_id)
            obj_dict["content_type_text"] = capfirst(force_text(content_type))

            try:
                obj = content_type.get_object_for_this_type(pk=object_id)
                obj_dict["object_text"] = capfirst(force_text(obj))
            except ObjectDoesNotExist:
                raise Http404

            resp = json.dumps(obj_dict, ensure_ascii=False)
        else:
            resp = ''
        return HttpResponse(resp, mimetype='application/json')


class GenericAdminModelAdmin(BaseGenericModelAdmin, admin.ModelAdmin):
    """Model admin for generic relations. """


class GenericTabularInline(BaseGenericModelAdmin, generic.GenericTabularInline):
    """Model admin for generic tabular inlines. """


class GenericStackedInline(BaseGenericModelAdmin, generic.GenericStackedInline):
    """Model admin for generic stacked inlines. """


class TabularInlineWithGeneric(BaseGenericModelAdmin, admin.TabularInline):
    """"Normal tabular inline with a generic relation"""


class StackedInlineWithGeneric(BaseGenericModelAdmin, admin.StackedInline):
    """"Normal stacked inline with a generic relation"""
Python
0
@@ -2675,16 +2675,82 @@
 uest)):%0A + if hasattr(inline, 'get_generic_field_list'):%0A
@@ -2795,16 +2795,20 @@
 refix()%0A +
@@ -2888,24 +2888,16 @@
 prefix)%0A - %0A
c8df242ed423717891f5b5fcd061cf702990c362
Fix serialisation of unicode values to XML
molly/utils/simplify.py
molly/utils/simplify.py
import itertools
import datetime
from logging import getLogger

from lxml import etree

from django.contrib.gis.geos import Point
from django.core.paginator import Page
from django.db import models

logger = getLogger(__name__)


class DateUnicode(unicode): pass
class DateTimeUnicode(unicode): pass

_XML_DATATYPES = (
    (DateUnicode, 'date'),
    (DateTimeUnicode, 'datetime'),
    (str, 'string'),
    (unicode, 'string'),
    (int, 'integer'),
    (float, 'float'),
)


def simplify_value(value):
    if hasattr(value, 'simplify_for_render'):
        return value.simplify_for_render(simplify_value, simplify_model)
    elif isinstance(value, dict):
        out = {}
        for key in value:
            new_key = key if isinstance(key, (basestring, int)) else str(key)
            try:
                out[new_key] = simplify_value(value[key])
            except NotImplementedError:
                logger.info('Could not simplify a value', exc_info=True)
                pass
        return out
    elif isinstance(value, (list, tuple, set, frozenset)):
        out = []
        for subvalue in value:
            try:
                out.append(simplify_value(subvalue))
            except NotImplementedError:
                pass
        if isinstance(value, tuple):
            return tuple(out)
        else:
            return out
    elif isinstance(value, (basestring, int, float)):
        return value
    elif isinstance(value, datetime.datetime):
        return DateTimeUnicode(value.isoformat(' '))
    elif isinstance(value, datetime.date):
        return DateUnicode(value.isoformat())
    elif hasattr(type(value), '__mro__') and models.Model in type(value).__mro__:
        return simplify_model(value)
    elif isinstance(value, Page):
        return {
            'has_next': value.has_next(),
            'has_previous': value.has_next(),
            'next_page_number': value.has_next(),
            'previous_page_number': value.has_next(),
            'number': value.number,
            'objects': simplify_value(value.object_list)
        }
    elif value is None:
        return None
    elif isinstance(value, Point):
        return simplify_value(list(value))
    elif hasattr(value, '__iter__'):
        # Iterators may be unbounded; silently ignore elements once we've already had 1000.
        return [simplify_value(item) for item in itertools.islice(value, 1000)]
    else:
        raise NotImplementedError


def simplify_model(obj, terse=False):
    if obj is None:
        return None
    # It's a Model instance
    if hasattr(obj._meta, 'expose_fields'):
        expose_fields = obj._meta.expose_fields
    else:
        expose_fields = [f.name for f in obj._meta.fields]
    out = {
        '_type': '%s.%s' % (obj.__module__[:-7], obj._meta.object_name),
        '_pk': obj.pk,
    }
    if hasattr(obj, 'get_absolute_url'):
        out['_url'] = obj.get_absolute_url()
    if terse:
        out['_terse'] = True
    else:
        for field_name in expose_fields:
            if field_name in ('password',):
                continue
            try:
                value = getattr(obj, field_name)
                if isinstance(value, models.Model):
                    value = simplify_model(value, terse=True)
                out[field_name] = simplify_value(value)
            except NotImplementedError:
                pass
        # Add any non-field attributes
        for field in list(dir(obj)):
            try:
                if field[0] != '_' and field != 'objects' \
                        and not isinstance(getattr(obj, field), models.Field):
                    try:
                        out[field] = simplify_value(getattr(obj, field))
                    except NotImplementedError:
                        pass
            except AttributeError:
                pass
    return out


def serialize_to_xml(value):
    if value is None:
        node = etree.Element('null')
    elif isinstance(value, bool):
        node = etree.Element('literal')
        node.text = 'true' if value else 'false'
        node.attrib['type'] = 'boolean'
    elif isinstance(value, (basestring, int, float)):
        node = etree.Element('literal')
        node.text = unicode(value)
        node.attrib['type'] = [d[1] for d in _XML_DATATYPES if isinstance(value, d[0])][0]
    elif isinstance(value, dict):
        if '_type' in value:
            node = etree.Element('object', {'type': value['_type'],
                                            'pk': unicode(value.get('_pk', ''))})
            del value['_type']
            del value['_pk']
            if '_url' in value:
                node.attrib['url'] = value['_url']
                del value['_url']
            if value.get('_terse'):
                node.attrib['terse'] = 'true'
                del value['_terse']
        else:
            node = etree.Element('collection', {'type': 'mapping'})
        for key in value:
            v = serialize_to_xml(value[key])
            subnode = etree.Element('item', {'key':key})
            subnode.append(v)
            node.append(subnode)
    elif isinstance(value, (list, tuple, set, frozenset)):
        for x,y in ((list, 'list'), (tuple, 'tuple')):
            if isinstance(value, x):
                node = etree.Element('collection', {'type': y})
                break
        else:
            node = etree.Element('collection', {'type':'set'})
        for item in value:
            v = serialize_to_xml(item)
            subnode = etree.Element('item')
            subnode.append(v)
            node.append(subnode)
    else:
        node = etree.Element('unknown')

    return node
Python
0.000008
@@ -4192,32 +4192,49 @@ ment('literal')%0A + try:%0A node.tex @@ -4244,32 +4244,180 @@ unicode(value)%0A + except UnicodeDecodeError:%0A # Encode as UTF-8 if ASCII string can not be encoded%0A node.text = unicode(value, 'utf-8')%0A node.att
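The fix recorded in the diff above wraps the unicode() conversion in a try/except so that byte strings holding non-ASCII data are decoded as UTF-8 instead of raising. A minimal standalone sketch of the same pattern (Python 2, since the file above uses unicode/basestring; the sample byte string is invented for illustration):

# -*- coding: utf-8 -*-
# Python 2 sketch of the pattern applied in the commit above.
def to_text(value):
    try:
        # Works for ASCII byte strings and anything unicode() can coerce.
        return unicode(value)
    except UnicodeDecodeError:
        # Byte strings containing UTF-8 data fail the default ASCII decode,
        # so decode them explicitly as UTF-8.
        return unicode(value, 'utf-8')

print to_text('plain ascii')   # -> u'plain ascii'
print to_text('caf\xc3\xa9')   # UTF-8 bytes for 'cafe' with accent -> u'caf\xe9'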
771860a6a9176dc6627f25f5faac960ab3edcc50
add expand user
src/speaker-recognition.py
src/speaker-recognition.py
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: speaker-recognition.py
# Date: Wed Oct 29 22:42:26 2014 +0800
# Author: Yuxin Wu <[email protected]>

import argparse
import sys
import glob
import os
import itertools
import scipy.io.wavfile as wavfile

sys.path.append(os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'gui'))

from gui.interface import ModelInterface
from filters.silence import remove_silence

def get_args():
    desc = "Speaker Recognition Command Line Tool"
    epilog = """
Wav files in each input directory will be labeled as the basename of the directory.
Note that wildcard inputs should be *quoted*, and they will be sent to glob module.

Examples:
    Train:
    ./speaker-recognition.py -t enroll -i "/tmp/person* ./mary" -m model.out

    Predict:
    ./speaker-recognition.py -t predict -i "./*.wav" -m model.out
"""
    parser = argparse.ArgumentParser(description=desc,epilog=epilog,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('-t', '--task',
                        help='Task to do. Either "enroll" or "predict"',
                        required=True)

    parser.add_argument('-i', '--input',
                        help='Input Files(to predict) or Directories(to enroll)',
                        required=True)

    parser.add_argument('-m', '--model',
                        help='Model file to save(in enroll) or use(in predict)',
                        required=True)

    ret = parser.parse_args()
    return ret

def task_enroll(input_dirs, output_model):
    m = ModelInterface()
    input_dirs = input_dirs.strip().split()
    dirs = itertools.chain(*(glob.glob(d) for d in input_dirs))
    dirs = [d for d in dirs if os.path.isdir(d)]
    files = []
    if len(dirs) == 0:
        print "No valid directory found!"
        sys.exit(1)
    for d in dirs:
        label = os.path.basename(d)
        wavs = glob.glob(d + '/*.wav')
        if len(wavs) == 0:
            print "No wav file found in {0}".format(d)
            continue
        print "Label {0} has files {1}".format(label, ','.join(wavs))
        for wav in wavs:
            fs, signal = wavfile.read(wav)
            m.enroll(label, fs, signal)

    m.train()
    m.dump(output_model)

def task_predict(input_files, input_model):
    m = ModelInterface.load(input_model)
    for f in glob.glob(input_files):
        fs, signal = wavfile.read(f)
        label = m.predict(fs, signal)
        print f, '->', label

if __name__ == '__main__':
    global args
    args = get_args()

    task = args.task
    if task == 'enroll':
        task_enroll(args.input, args.model)
    elif task == 'predict':
        task_predict(args.input, args.model)
Python
0.000002
@@ -83,27 +83,27 @@ te: -Wed Oct 29 22:42:26 +Sat Nov 29 14:06:43 201 @@ -1627,16 +1627,48 @@ t_dirs = + %5Bos.path.expanduser(k) for k in input_d @@ -1686,16 +1686,17 @@ .split() +%5D %0A dir @@ -2426,16 +2426,48 @@ ob.glob( +%5Bos.path.expanduser(k) for k in input_fi @@ -2469,16 +2469,17 @@ ut_files +%5D ):%0A
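The change above maps os.path.expanduser over the whitespace-split input so arguments like ~/recordings resolve to real paths before being handed to glob, which does not understand the tilde. A small illustration of why this matters, using hypothetical paths:

import glob
import os

raw = "~/recordings /tmp/person*"  # hypothetical CLI input
# glob.glob('~/recordings') matches nothing; expand the tilde first:
patterns = [os.path.expanduser(p) for p in raw.strip().split()]
matches = [m for p in patterns for m in glob.glob(p)]
print(matches)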
ff4ebc169392d8768cce4be39765682dbba64e8f
Return a Glyph() in Spell.getGlyphLearned()
game/spells/__init__.py
game/spells/__init__.py
# -*- coding: utf-8 -*-
"""
Spells
 - Spell.dbc
"""

from .. import *
from .. import durationstring
from ..globalstrings import *

POWER_TYPE_HEALTH = -2
POWER_TYPE_MANA = 0
POWER_TYPE_RAGE = 1
POWER_TYPE_FOCUS = 2
POWER_TYPE_ENERGY = 3
POWER_TYPE_RUNES = 5
POWER_TYPE_RUNIC_POWER = 6
POWER_TYPE_SOUL_SHARDS = 7

class Spell(Model):
	def getCooldownText(self):
		cooldown = self.getCooldown()
		if cooldown:
			return SPELL_RECAST_TIME % (durationstring.duration(cooldown, durationstring.SHORTCAP))
		return ""

	def getPowerCostText(self):
		powerType, powerAmount, powerPerLevel, powerPercent, powerPerSecond, powerDisplay = self.getPowerInfo()
		if powerType == POWER_TYPE_RUNES:
			bloodCost, unholyCost, frostCost = self.getRuneCostInfo()
			runes = []
			if bloodCost:
				runes.append(RUNE_COST_BLOOD % (bloodCost))
			if frostCost:
				runes.append(RUNE_COST_FROST % (frostCost))
			if unholyCost:
				runes.append(RUNE_COST_UNHOLY % (unholyCost))
			return " ".join(runes)

		if powerPercent:
			if powerType == POWER_TYPE_HEALTH:
				return "%i%% of base health" % (powerPercent)
			if powerType == POWER_TYPE_FOCUS:
				return "%i%% of base focus" % (powerPercent)
			#assert powerType == POWER_TYPE_MANA, "%r: %i" % (self, powerType)
			return "%i%% of base mana" % (powerPercent)

		if powerPerSecond:
			if powerDisplay:
				return POWER_DISPLAY_COST_PER_TIME % (powerAmount, powerDisplay, powerPerSecond)
			if type == POWER_TYPE_HEALTH:
				return HEALTH_COST_PER_TIME % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_MANA:
				return MANA_COST_PER_TIME % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_RAGE:
				return RAGE_COST_PER_TIME % (powerAmount / 10, powerPerSecond)
			if type == POWER_TYPE_ENERGY:
				return ENERGY_COST_PER_TIME % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_FOCUS:
				return FOCUS_COST_PER_TIME % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_RUNIC_POWER:
				return RUNIC_POWER_COST_PER_TIME % (powerAmount / 10, powerPerSecond)

		if powerAmount:
			if powerDisplay:
				return POWER_DISPLAY_COST % (powerAmount, powerDisplay, powerPerSecond)
			if type == POWER_TYPE_HEALTH:
				return HEALTH_COST % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_MANA:
				return MANA_COST % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_RAGE:
				return RAGE_COST % (powerAmount / 10, powerPerSecond)
			if type == POWER_TYPE_ENERGY:
				return ENERGY_COST % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_FOCUS:
				return FOCUS_COST % (powerAmount, powerPerSecond)
			if type == POWER_TYPE_RUNIC_POWER:
				return RUNIC_POWER_COST % (powerAmount / 10, powerPerSecond)
			if type == POWER_TYPE_SOUL_SHARDS:
				if powerAmount == 1:
					return SOUL_SHARDS_COST % (powerAmount, powerPerSecond)
				return SOUL_SHARDS_COST_PLURAL % (powerAmount, powerPerSecond)

	def getRangeText(self):
		rangeMin, rangeMinFriendly, rangeMax, rangeMaxFriendly, flags = self.getRangeInfo()
		if rangeMaxFriendly and rangeMax != rangeMaxFriendly:
			enemy = SPELL_RANGE_DUAL % (ENEMY, rangeMax)
			friendly = SPELL_RANGE_DUAL % (FRIENDLY, rangeMaxFriendly)
			return "%s\n%s" % (enemy, friendly)
		if rangeMax == 50000:
			return SPELL_RANGE_UNLIMITED
		if rangeMax:
			if rangeMin or flags & 0x2:
				range = "%i-%i" % (rangeMin or 5, rangeMax)
				return SPELL_RANGE % (range)
			if rangeMax == 5 and flags & 0x1:
				return MELEE_RANGE
			return SPELL_RANGE % (rangeMax)
		return ""


class SpellTooltip(Tooltip):
	pass


class SpellProxy(object):
	"""
	WDBC proxy for spells
	"""
	def __init__(self, cls):
		from pywow import wdbc
		self.__file = wdbc.get("Spell.dbc", build=-1)

	def get(self, id):
		return self.__file[id]

	def getCooldown(self, row):
		return row.cooldowns and row.cooldowns.cooldown or 0

	def getDescription(self, row):
		from pywow.spellstrings import SpellString, WDBCProxy
		description = row.description_enus
		return SpellString(description).format(self.get(row.id), proxy=WDBCProxy)

	def getEffects(self, row):
		return row.spelleffect__spell

	def getGlyphInfo(self, row):
		return row.class_options.spell_class_set

	def getGlyphLearned(self, row):
		effects = self.getEffects(row)
		if effects and effects[0]._raw("effect") == 74:
			return effects[0].misc_value_1
		return 0

	def getIcon(self, row):
		icon = row.icon and row.icon.path or ""
		return icon.lower().replace("\\", "/").split("/")[-1]

	def getLevel(self, row):
		return row.levels and row.levels.level or 0

	def getName(self, row):
		return row.name_enus

	def getPowerInfo(self, row):
		if row.power:
			powerDisplay = row.power.power_display and row.power.power_display.name
			powerDisplay = powerDisplay and globals()[powerDisplay] or ""
			return row.power_type, row.power.power_amount, row.power.power_per_level, row.power.power_percent, row.power.power_per_second, powerDisplay
		return 0, 0, 0, 0, 0, ""

	def getRangeInfo(self, row):
		return int(row.range.range_min), int(row.range.range_min_friendly), int(row.range.range_max), int(row.range.range_max_friendly), row.range.flags

	def getRank(self, row):
		return row.rank_enus

	def getRuneCostInfo(self, row):
		if row.rune_cost:
			return row.rune_cost.blood, row.rune_cost.unholy, row.rune_cost.frost
		return 0, 0, 0
Python
0.000281
@@ -4370,16 +4370,89 @@ == 74:%0A +%09%09%09from ..glyphs import Glyph, GlyphProxy%0A%09%09%09Glyph.initProxy(GlyphProxy)%0A %09%09%09retur @@ -4453,16 +4453,22 @@ %09return +Glyph( effects%5B @@ -4482,27 +4482,17 @@ _value_1 -%0A%09%09return 0 +) %0A%09%0A%09def
64cecfb1ada97d1aeb5987289772fd2200da78d7
fix creation of view
recycleview.py
recycleview.py
""" RecycleView =========== Data accepted: list of dict. TODO: - recycle old widgets based on the class - add custom function to get view height - add custom function to get view class - update view size when created - move all internals to adapter - selection """ from kivy.compat import string_types from kivy.uix.relativelayout import RelativeLayout from kivy.lang import Builder from kivy.properties import NumericProperty, AliasProperty, StringProperty, \ ObjectProperty from kivy.factory import Factory from kivy.clock import Clock Builder.load_string(""" <RecycleView>: ScrollView: id: sv do_scroll_x: False on_scroll_y: root.refresh_from_data() RecycleViewLayout: id: layout size_hint: None, None size: root.width, root.computed_height """) class RecycleViewLayout(RelativeLayout): pass class RecycleView(RelativeLayout): data = ObjectProperty() adapter = ObjectProperty() default_height = NumericProperty("48dp") key_height = StringProperty() viewclass = ObjectProperty() key_viewclass = StringProperty() # internals computed_height = NumericProperty(0) computed_heights = [] computed_positions = [] views = {} dirty_views = {} def on_viewclass(self, instance, value): if isinstance(value, string_types): self.viewclass = getattr(Factory, value) def do_layout(self, *args): super(RecycleView, self).do_layout(*args) self.refresh_from_data(True) def make_view_dirty(self, view, index): viewclass = view.__class__ if viewclass not in self.dirty_views: self.dirty_views[viewclass] = {index: view} else: self.dirty_views[viewclass][index] = view def refresh_from_data(self, force=False): """The data has changed, update the RecycleView internals """ if force: for index, view in self.views.items(): self.make_view_dirty(view, index) self.views = {} self.compute_views_heights() self.compute_visible_views() def compute_views_heights(self): """(internal) Calculate all the views height according to default_height, key_height, and then calculate their future positions """ height = 0 key_height = self.key_height default_height = self.default_height self.computed_heights = [ item.get(key_height, default_height) for item in self.data ] self.computed_height = sum(self.computed_heights) self.computed_positions = list( self._compute_positions(self.computed_heights)) def _compute_positions(self, heights): y = 0 for height in heights: yield y y += height def compute_visible_views(self): """(internal) Determine the views that need to be showed in the current scrollview. All the hidden views will be flagged as dirty, and might be resued for others views. 
""" # determine the view to create for the scrollview y / height sv = self.ids.sv layout = self.ids.layout scroll_y = 1 - (min(1, max(sv.scroll_y, 0))) px_start = (layout.height - self.height) * scroll_y px_end = px_start + self.height # now calculate the view indices we must show i_start = self.get_view_index_at(px_start) i_end = self.get_view_index_at(px_end) current_views = self.views visible_views = {} dirty_views = self.dirty_views # iterate though the visible view # add them into the layout if not already done for index in range(i_start, i_end + 1): view = self.get_view(index) if not view: continue visible_views[index] = view current_views.pop(index, None) # add to the layout if it's not already done if view.parent: continue layout.add_widget(view) # put all the hidden view as dirty views for index, view in current_views.items(): layout.remove_widget(view) self.make_view_dirty(view, index) # save the current visible views self.views = visible_views def get_view(self, index): """Return a view instance for the `index` """ if index in self.views: return self.views[index] dirty_views = self.dirty_views viewclass = self.get_viewclass(index) if viewclass in dirty_views: # we found ourself in the dirty list, no need to update data! if index in dirty_views[viewclass]: view = dirty_views[viewclass].pop(index) self.refresh_view_layout(view, index) self.views[index] = view return view # we are not in the dirty list, just take one and reuse it. if dirty_views[viewclass]: previous_index = dirty_views[viewclass].keys()[-1] view = dirty_views[viewclass].pop(previous_index) # update view data item = self.data[index] for key, value in item.items(): setattr(view, key, value) self.refresh_view_layout(view, index) self.views[index] = view return view # create a fresh one self.views[index] = view = self.create_view(index) self.refresh_view_layout(view, index) return view def refresh_view_layout(self, view, index): """(internal) Refresh the layout of a view. Size and pos are determine by the `RecycleView` according to the view `index` informations """ view.size_hint = None, None view.width = self.width view.height = h = self.computed_heights[index] view.y = self.computed_height - self.computed_positions[index] - h def create_view(self, index): """Create the view for the `index` """ viewclass = self.get_viewclass(index) item = self.data[index] view = viewclass() # we could pass the data though the constructor, but that wont work # for kv-declared classes, and might lead the user to think it can # work for reloading as well. for key, value in item.items(): setattr(view, key, value) return view def get_view_position(self, index): """Get the position for the view at `index` """ return self.computed_positions[index] def get_view_height(self, index): """Get the height for the view at `index` """ return self.computed_heights[index] def get_viewclass(self, index): """Get the class needed to create the view `index` """ viewclass = None if self.key_viewclass: viewclass = self.data[index].get(self.key_viewclass) viewclass = getattr(Factory, viewclass) if not viewclass: viewclass = self.viewclass return viewclass def get_view_index_at(self, y): """Return the view `index` for the `y` position """ for index, pos in enumerate(self.computed_positions): if pos > y: return index - 1 return index def on_data(self, instance, value): # data changed, right now, remove all the widgets. self.dirty_views = {} self.views = {} self.ids.layout.clear_widgets() self._trigger_layout()
Python
0
@@ -6247,36 +6247,16 @@ -view = viewclass()%0A # +# FIXME: we @@ -6312,21 +6312,16 @@ hat wont - work %0A @@ -6322,16 +6322,21 @@ # + work for kv- @@ -6431,16 +6431,49 @@ s well.%0A + view = viewclass(**item)%0A
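The diff above replaces the create-then-setattr sequence in create_view with viewclass(**item), handing the data dict straight to the widget constructor, and rewords the comment into a FIXME. A minimal sketch of the two construction styles, using a plain toy class rather than a Kivy widget:

class Label(object):
    def __init__(self, **kwargs):
        self.text = kwargs.get('text', '')
        self.height = kwargs.get('height', 0)

item = {'text': 'hello', 'height': 48}

# before: create empty, then push each key one by one
view = Label()
for key, value in item.items():
    setattr(view, key, value)

# after: hand the whole dict to the constructor in one shot
view = Label(**item)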
06a648614d51e2c9f456a33dc164c11021c724a8
Handle adding to WHERE where WHERE already exists.
gemini/gemini_region.py
gemini/gemini_region.py
#!/usr/bin/env python
import sqlite3
import re
import os
import sys

import GeminiQuery


def _report_results(args, query, gq):
    # report the results of the region query
    gq.run(query)

    if args.use_header and gq.header:
        print gq.header
    for row in gq:
        print row


def get_region(args, gq):
    region_regex = re.compile("(\S+):(\d+)-(\d+)")
    try:
        region = region_regex.findall(args.region)[0]
    except IndexError:
        sys.exit("Malformed region (--reg) string")
    if len(region) != 3:
        sys.exit("Malformed region (--reg) string")

    chrom = region[0]
    start = region[1]
    end = region[2]

    if args.columns is not None:
        query = "SELECT " + str(args.columns) + \
                " FROM variants "
    else:
        query = "SELECT * FROM variants "

    query += "WHERE chrom = " + "'" + chrom + "'" + \
             " AND ((start BETWEEN " + start + " AND " + end + ")" +\
             " OR (end BETWEEN " + start + " AND " + end + "))"

    if args.filter:
        query += " AND " + args.filter
    query += " ORDER BY chrom, start"
    _report_results(args, query, gq)


def get_gene(args, gq):
    """
    Report all variants in a specific gene.
    """
    if args.columns is not None:
        query = "SELECT " + str(args.columns) + \
                " FROM variants "
    else:
        query = "SELECT * FROM variants "
    query += "WHERE gene = " + "'" + args.gene + "' "

    if args.filter:
        query += " AND " + args.filter
    query += " ORDER BY chrom, start"
    _report_results(args, query, gq)


def add_region_to_query(args):
    region_regex = re.compile("(\S+):(\d+)-(\d+)")
    try:
        region = region_regex.findall(args.region)[0]
    except IndexError:
        sys.exit("Malformed region (--reg) string")
    if len(region) != 3:
        sys.exit("Malformed region (--reg) string")

    chrom = region[0]
    start = region[1]
    end = region[2]
    where_clause = " WHERE chrom = " + "'" + chrom + "'" + \
                   " AND ((start BETWEEN " + start + " AND " + end + ")" +\
                   " OR (end BETWEEN " + start + " AND " + end + "))"
    args.query += where_clause


def region(parser, args):
    if os.path.exists(args.db):
        gq = GeminiQuery.GeminiQuery(args.db, out_format=args.format)
        if args.region is not None and args.gene is not None:
            sys.exit('EXITING: Choose either --reg or --gene, not both.\n')
        elif args.region is not None:
            get_region(args, gq)
        elif args.gene is not None:
            get_gene(args, gq)
Python
0
@@ -1977,14 +1977,8 @@ = %22 - WHERE chr @@ -2000,32 +2000,32 @@ chrom + %22'%22 + %5C%0A + %22 AND (( @@ -2152,23 +2152,410 @@ ery -+= where_clause += _add_to_where_clause(args.query, where_clause)%0A%0A%0Adef _add_to_where_clause(query, where_clause):%0A where_index = query.lower().find(%22where%22)%0A prefix = query%5B0:where_index%5D%0A suffix = query%5Bwhere_index + len(%22where%22):%5D%0A if where_index == -1:%0A query += %22 WHERE %22 + where_clause%0A else:%0A query = %22%7B0%7D WHERE (%7B1%7D) AND (%7B2%7D)%22.format(prefix, suffix, where_clause)%0A return query%0A %0A%0A%0Ad
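The new _add_to_where_clause helper in the diff above splices a region restriction into a query that may already contain a WHERE: it locates the existing WHERE, keeps the prefix, and ANDs the old condition with the new one. A standalone sketch of that merge logic (simplified: like the original, a naive find() would also match the word "where" inside a string literal):

def add_to_where_clause(query, condition):
    """Append `condition`, ANDing it with any existing WHERE clause."""
    idx = query.lower().find("where")
    if idx == -1:
        # no WHERE yet: just append one
        return query + " WHERE " + condition
    prefix = query[:idx]
    existing = query[idx + len("where"):]
    return "{0} WHERE ({1}) AND ({2})".format(prefix.rstrip(), existing.strip(), condition)

print(add_to_where_clause("SELECT * FROM variants", "chrom = 'chr1'"))
print(add_to_where_clause("SELECT * FROM variants WHERE qual > 20", "chrom = 'chr1'"))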
9b18db54d64e168231079255334649fb9b503f3e
Add murrine back into monodevelop-mac-dev packages list
profiles/monodevelop-mac-dev/packages.py
profiles/monodevelop-mac-dev/packages.py
import os

from bockbuild.darwinprofile import DarwinProfile

class MonoDevelopMacDevPackages:
	def __init__ (self):
		# Toolchain
		self.packages.extend ([
			'autoconf.py',
			'automake.py',
			'libtool.py',
			'gettext.py',
			'pkg-config.py'
		])

		# Base Libraries
		self.packages.extend ([
			'libpng.py',
			'libjpeg.py',
			'libtiff.py',
			'libxml2.py',
			'freetype.py',
			'fontconfig.py',
			'pixman.py',
			'cairo.py',
			'glib.py',
			'pango.py',
			'atk.py',
			'intltool.py',
			'gdk-pixbuf.py',
			'gtk+.py',
			'libglade.py',
		])

		# Theme
		self.packages.extend ([
			'librsvg.py',
			'hicolor-icon-theme.py',
			'gtk-engines.py',
			'gtk-quartz-engine.py'
		])

		# Mono
		self.packages.extend ([
			'mono.py',
			'gtk-sharp.py',
			'mono-addins.py',
		])

		self.packages = [os.path.join ('..', '..', 'packages', p) for p in self.packages]
Python
0
@@ -641,24 +641,41 @@ ngines.py',%0A +%09%09%09'murrine.py',%0A %09%09%09'gtk-quar
5ca4e1df8fc67f9b56d5ea55cb4e17e78c5c6ed5
Fix test factory
project/apps/smanager/tests/factories.py
project/apps/smanager/tests/factories.py
# Standard Library
import datetime

import rest_framework_jwt

# Third-Party
from factory import Faker  # post_generation,
from factory import Iterator
from factory import LazyAttribute
from factory import PostGenerationMethodCall
from factory import RelatedFactory
from factory import Sequence
from factory import SubFactory
from factory.django import DjangoModelFactory
from factory.django import mute_signals
from factory.fuzzy import FuzzyInteger

# Django
from django.db.models.signals import pre_delete
from django.db.models.signals import pre_save
from django.db.models.signals import m2m_changed

# First-Party
from apps.smanager.models import Repertory
from apps.smanager.models import Assignment
from apps.smanager.models import Contest
from apps.smanager.models import Entry
from apps.smanager.models import Session
from rest_framework_jwt.models import User


class AssignmentFactory(DjangoModelFactory):
    # status = Assignment.STATUS.active
    kind = Assignment.KIND.official
    # convention = SubFactory('factories.ConventionFactory')
    # person = SubFactory('factories.PersonFactory')

    class Meta:
        model = Assignment


class ContestFactory(DjangoModelFactory):
    # status = Contest.STATUS.included
    session = SubFactory('apps.smanager.tests.factories.SessionFactory')
    # award = SubFactory('factories.AwardFactory')

    class Meta:
        model = Contest


class EntryFactory(DjangoModelFactory):
    status = Entry.STATUS.new
    is_evaluation = True
    is_private = False
    session = SubFactory('apps.smanager.tests.factories.SessionFactory')
    # group = SubFactory('factories.GroupFactory')

    class Meta:
        model = Entry


class RepertoryFactory(DjangoModelFactory):
    # status = Repertory.STATUS.active
    # group = SubFactory('factories.GroupFactory')
    entry = SubFactory('apps.smanager.tests.factories.EntryFactory')

    class Meta:
        model = Repertory


class SessionFactory(DjangoModelFactory):
    status = Session.STATUS.new
    kind = Session.KIND.quartet
    is_invitational = False
    num_rounds = 2
    # convention = SubFactory('factories.ConventionFactory')

    class Meta:
        model = Session

    # @post_generation
    # def create_rounds(self, create, extracted, **kwargs):
    #     if create:
    #         for i in range(self.num_rounds):
    #             num = i + 1
    #             kind = self.num_rounds - i
    #             RoundFactory(
    #                 session=self,
    #                 num=num,
    #                 kind=kind,
    #             )


@mute_signals(pre_delete, pre_save, m2m_changed)
class UserFactory(DjangoModelFactory):
    username = Faker('uuid4')
    password = PostGenerationMethodCall('set_password', 'password')
    is_staff = False

    class Meta:
        model = User
Python
0.000001
@@ -2034,16 +2034,92 @@ quartet%0A + name = %22International Championship%22%0A district = Session.DISTRICT.bhs%0A is_i
1e4c1c7213763ba70780707e690e37a1c01e6b59
use cpp to preprocess the input files and handle multiple DGETs per line
generate_task_header.py
generate_task_header.py
#!/usr/bin/python
import os
import re
from toposort import toposort_flatten
import copy

dtask_re = re.compile('DTASK\(\s*(\w+)\s*,(.+)\)')
dget_re = re.compile('DGET\(\s*(\w+)\s*\)')

def find_tasks_in_file(filename):
    tasks = []
    with open(filename) as f:
        for line in f:
            match = dtask_re.search(line)
            if match:
                #print(match.group(0))
                tasks.append({'name': match.group(1),
                              'type': match.group(2).strip(),
                              'deps': set()})
            match = dget_re.search(line)
            if match:
                #print(match.group(0))
                tasks[-1]['deps'].add(match.group(1))
    return tasks

def find_tasks(dir):
    tasks = []
    for root, dirs, files in os.walk(dir):
        for filename in files:
            ext = os.path.splitext(filename)[1][1:]
            if ext == 'c' or ext == 'cpp':
                new_tasks = find_tasks_in_file(os.path.join(root, filename))
                tasks.extend(new_tasks)
    return tasks

def order_tasks(tasks):
    types = {}
    deps = {}
    for task in tasks:
        types[task['name']] = task['type']
        deps[task['name']] = task['deps']
    deps_copy = copy.deepcopy(deps)
    return map(lambda name: (name, types[name], deps_copy[name]),
               toposort_flatten(deps))

def generate_header(dir, header):
    tasks = order_tasks(find_tasks(dir))
    ids = {}
    id = 0
    with open(header, 'w') as f:
        f.write('''#ifndef __ALL_TASKS__
#define __ALL_TASKS__

#include "dtask.h"

''')
        for (task, type, deps) in tasks:
            f.write('#define {} 0x{:x}\n'.format(task.upper(), 1 << id))
            ids[task] = id
            id = id + 1
        f.write('\n')
        f.write('#define ALL_TASKS { \\\n')
        for (task, type, deps) in tasks:
            f.write(' {{ __dtask_{}, "{}", {}, {:d} }}, \\\n'
                    .format(task,
                            task,
                            ' | '.join(map(lambda x: x.upper(), deps)),
                            ids[task]))
        f.write(' }\n\n')
        for (task, type, deps) in tasks:
            f.write('DECLARE_DTASK({}, {});\n'.format(task, type))
        f.write('''
#endif
''')

generate_header('.', 'all_tasks.h')
Python
0
@@ -81,16 +81,34 @@ ort copy +%0Aimport subprocess %0A%0Adtask_ @@ -123,16 +123,17 @@ compile( +r 'DTASK%5C( @@ -174,16 +174,17 @@ compile( +r 'DGET%5C(%5C @@ -257,55 +257,197 @@ -with open(filename) as f:%0A for line in f +cpp = subprocess.Popen(%5B'cpp', '-w', filename%5D,%0A stdout=subprocess.PIPE)%0A lines = iter(cpp.stdout.readline, '')%0A for line in lines:%0A if line%5B0%5D != '#' :%0A @@ -721,23 +721,28 @@ +for match -= +in dget_re @@ -738,37 +738,44 @@ in dget_re. -search(line)%0A +finditer(line):%0A @@ -792,32 +792,36 @@ + + #print(match.gro @@ -819,32 +819,36 @@ match.group(0))%0A + @@ -1579,24 +1579,182 @@ r, header):%0A + #touch the header file%0A with open(header, 'w') as f:%0A os.utime(header, None)%0A f.write('#undef DTASK%5Cn')%0A f.write('#undef DGET%5Cn')%0A tasks =
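Two techniques appear in this diff: piping each source file through the C preprocessor with subprocess before scanning it (so macros and #includes are expanded), and switching from re.search, which finds only the first match per line, to re.finditer, which yields every DGET on a line. A hedged Python 3 sketch of both, independent of the dtask project:

import re
import subprocess

dget_re = re.compile(r'DGET\(\s*(\w+)\s*\)')

def scan(filename):
    # Run cpp so the scanner sees preprocessed source, not raw macros.
    cpp = subprocess.Popen(['cpp', '-w', filename], stdout=subprocess.PIPE)
    names = []
    for line in iter(cpp.stdout.readline, b''):
        if line.startswith(b'#'):
            continue  # skip cpp's "# lineno file" markers
        # finditer catches *all* DGETs on the line, not just the first
        for match in dget_re.finditer(line.decode('utf-8', 'replace')):
            names.append(match.group(1))
    return names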
f76daf38fb8998bbf5d0b663ff64572fb240fd24
bump Python API version am: 454844e69f am: 07e940c2de am: 19c98a2e99 am: 9bc9e3d185 am: 4289bd8427
src/trace_processor/python/setup.py
src/trace_processor/python/setup.py
from distutils.core import setup

setup(
    name='perfetto',
    packages=['perfetto', 'perfetto.trace_processor'],
    package_data={'perfetto.trace_processor': ['*.descriptor']},
    include_package_data=True,
    version='0.2.9',
    license='apache-2.0',
    description='Python API for Perfetto\'s Trace Processor',
    author='Perfetto',
    author_email='[email protected]',
    url='https://perfetto.dev/',
    download_url='https://github.com/google/perfetto/archive/v6.0.tar.gz',
    keywords=['trace processor', 'tracing', 'perfetto'],
    install_requires=[
        'protobuf',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)
Python
0
@@ -225,11 +225,11 @@ ='0. -2.9 +3.0 ',%0A @@ -481,12 +481,23 @@ ive/ -v6.0 +refs/tags/v20.1 .tar
1a49fafea536a8cdd992ebb6e0ae08c8e0174923
Update model.py
geostatsmodels/model.py
geostatsmodels/model.py
import numpy as np
import variograms

def opt( fct, x, y, c, parameterRange=None, meshSize=1000 ):
    '''
    Optimize parameters for a model of the semivariogram
    '''
    if parameterRange == None:
        parameterRange = [ x[1], x[-1] ]
    mse = np.zeros( meshSize )
    a = np.linspace( parameterRange[0], parameterRange[1], meshSize )
    for i in range( meshSize ):
        mse[i] = np.mean( ( y - fct( x, a[i], c ) )**2.0 )
    return a[ mse.argmin() ]

def typetest( h, a, lta, gta ):
    '''
    Input:  (h)   scalar or NumPy ndarray
            (a)   scalar representing the range parameter
            (lta) function to perfrom for values less than (a)
            (gta) function to perform for values greater than (a)
    Output: scalar or array, depending on (h)
    '''
    # if (h) is a numpy ndarray, then..
    try:
        # apply lta() to elements less than a
        lt = lta( h[ np.where( h <= a ) ] )
        # apply gta() to elements greater than a
        gt = gta( h[ np.where( h > a ) ] )
        return np.hstack((lt,gt))
    # otherwise, if (h) is a scalar..
    except TypeError:
        if h <= a:
            return lta( h )
        else:
            return gta( h )

def nugget( h, a, c ):
    '''
    Nugget model of the semivariogram
    '''
    c = float(c)
    lta = lambda x: 0+x*0
    gta = lambda x: c+x*0
    return typetest( h, 0, lta, gta )

def linear( h, a, c ):
    '''
    Linear model of the semivariogram
    '''
    a, c = float(a), float(c)
    lta = lambda x: (c/a)*x
    gta = lambda x: c+x*0
    return typetest( h, a, lta, gta )

def spherical( h, a, c ):
    '''
    Spherical model of the semivariogram
    '''
    a, c = float(a), float(c)
    lta = lambda x: c*( 1.5*(x/a) - 0.5*(x/a)**3.0 )
    gta = lambda x: c+x*0
    return typetest( h, a, lta, gta )

def exponential( h, a, c ):
    '''
    Exponential model of the semivariogram
    '''
    a, c = float( a ), float( c )
    return c*( 1.0 - np.exp( -3.0*h/a ) )

def gaussian( h, a, c ):
    '''
    Gaussian model of the semivariogram
    '''
    a, c = float( a ), float( c )
    return c*( 1.0 - np.exp( -3.0*h**2.0/a**2.0 ) )

def power( h, w, c ):
    '''
    Power model of the semivariogram
    '''
    return c*h**w

def semivariogram( fct, param ):
    '''
    Input:  (fct)   function that takes data and parameters
            (param) list or tuple of parameters
    Output: (inner) function that only takes data as input
                    parameters are set internally
    '''
    def inner( h ):
        return fct(h,*param)
    return inner

def covariance( fct, param ):
    '''
    Input:  (fct)   function that takes data and parameters
            (param) list or tuple of parameters
    Output: (inner) function that only takes data as input
                    parameters are set internally
    '''
    def inner( h ):
        return param[-1] - fct(h,*param)
    return inner

def fitmodel( data, fct, lags, tol ):
    '''
    Input:  (P)      ndarray, data
            (model)  modeling function
                      - spherical
                      - exponential
                      - gaussian
            (lags)   lag distances
            (tol)    tolerance
    Output: (covfct) function modeling the covariance
    '''
    # calculate the semivariogram
    sv = variograms.semivariogram( data, lags, tol )
    # calculate the sill
    c = np.var( data[:,2] )
    # calculate the optimal parameters
    a = opt( fct, sv[0], sv[1], c )
    # return a covariance function
    covfct = covariance( fct, ( a, c ) )
    return covfct
Python
0.000001
@@ -2273,21 +2273,20 @@ semivari -ogram +ance ( fct, p
5ff44efbcfca5316796a1ea0191b2a92894a59ee
Fix resolve error
gerber/render/render.py
gerber/render/render.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# copyright 2014 Hamilton Kibbe <[email protected]>
# Modified from code by Paulo Henrique Silva <[email protected]>

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..gerber_statements import (
    CommentStmt, UnknownStmt, EofStmt, ParamStmt, CoordStmt, ApertureStmt
)


class GerberContext(object):
    settings = {}

    x = 0
    y = 0

    aperture = 0
    interpolation = 'linear'

    image_polarity = 'positive'
    level_polarity = 'dark'

    def __init__(self):
        pass

    def set_format(self, settings):
        self.settings = settings

    def set_coord_format(self, zero_suppression, format, notation):
        self.settings['zero_suppression'] = zero_suppression
        self.settings['format'] = format
        self.settings['notation'] = notation

    def set_coord_notation(self, notation):
        self.settings['notation'] = notation

    def set_coord_unit(self, unit):
        self.settings['units'] = unit

    def set_image_polarity(self, polarity):
        self.image_polarity = polarity

    def set_level_polarity(self, polarity):
        self.level_polarity = polarity

    def set_interpolation(self, interpolation):
        self.interpolation = 'linear' if interpolation in ("G01", "G1") else 'arc'

    def set_aperture(self, d):
        self.aperture = d

    def resolve(self, x, y):
        return x or self.x, y or self.y

    def define_aperture(self, d, shape, modifiers):
        pass

    def move(self, x, y, resolve=True):
        if resolve:
            self.x, self.y = self.resolve(x, y)
        else:
            self.x, self.y = x, y

    def stroke(self, x, y):
        pass

    def line(self, x, y):
        pass

    def arc(self, x, y):
        pass

    def flash(self, x, y):
        pass

    def drill(self, x, y, diameter):
        pass

    def evaluate(self, stmt):
        if isinstance(stmt, (CommentStmt, UnknownStmt, EofStmt)):
            return

        elif isinstance(stmt, ParamStmt):
            self._evaluate_param(stmt)

        elif isinstance(stmt, CoordStmt):
            self._evaluate_coord(stmt)

        elif isinstance(stmt, ApertureStmt):
            self._evaluate_aperture(stmt)

        else:
            raise Exception("Invalid statement to evaluate")

    def _evaluate_param(self, stmt):
        if stmt.param == "FS":
            self.set_coord_format(stmt.zero_suppression, stmt.format, stmt.notation)
            self.set_coord_notation(stmt.notation)
        elif stmt.param == "MO:":
            self.set_coord_unit(stmt.mode)
        elif stmt.param == "IP:":
            self.set_image_polarity(stmt.ip)
        elif stmt.param == "LP:":
            self.set_level_polarity(stmt.lp)
        elif stmt.param == "AD":
            self.define_aperture(stmt.d, stmt.shape, stmt.modifiers)

    def _evaluate_coord(self, stmt):
        if stmt.function in ("G01", "G1", "G02", "G2", "G03", "G3"):
            self.set_interpolation(stmt.function)
        if stmt.op == "D01":
            self.stroke(stmt.x, stmt.y)
        elif stmt.op == "D02":
            self.move(stmt.x, stmt.y)
        elif stmt.op == "D03":
            self.flash(stmt.x, stmt.y)

    def _evaluate_aperture(self, stmt):
        self.set_aperture(stmt.d)
Python
0.000006
@@ -1881,38 +1881,104 @@ -return x or self.x, y or self. +x = x if x is not None else self.x%0A y = y if y is not None else self.y%0A return x, y%0A%0A
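The one-line change here is a classic falsy-zero fix: `x or self.x` falls back to the stored coordinate whenever x is 0, because 0 is falsy in Python, and in Gerber files 0 is a perfectly valid coordinate. `x if x is not None else self.x` only falls back on None. A tiny demonstration:

stored = 7
x = 0  # a legitimate coordinate

print(x or stored)                     # 7  -- wrong: 0 is falsy, so it is discarded
print(x if x is not None else stored)  # 0  -- right: only None triggers the fallback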
75ece1dbe7d101948bf8810bc4050764be2aca47
add explanation to warm_start error for lightfm
polara/recommender/external/lightfm/lightfmwrapper.py
polara/recommender/external/lightfm/lightfmwrapper.py
import numpy as np
from numpy.lib.stride_tricks import as_strided

from lightfm import LightFM

from polara.recommender.models import RecommenderModel
from polara.lib.similarity import stack_features
from polara.tools.timing import track_time


class LightFMWrapper(RecommenderModel):
    def __init__(self, *args, item_features=None, user_features=None, **kwargs):
        super(LightFMWrapper, self).__init__(*args, **kwargs)
        self.method='LightFM'
        self._rank = 10
        self.fit_method = 'fit'
        self.fit_params = {}

        self.item_features = item_features
        self.item_features_labels = None
        self.item_alpha = 0.0
        self.item_identity = True
        self._item_features_csr = None

        self.user_features = user_features
        self.user_features_labels = None
        self.user_alpha = 0.0
        self.user_identity = True
        self._user_features_csr = None

        self.loss = 'warp'
        self.learning_schedule = 'adagrad'
        self.learning_rate = 0.05
        self.max_sampled = 10
        self.seed = 0
        self._model = None

    @property
    def rank(self):
        return self._rank

    @rank.setter
    def rank(self, new_value):
        if new_value != self._rank:
            self._rank = new_value
            self._is_ready = False
            self._recommendations = None

    def build(self):
        self._model = LightFM(no_components=self.rank,
                              item_alpha=self.item_alpha,
                              user_alpha=self.user_alpha,
                              loss=self.loss,
                              learning_rate=self.learning_rate,
                              learning_schedule=self.learning_schedule,
                              max_sampled=self.max_sampled,
                              random_state=self.seed)
        fit = getattr(self._model, self.fit_method)

        matrix = self.get_training_matrix(sparse_format='coo')  # as reqired by LightFM

        try:
            item_index = self.data.index.itemid.training
        except AttributeError:
            item_index = self.data.index.itemid

        if self.item_features is not None:
            item_features = self.item_features.reindex(
                item_index.old.values,
                fill_value=[])
            self._item_features_csr, self.item_features_labels = stack_features(
                item_features,
                add_identity=self.item_identity,
                normalize=True,
                dtype='f4')

        if self.user_features is not None:
            user_features = self.user_features.reindex(
                self.data.index.userid.training.old.values,
                fill_value=[])
            self._user_features_csr, self.user_features_labels = stack_features(
                user_features,
                add_identity=self.user_identity,
                normalize=True,
                dtype='f4')

        with track_time(self.training_time, verbose=self.verbose, model=self.method):
            fit(matrix,
                item_features=self._item_features_csr,
                user_features=self._user_features_csr,
                **self.fit_params)

    def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
        if self.data.warm_start:
            raise NotImplementedError

        slice_data = self._slice_test_data(test_data, start, stop)

        all_items = self.data.index.itemid.new.values
        n_users = stop - start
        n_items = len(all_items)
        test_shape = (n_users, n_items)

        test_users_index = test_users[start:stop].astype('i4', copy=False)
        test_items_index = all_items.astype('i4', copy=False)

        # use stride tricks to avoid unnecessary copies of repeated indices
        # have to conform with LightFM's dtype to avoid additional copies
        itemsize = np.dtype('i4').itemsize
        scores = self._model.predict(
            as_strided(test_users_index, test_shape, (itemsize, 0)).ravel(),
            as_strided(test_items_index, test_shape, (0, itemsize)).ravel(),
            user_features=self._user_features_csr,
            item_features=self._item_features_csr,
            num_threads=self.fit_params.get('num_threads', 1)
        ).reshape(test_shape)

        return scores, slice_data
Python
0
@@ -3300,16 +3300,45 @@ tedError +('Not supported by LightFM.') %0A%0A
2025dae49acd6827d0e961e9be345ad9cb3f1086
Add pytables to save cosine similarity
pygraphc/similarity/LogTextSimilarity.py
pygraphc/similarity/LogTextSimilarity.py
from pygraphc.preprocess.PreprocessLog import PreprocessLog
from pygraphc.similarity.StringSimilarity import StringSimilarity
from itertools import combinations


class LogTextSimilarity(object):
    """A class for calculating cosine similarity between a log pair. This class is intended
       for non-graph based clustering method.
    """
    def __init__(self, logtype, logs):
        """The constructor of class LogTextSimilarity.

        Parameters
        ----------
        logtype : str
            Type for event log, e.g., auth, syslog, etc.
        logs    : list
            List of every line of original logs.
        """
        self.logtype = logtype
        self.logs = logs

    def get_cosine_similarity(self):
        """Get cosine similarity from a pair of log lines in a file.

        Returns
        -------
        cosine_similarity : dict
            Dictionary of cosine similarity in non-graph clustering.
            Key: (log_id1, log_id2), value: cosine similarity distance.
        """
        preprocess = PreprocessLog(self.logtype)
        preprocess.preprocess_text(self.logs)
        events = preprocess.events_text

        # calculate cosine similarity
        cosines_similarity = {}
        for log_pair in combinations(range(preprocess.loglength), 2):
            cosines_similarity[log_pair] = StringSimilarity.get_cosine_similarity(events[log_pair[0]]['tf-idf'],
                                                                                  events[log_pair[1]]['tf-idf'],
                                                                                  events[log_pair[0]]['length'],
                                                                                  events[log_pair[1]]['length'])
        return cosines_similarity
Python
0
@@ -154,16 +154,37 @@ nations%0A +from tables import *%0A %0A%0Aclass @@ -352,24 +352,146 @@ od.%0A %22%22%22%0A + class Cosine(IsDescription):%0A source = Int32Col()%0A dest = Int32Col()%0A similarity = Float32Col()%0A%0A def __in @@ -1294,16 +1294,365 @@ s_text%0A%0A + # h5 configuration for cosine similarity%0A h5cosine_file = open_file('cosine.h5', mode='w', title='Cosine similarity')%0A h5group = h5file.create_group(%22/%22, 'cosine_group', 'Cosine similarity group')%0A h5table = h5file.create_table(h5group, 'cosine_table', Cosine, %22Cosine similarity table%22)%0A h5cosine = h5table.row%0A%0A @@ -2118,24 +2118,24 @@ %5B'length'%5D,%0A - @@ -2235,16 +2235,295 @@ ngth'%5D)%0A + h5cosine%5B'source'%5D = log_pair%5B0%5D%0A h5cosine%5B'dest'%5D = log_pair%5B1%5D%0A h5cosine%5B'similarity'%5D = cosines_similarity%5Blog_pair%5D%0A h5cosine.append()%0A%0A # write to file and then close%0A h5table.flush()%0A h5cosine_file.close()%0A%0A
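The diff stores every pairwise cosine value in an HDF5 file via PyTables: declare a row schema with IsDescription, create a group and a table, fill row objects, append, and flush. Note that the diff itself mixes two handle names (h5cosine_file vs h5file); the sketch below uses one handle consistently and invented sample data, so treat it as an illustration of the PyTables API rather than the project's exact code:

import tables

class Cosine(tables.IsDescription):
    source = tables.Int32Col()
    dest = tables.Int32Col()
    similarity = tables.Float32Col()

h5 = tables.open_file('cosine.h5', mode='w', title='Cosine similarity')
group = h5.create_group('/', 'cosine_group', 'Cosine similarity group')
table = h5.create_table(group, 'cosine_table', Cosine, 'Cosine similarity table')

row = table.row
for (src, dst), sim in {(0, 1): 0.5, (0, 2): 0.25}.items():
    row['source'] = src
    row['dest'] = dst
    row['similarity'] = sim
    row.append()  # queue the row for the table

table.flush()  # write buffered rows to disk
h5.close()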
446bdb2d9906c39b0917dfd1cc5ce35230f78cfa
rename an argument
pytablereader/spreadsheet/excelloader.py
pytablereader/spreadsheet/excelloader.py
# encoding: utf-8

"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

from __future__ import absolute_import, unicode_literals

from pytablereader import DataError
from six.moves import range
from tabledata import TableData

from .._logger import FileSourceLogger
from .._validator import FileValidator
from ..error import OpenError
from .core import SpreadSheetLoader


class ExcelTableFileLoader(SpreadSheetLoader):
    """
    A file loader class to extract tabular data from Microsoft Excel |TM|
    files.

    :param str file_path: Path to the loading Excel workbook file.

    .. py:attribute:: table_name

        Table name string. Defaults to ``%(sheet)s``.

    .. py:attribute:: start_row

        The first row to search header row.
    """

    @property
    def format_name(self):
        return "excel"

    @property
    def _sheet_name(self):
        return self._worksheet.name

    @property
    def _row_count(self):
        return self._worksheet.nrows

    @property
    def _col_count(self):
        return self._worksheet.ncols

    def __init__(self, file_path=None, quoting_flags=None, type_hints=None):
        super(ExcelTableFileLoader, self).__init__(file_path, quoting_flags, type_hints)

        self._validator = FileValidator(file_path)
        self._logger = FileSourceLogger(self)

    def load(self):
        """
        Extract tabular data as |TableData| instances from an Excel file.
        |spreadsheet_load_desc|

        :return:
            Loaded |TableData| iterator.
            |TableData| created for each sheet in the workbook.
            |load_table_name_desc|

            ===================  ====================================
            Format specifier     Value after the replacement
            ===================  ====================================
            ``%(filename)s``     Filename of the workbook
            ``%(sheet)s``        Name of the sheet
            ``%(format_name)s``  ``"spreadsheet"``
            ``%(format_id)s``    |format_id_desc|
            ``%(global_id)s``    |global_id|
            ===================  ====================================
        :rtype: |TableData| iterator
        :raises pytablereader.DataError:
            If the header row is not found.
        :raises pytablereader.error.OpenError:
            If failed to open the source file.
        """

        import xlrd

        self._validate()
        self._logger.logging_load()

        try:
            workbook = xlrd.open_workbook(self.source)
        except xlrd.biffh.XLRDError as e:
            raise OpenError(e)

        for worksheet in workbook.sheets():
            self._worksheet = worksheet

            if self._is_empty_sheet():
                continue

            self.__extract_not_empty_col_idx()

            try:
                start_row_idx = self._get_start_row_idx()
            except DataError:
                continue

            rows = [
                self.__get_row_values(row_idx)
                for row_idx in range(start_row_idx + 1, self._row_count)
            ]

            self.inc_table_count()

            yield TableData(
                self._make_table_name(),
                self.__get_row_values(start_row_idx),
                rows,
                dp_extractor=self.dp_extractor,
                type_hints=self.type_hints,
            )

    def _is_empty_sheet(self):
        return any(
            [
                self._col_count == 0,
                self._row_count <= 1,  # nrows == 1 means exists header row only
            ]
        )

    def _get_start_row_idx(self):
        for row_idx in range(self.start_row, self._row_count):
            if self.__is_header_row(row_idx):
                break
        else:
            raise DataError("header row not found")

        return row_idx

    def __is_header_row(self, row_idx):
        from xlrd import XL_CELL_EMPTY

        return XL_CELL_EMPTY not in self._worksheet.row_types(
            row_idx, self._start_col_idx, self._end_col_idx + 1
        )

    @staticmethod
    def __is_empty_cell_types(cell_type_list):
        from xlrd import XL_CELL_EMPTY

        return all([cell_type == XL_CELL_EMPTY for cell_type in cell_type_list])

    def __extract_not_empty_col_idx(self):
        col_idx_list = [
            col_idx
            for col_idx in range(self._col_count)
            if not self.__is_empty_cell_types(self._worksheet.col_types(col_idx))
        ]

        self._start_col_idx = min(col_idx_list)
        self._end_col_idx = max(col_idx_list)

    def __get_row_values(self, row_idx):
        return self._worksheet.row_values(row_idx, self._start_col_idx, self._end_col_idx + 1)
Python
0.000123
@@ -4140,21 +4140,17 @@ ell_type -_list +s ):%0A @@ -4257,21 +4257,17 @@ ell_type -_list +s %5D)%0A%0A
e384aee40e41d36c2ff32b76a1d4d162e0c9cecc
Stopping for the night. corrected JsLibraryAnalyser.js
pythonium/BowerLoad/JsLibraryAnalyser.py
pythonium/BowerLoad/JsLibraryAnalyser.py
__author__ = 'Jason'
import glob
import os


class Mapper:
    #map out files -> classes -> functions+returns
    moduleMap = {}
    RootJsfileList = []

    def __init__(self):
        pass

    def _find_entry_Points(self):
        os.chdir("/mydir")
        for file in glob.glob("*.js"):
            print(file)
        pass


class Skelmaker:
    #Create Skeleton Python Modules For Easy Ide Intergration
    def __init__(self):
        pass
Python
0.999996
@@ -37,16 +37,31 @@ port os%0A +import fnmatch%0A %0A%0Aclass @@ -86,16 +86,18 @@ ut files +%7B%7D -%3E clas @@ -99,16 +99,18 @@ classes +%7B%7D -%3E func @@ -118,21 +118,37 @@ ions -+ +%7B%7Binputs:%5B%5D, returns +:%5B%5D%7D %0A +# modu @@ -159,34 +159,161 @@ p = -%7B%7D%0A RootJsfileList = %5B%5D +files%7Bclasses%7Bfunctions%7B%7Binputs:%5B%5D,returns:%5B%5D%7D%7D%0A moduleMap = %7B%22files%22:%7B%7D%7D%0A RootJsfileList = %5B%5D%0A RootDir = os.curdir() #or js library folder path %0A @@ -358,97 +358,168 @@ def -_ find_ -entry_Points(self):%0A os.chdir(%22/mydir%22)%0A for file in glob.glob(%22 +all_js_files(self,RootDir=RootDir):%0A for root, dirnames, filenames in os.walk(RootDir):%0A for filename in fnmatch.filter(filenames, ' *.js -%22 +' ):%0A @@ -533,19 +533,57 @@ + -print(file) + self.moduleMap%5B%22files%22%5D += %7Bstr(filename):%22%22%7D %0A
a5840150f7089b1e00296f12254ef161f8ce93b6
fix foo("bar").baz()
pythonscript/pythonscript_transformer.py
pythonscript/pythonscript_transformer.py
from ast import Str
from ast import Expr
from ast import Call
from ast import Name
from ast import Assign
from ast import Attribute
from ast import FunctionDef
from ast import NodeTransformer


class PythonScriptTransformer(NodeTransformer):

    def visit_ClassDef(self, node):
        name = Name(node.name, None)
        yield Assign([name], Call(Name('JSObject', None), None, None, None, None))
        yield Assign([Name('parents', None)], Call(Name('JSArray', Name), None, None, None, None))
        if node.bases:
            yield Expr(
                Call(
                    Attribute(
                        Name('parents', None),
                        'push',
                        None
                    ),
                    node.bases,
                    None,
                    None,
                    None
                )
            )
        for item in node.body:
            yield self.generic_visit(item)
            if isinstance(item, FunctionDef):
                item_name = item.name
                item.name = closure_name = '%s__%s' % (node.name, item_name)
                yield Assign([Attribute(name, item_name, None)], Name(closure_name, None))
            elif isinstance(item, Assign):
                item_name = item.targets[0].id
                item.targets[0].id = closure_name = '%s__%s' % (name.id, item_name)
                yield Assign([Attribute(name, item_name, None)], Name(closure_name, None))
        yield Assign([name], Call(Name('create_class', None), [Str(node.name), Name('parents', None), Name(name.id, None)], None, None, None))

    def visit_Attribute(self, node):
        return Call(Name('get_attribute', None), [self.generic_visit(node.value), Str(node.attr)], None, None, None)

    def visit_Assign(self, node):
        attr = node.targets[0]
        if isinstance(attr, Attribute):
            return Expr(Call(Name('set_attribute', None), [attr.value, Str(attr.attr), node.value], None, None, None))
        else:
            return self.generic_visit(node)

    def visit_Call(self, node):
        if hasattr(node.func, 'id') and node.func.id in ('JS', 'toString'):
            return self.generic_visit(node)
        return Call(
            Call(
                Name('get_attribute', None),
                [self.visit(node.func), Str('__call__')],
                None,
                None,
                None,
            ),
            map(self.visit, node.args),
            None,
            None,
            None,
        )
Python
0.999868
@@ -1686,32 +1686,24 @@ one), %5Bself. -generic_ visit(node.v @@ -1741,24 +1741,95 @@ one, None)%0A%0A + def visit_Expr(self, node):%0A return self.visit(node.value)%0A%0A def visi
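The bug behind fix foo("bar").baz() is the difference between NodeTransformer.generic_visit, which visits a node's children but never dispatches on the node itself, and visit, which does. Calling generic_visit on node.value in visit_Attribute meant a Call nested under an Attribute was never rewritten. A minimal sketch of the distinction, using a toy transformer rather than pythonscript's:

import ast

class Renamer(ast.NodeTransformer):
    def visit_Name(self, node):
        # uppercase every identifier, just to make the rewrite visible
        return ast.copy_location(ast.Name(id=node.id.upper(), ctx=node.ctx), node)

    def visit_Attribute(self, node):
        # visit() dispatches to visit_Name/visit_Call/... on the value itself;
        # generic_visit() would only recurse into the value's children.
        node.value = self.visit(node.value)
        return node

tree = ast.parse("foo.bar")
print(ast.dump(Renamer().visit(tree)))  # the Name id becomes 'FOO'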
3b2730edbbef3f32aef6682d9d446d8416fc7562
add setWindowMinimizeButtonHint() for dialog
quite/controller/dialog_ui_controller.py
quite/controller/dialog_ui_controller.py
from . import WidgetUiController
from ..gui import Shortcut


class DialogUiController(WidgetUiController):
    def __init__(self, parent=None, ui_file=None):
        super().__init__(parent, ui_file)
        Shortcut('ctrl+w', self.w).excited.connect(self.w.close)

    def exec(self):
        return self.w.exec()

    @classmethod
    def class_exec(cls, *args, **kwargs):
        return cls(*args, **kwargs).exec()
Python
0
@@ -53,16 +53,45 @@ hortcut%0A +from PySide.QtCore import Qt%0A %0A%0Aclass @@ -340,16 +340,149 @@ exec()%0A%0A + def setWindowMinimizeButtonHint(self):%0A self.w.setWindowFlags(Qt.WindowMinimizeButtonHint %7C Qt.WindowMaximizeButtonHint)%0A%0A @cla
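The diff adds a helper that ORs Qt.WindowMinimizeButtonHint and Qt.WindowMaximizeButtonHint into the dialog's window flags, the usual way to give a QDialog minimize/maximize buttons. A hedged PySide (Qt 4) sketch of the same call on a bare QDialog; keep in mind that setWindowFlags replaces the whole flag set, which is also what the committed code does:

from PySide.QtCore import Qt
from PySide.QtGui import QApplication, QDialog

app = QApplication([])
dlg = QDialog()
# QDialog windows have no minimize button by default; OR the hints in.
dlg.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint)
dlg.show()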
a08919c24e1af460ccba8820eb6646492848621e
Bump Version 0.5.4
libmc/__init__.py
libmc/__init__.py
from ._client import (
    PyClient, ThreadUnsafe,
    encode_value,
    MC_DEFAULT_EXPTIME,
    MC_POLL_TIMEOUT,
    MC_CONNECT_TIMEOUT,
    MC_RETRY_TIMEOUT,

    MC_HASH_MD5,
    MC_HASH_FNV1_32,
    MC_HASH_FNV1A_32,
    MC_HASH_CRC_32,

    MC_RETURN_SEND_ERR,
    MC_RETURN_RECV_ERR,
    MC_RETURN_CONN_POLL_ERR,
    MC_RETURN_POLL_TIMEOUT_ERR,
    MC_RETURN_POLL_ERR,
    MC_RETURN_MC_SERVER_ERR,
    MC_RETURN_PROGRAMMING_ERR,
    MC_RETURN_INVALID_KEY_ERR,
    MC_RETURN_INCOMPLETE_BUFFER_ERR,
    MC_RETURN_OK,
)

__VERSION__ = '0.5.3'
__version__ = "v0.5.3-3-g3eb1a97"
__author__ = "mckelvin"
__email__ = "[email protected]"
__date__ = "Sat Jul 11 14:24:54 2015 +0800"


class Client(PyClient):
    pass


__all__ = [
    'Client', 'ThreadUnsafe', '__VERSION__',
    'encode_value',
    'MC_DEFAULT_EXPTIME',
    'MC_POLL_TIMEOUT',
    'MC_CONNECT_TIMEOUT',
    'MC_RETRY_TIMEOUT',

    'MC_HASH_MD5',
    'MC_HASH_FNV1_32',
    'MC_HASH_FNV1A_32',
    'MC_HASH_CRC_32',

    'MC_RETURN_SEND_ERR',
    'MC_RETURN_RECV_ERR',
    'MC_RETURN_CONN_POLL_ERR',
    'MC_RETURN_POLL_TIMEOUT_ERR',
    'MC_RETURN_POLL_ERR',
    'MC_RETURN_MC_SERVER_ERR',
    'MC_RETURN_PROGRAMMING_ERR',
    'MC_RETURN_INVALID_KEY_ERR',
    'MC_RETURN_INCOMPLETE_BUFFER_ERR',
    'MC_RETURN_OK',
]
Python
0
@@ -536,15 +536,15 @@ _ = -' +%22 0.5. -3' +4%22 %0A__v @@ -564,20 +564,9 @@ 0.5. -3-3-g3eb1a97 +4 %22%0A__ @@ -651,27 +651,27 @@ = %22 -Sat +Thu Jul 1 -1 14:24:54 +6 18:20:00 201
778f284c2208438b7bc26226cc295f80de6343e0
Use loop.add_signal_handler for handling SIGWINCH.
libpymux/utils.py
libpymux/utils.py
import array
import asyncio
import fcntl
import signal
import termios


def get_size(stdout):
    # Thanks to fabric (fabfile.org), and
    # http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
    """
    Get the size of this pseudo terminal.

    :returns: A (rows, cols) tuple.
    """
    #assert stdout.isatty()

    # Buffer for the C call
    buf = array.array('h', [0, 0, 0, 0 ])

    # Do TIOCGWINSZ (Get)
    #fcntl.ioctl(stdout.fileno(), termios.TIOCGWINSZ, buf, True)
    fcntl.ioctl(0, termios.TIOCGWINSZ, buf, True)

    # Return rows, cols
    return buf[0], buf[1]


def set_size(stdout_fileno, rows, cols):
    """
    Set terminal size.

    (This is also mainly for internal use. Setting the terminal size
    automatically happens when the window resizes. However, sometimes the
    process that created a pseudo terminal, and the process that's attached to
    the output window are not the same, e.g. in case of a telnet connection,
    or unix domain socket, and then we have to sync the sizes by hand.)
    """
    # Buffer for the C call
    buf = array.array('h', [rows, cols, 0, 0 ])

    # Do: TIOCSWINSZ (Set)
    fcntl.ioctl(stdout_fileno, termios.TIOCSWINSZ, buf)


def alternate_screen(write):
    class Context:
        def __enter__(self):
            # Enter alternate screen buffer
            write(b'\033[?1049h')

        def __exit__(self, *a):
            # Exit alternate screen buffer and make cursor visible again.
            write(b'\033[?1049l')
            write(b'\033[?25h')
    return Context()


def call_on_sigwinch(callback):
    """
    Set a function to be called when the SIGWINCH signal is received.
    (Normally, on terminal resize.)
    """
    def sigwinch_handler(n, frame):
        loop = asyncio.get_event_loop()
        loop.call_soon(callback)
    signal.signal(signal.SIGWINCH, sigwinch_handler)
Python
0
@@ -1587,19 +1587,30 @@ callback +, loop=None ):%0A - %22%22%22%0A @@ -1731,38 +1731,23 @@ -def sigwinch_handler(n, frame) +if loop is None :%0A @@ -1783,16 +1783,45 @@ t_loop() +%0A%0A def sigwinch_handler(): %0A @@ -1854,21 +1854,31 @@ -signal.signal +loop.add_signal_handler (sig
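The commit swaps signal.signal for asyncio's loop.add_signal_handler, which runs the callback inside the event loop rather than in a bare interrupt-time signal handler, and lets the caller pass a loop explicitly. A minimal sketch of the pattern (SIGWINCH is Unix-only; the callback name is illustrative):

import asyncio
import signal

def on_resize():
    print('terminal resized')

loop = asyncio.new_event_loop()
# The handler runs from within the event loop, so it may safely touch
# loop state; plain signal.signal() handlers may not.
loop.add_signal_handler(signal.SIGWINCH, on_resize)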
fa57fa679b575ce871af3c4769828f400e6ab28b
bump version 2.1.3 for issue #70
premailer/__init__.py
premailer/__init__.py
from premailer import Premailer, transform

__version__ = '2.1.2'
Python
0
@@ -56,11 +56,11 @@ = '2.1. -2 +3 '%0A
062ce0f6fd9f6940b6c9ca45185e98e5d566e916
add timestamp to masterbugtable
preproc/buildlists.py
preproc/buildlists.py
#!/usr/bin/python
import json, glob, urllib, os, urllib2,csv,StringIO,re, sys
from pprint import pprint
from urlparse import urlparse
import socket
socket.setdefaulttimeout(240) # Seconds. Loading Bugzilla searches can be slow

if os.path.exists('/home/hallvors/lib'): # custom path for tldextract module on hallvord.com host.
	sys.path.insert(1, '/home/hallvors/lib/tldextract-1.2-py2.6.egg') # TODO: remove or fix this for people.mozilla :)
import tldextract

os.chdir(os.path.dirname(os.path.abspath(__file__))) # For CRON usage..

# We generally want to handle the main domains (i.e. google.com for www.google.com) instead of the full host names
# however, for some major sites with lots of distinct properties we loose too much useful information if we classify it just by domain name..
# conf['subdomainsWeWant'] is a list of hostnames that should not be reduced to domain names. For these, we'll strip any *www.* prefix, but
# no other subdomains

conf = {
	'weWantSubdomainsFor': r'(\.google\.com|\.live\.com|\.yahoo\.com|\.js$)'
} # the latter is not, strictly speaking, a subdomain..

# http://stackoverflow.com/questions/8230315/python-sets-are-not-json-serializable :-(
class SetEncoder(json.JSONEncoder):
	def default(self, obj):
		if isinstance(obj, set):
			return list(obj)
		return json.JSONEncoder.default(self, obj)

f = open('data/aliases.json')
aliases = json.load(f)
f.close()

def main():
	urltemplate = 'https://api-dev.bugzilla.mozilla.org/latest/bug?component=Mobile&product=Tech%20Evangelism&include_fields=id,summary,creation_time,last_change_time,status,resolution,depends_on,whiteboard,cf_last_resolved,url,priority' # removed ",flags" to work around bugzilla bug..
	outputfn = 'test-output'
	pprint('Getting '+urltemplate)
	req = urllib2.Request(urltemplate)
	req.add_header('Accept', 'application/json')
	# req.add_header('User-agent', 'Mozilla/5.0 (Windows NT 5.1; rv:27.0) Gecko/20100101 Firefox/27.0')
	if 1 : # get data from bugzilla (slow..)
		bzresponse = urllib2.urlopen(req, timeout=240)
		bzdata = bzresponse.read()
		bzdataobj = json.loads(bzdata)
		print 'Writing '+outputfn+'.json'
		f = open('./data/bugzilla/'+outputfn+'.json', 'w')
		f.write(json.dumps(bzdataobj, indent=2))
		f.close()
	else : #TEMPORARY code to speed up testing:
		f = open('./data/bugzilla/'+outputfn+'.json', 'r')
		bzdataobj = json.load(f)
		f.close()

	masterBugTable = {'hostIndex':{}, 'bugs':{}, 'lists':{}}
	for fn in glob.glob('..' + os.sep +'data' + os.sep + '*.json'):
		f = open(fn)
		data = json.load(f)
		listname = os.path.splitext(os.path.basename(fn))[0]
		if listname :
			masterBugTable['lists'][listname] = data
		f.close()

	metrics={'allOpenBugsForAllLists':set(), 'hostsWithOpenBugs':set(), 'totalUniqueHosts':set()}
	for bug in bzdataobj['bugs'] :
		if re.search(r'\[meta\]', bug['summary'], re.I) :
			# We don't care about [meta] stuff. Why? Well, we're post-post-modern, that's why.
			continue
		masterBugTable['bugs'][bug['id']] = bug;
		# extract host names:
		hostnames = set()
		if 'url' in bug :
			hostnames = hostsFromText(bug['url']) # extract host name from URL field
		if 'summary' in bug :
			hostnames = hostnames.union(hostsFromText(bug['summary'])) # ...and extract host name(s) from summary
		#
		for host in hostnames :
			if not host in masterBugTable['hostIndex'] :
				masterBugTable['hostIndex'][host] = {'open':[], 'resolved':[]}
			metrics['totalUniqueHosts'].add(host)
			if bug['status'] in ['RESOLVED', 'CLOSED', 'VERIFIED'] :
				masterBugTable['hostIndex'][host]['resolved'].append(bug['id'])
			else :
				masterBugTable['hostIndex'][host]['open'].append(bug['id'])
				metrics['allOpenBugsForAllLists'].add(bug['id'])
				metrics['hostsWithOpenBugs'].add(host)
			# Done looking at bug status, updating structures and calculating metrics
		# Done processing each host mentioned for this bug
	# Done processing all bugs in the data dump, one at a time

	# Calculate metrics
	masterBugTable['metrics'] = {"numOpenBugs":len(metrics['allOpenBugsForAllLists']), "numHosts":len(metrics['totalUniqueHosts']), "numHostsWithOpenBugs":len(metrics['hostsWithOpenBugs'])}

	# Write a JS(ON) file
	print 'Writing masterbugtable.js'
	f = open('../data/masterbugtable.js', 'w')
	f.write('/* This file is generated by preproc/buildlists.py - do not edit */\nvar masterBugTable = '+json.dumps(masterBugTable, indent=2, cls=SetEncoder))
	f.close()
	return;

def hostsFromText(text):
	# We need to extract any hosts names mentioned in
	# a) URL field
	# b) Summary
	# c) Alias words from aliases.json (i.e. "Hotmail" in summary -> file under live.com)
	# Also, we want domain names without subdomains (no www. or m. prefixes, for example)
	# This is a generic method to extract one or more domain names from a text string - the argument can be a URL or some text with a host name mentioned
	text = text.strip().lower()
	hosts = []
	text = re.split('\s', text)
	for word in text :
		word = word.strip('.()!?,[]') # make sure we don't assume a random 'foo.' is a domain due to a sentence-delimiting dot in summary.. Also removing some other characters that might be adjacent..
		if '.' in word: # now go on to assume the first white-space separated string that contains at least one internal period is a domain name
			hosts.append(word)
		else :
			#
			for hostname in aliases :
				if aliases[hostname] in word :
					#print ('alias match '+hostname+' in '+word+' '+' '.join(text))
					hosts.append(hostname)
	# now we've listed any words/character sequences that contain internal dots.
	# However, we do not want www. or m. or similar prefixes, so we'll run through the list and use
	# tldextract to remove those
	uniquehosts = set()
	for hostname in hosts :
		parts = tldextract.extract(hostname)
		if re.search(conf['weWantSubdomainsFor'], hostname, re.I) and not re.search('^www', parts[0]) :
			hostname = '.'.join( parts[0:3])
		else :
			hostname = '.'.join( parts[1:3])
		uniquehosts.add(hostname)
	# That's it! (We hope..)
	return uniquehosts

if __name__ == "__main__":
	main()
Python
0.000016
@@ -70,16 +70,22 @@ ,re, sys +, time %0Afrom pp @@ -4154,16 +4154,66 @@ ugs'%5D)%7D%0A + masterBugTable%5B'timestamp'%5D = time.time()%0A %09# Write
0b42303f94d0662b0b50439b9c2fdfa649633bd5
Remove newline
librb/rbRadios.py
librb/rbRadios.py
import requests from librb.rbConstants import endpoints, BASE_URL def request(endpoint, **kwargs): fmt = kwargs.get("format", "json") if fmt == "xml": content_type = "application/%s" % fmt else: content_type = "application/%s" % fmt headers = {"content-type": content_type, "User-Agent": "pyradios/dev"} params = kwargs.get("params", {}) url = BASE_URL + endpoint resp = requests.get(url, headers=headers, params=params) if resp.status_code == 200: if fmt == "xml": return resp.text return resp.json() return resp.raise_for_status() class EndPointBuilder: def __init__(self, fmt="json"): self.fmt = fmt self._option = None self._endpoint = None @property def endpoint(self): return endpoints[self._endpoint][self._option] def produce_endpoint(self, **parts): self._option = len(parts) self._endpoint = parts["endpoint"] parts.update({"fmt": self.fmt}) return self.endpoint.format(**parts) class RadioBrowser: def __init__(self, fmt="json"): self.fmt = fmt self.builder = EndPointBuilder(fmt=self.fmt) def countries(self, filter=""): endpoint = self.builder.produce_endpoint(endpoint="countries") return request(endpoint) def codecs(self, filter=""): endpoint = self.builder.produce_endpoint(endpoint="codecs") return request(endpoint) def states(self, country="", filter=""): endpoint = self.builder.produce_endpoint( endpoint="states", country=country, filter=filter ) return request(endpoint) def languages(self, filter=""): endpoint = self.builder.produce_endpoint(endpoint="languages", filter=filter) return request(endpoint) def tags(self, filter=""): endpoint = self.builder.produce_endpoint(endpoint="tags", filter=filter) return request(endpoint) def stations(self, **params): endpoint = self.builder.produce_endpoint(endpoint="stations") kwargs = {} if params: kwargs.update({"params": params}) return request(endpoint, **kwargs) def stations_byid(self, id): endpoint = self.builder.produce_endpoint( endpoint="stations", by="byid", search_term=id ) return request(endpoint) def stations_byuuid(self, uuid): endpoint = self.builder.produce_endpoint( endpoint="stations", by="byuuid", search_term=uuid ) return request(endpoint) def stations_byname(self, name): endpoint = self.builder.produce_endpoint( endpoint="stations", by="byname", search_term=name ) return request(endpoint) def stations_bynameexact(self, nameexact): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bynameexact", search_term=nameexact ) return request(endpoint) def stations_bycodec(self, codec): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bycodec", search_term=codec ) return request(endpoint) def stations_bycodecexact(self, codecexact): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bycodecexact", search_term=codecexact ) return request(endpoint) def stations_bycountry(self, country): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bycountry", search_term=country ) return request(endpoint) def stations_bycountryexact(self, countryexact): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bycountryexact", search_term=countryexact ) return request(endpoint) def stations_bystate(self, state): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bystate", search_term=state ) return request(endpoint) def stations_bystateexact(self, stateexact): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bystateexact", search_term=stateexact ) return request(endpoint) # def stations_bylanguage(self, language): endpoint = 
self.builder.produce_endpoint( endpoint="stations", by="bylanguage", search_term=language ) return request(endpoint) def stations_bylanguageexact(self, languageexact): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bylanguageexact", search_term=languageexact ) return request(endpoint) def stations_bytag(self, tag): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bytag", search_term=tag ) return request(endpoint) def stations_bytagexact(self, tagexact): endpoint = self.builder.produce_endpoint( endpoint="stations", by="bytagexact", search_term=tagexact ) return request(endpoint) def playable_station(self, station_id): endpoint = self.builder.produce_endpoint( endpoint="playable_station", station_id=station_id, ver="v2" ) return request(endpoint) def station_search(self, params, **kwargs): # http://www.radio-browser.info/webservice#Advanced_station_search assert isinstance(params, dict), "params is not a dict" kwargs["params"] = params endpoint = self.builder.produce_endpoint(endpoint="station_search") return request(endpoint, **kwargs)
Python
0.000073
@@ -5633,29 +5633,28 @@ request(endpoint, **kwargs) -%0A
3cdbcc16450faa958e27f60d5f2adc7a943562d8
Fix MacOS build
platforms/osx/build_framework.py
platforms/osx/build_framework.py
#!/usr/bin/env python """ The script builds OpenCV.framework for OSX. """ from __future__ import print_function import os, os.path, sys, argparse, traceback, multiprocessing # import common code sys.path.insert(0, os.path.abspath(os.path.abspath(os.path.dirname(__file__))+'/../ios')) from build_framework import Builder MACOSX_DEPLOYMENT_TARGET='10.12' # default, can be changed via command line options or environment variable class OSXBuilder(Builder): def getToolchain(self, arch, target): return None def getBuildCommand(self, archs, target): buildcmd = [ "xcodebuild", "MACOSX_DEPLOYMENT_TARGET=" + os.environ['MACOSX_DEPLOYMENT_TARGET'], "ARCHS=%s" % archs[0], "-sdk", target.lower(), "-configuration", "Debug" if self.debug else "Release", "-parallelizeTargets", "-jobs", str(multiprocessing.cpu_count()) ] return buildcmd def getInfoPlist(self, builddirs): return os.path.join(builddirs[0], "osx", "Info.plist") if __name__ == "__main__": folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../..")) parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for OSX.') parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework') parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)') parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)') parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework') parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)') parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET') parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)') parser.add_argument('--debug_info', action='store_true', help='Build with debug information (useful for Release mode: BUILD_WITH_DEBUG_INFO=ON)') args = parser.parse_args() os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET']) b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.enablenonfree, [ (["x86_64"], "MacOSX") ], args.debug, args.debug_info) b.build(args.out)
Python
0.000047
@@ -1795,32 +1795,169 @@ the framework')%0A + parser.add_argument('--disable', metavar='FEATURE', default=%5B%5D, action='append', help='OpenCV features to disable (add WITH_*=OFF)')%0A parser.add_a @@ -2799,16 +2799,30 @@ without, + args.disable, args.en
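Applying the two hunks above to the old contents yields, approximately, the lines below (a reconstruction from the diff, not a copy of the repository; surrounding code is assumed unchanged): the new repeatable --disable option, and the extra args.disable argument threaded into the OSXBuilder call.
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append',
                    help='OpenCV features to disable (add WITH_*=OFF)')
...
b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable,
               args.enablenonfree, [ (["x86_64"], "MacOSX") ], args.debug, args.debug_info)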
0782ba56218e825dea5b76cbf030a522932bcfd6
Remove unnecessary (and debatable) comment.
networkx/classes/ordered.py
networkx/classes/ordered.py
""" OrderedDict variants of the default base classes. These classes are especially useful for doctests and unit tests. """ try: # Python 2.7+ from collections import OrderedDict except ImportError: # Oython 2.6 try: from ordereddict import OrderedDict except ImportError: OrderedDict = None from .graph import Graph from .multigraph import MultiGraph from .digraph import DiGraph from .multidigraph import MultiDiGraph __all__ = [] if OrderedDict is not None: __all__.extend([ 'OrderedGraph', 'OrderedDiGraph', 'OrderedMultiGraph', 'OrderedMultiDiGraph' ]) class OrderedGraph(Graph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedDiGraph(DiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiGraph(MultiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiDiGraph(MultiDiGraph): node_dict_factory = OrderedDict adjlist_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict
Python
0
@@ -52,74 +52,8 @@ s.%0A%0A -These classes are especially useful for doctests and unit tests.%0A%0A %22%22%22%0A
05e162a7fcc9870e37a6deab176cf3c6491e8481
add docstrings for methods of PrimaryDecider and refactor them a bit
plenum/server/primary_decider.py
plenum/server/primary_decider.py
from typing import Iterable from collections import deque from plenum.common.message_processor import MessageProcessor from plenum.server.has_action_queue import HasActionQueue from plenum.server.router import Router, Route from stp_core.common.log import getlogger from typing import List logger = getlogger() class PrimaryDecider(HasActionQueue, MessageProcessor): def __init__(self, node): HasActionQueue.__init__(self) # TODO: How does primary decider ensure that a node does not have a # primary while its catching up self.node = node self.name = node.name self.f = node.f self.replicas = node.replicas self.viewNo = node.viewNo self.rank = node.rank self.nodeNames = node.allNodeNames self.nodeCount = 0 self.inBox = deque() self.outBox = deque() self.inBoxRouter = Router(*self.routes) # Need to keep track of who was primary for the master protocol # instance for previous view, this variable only matters between # elections, the elector will set it before doing triggering new # election and will reset it after primary is decided for master # instance self.previous_master_primary = None def __repr__(self): return "{}".format(self.name) @property def was_master_primary_in_prev_view(self): return self.previous_master_primary == self.name @property def routes(self) -> Iterable[Route]: raise NotImplementedError @property def supported_msg_types(self) -> Iterable[type]: return [k for k, v in self.routes] def decidePrimaries(self): """ Choose the primary replica for each protocol instance in the system using a PrimaryDecider. """ raise NotImplementedError async def serviceQueues(self, limit): # TODO: this should be abstract return 0 def viewChanged(self, viewNo: int): if viewNo > self.viewNo: self.viewNo = viewNo self.previous_master_primary = self.node.master_primary_name for replica in self.replicas: replica.primaryName = None return True else: logger.warning("Provided view no {} is not greater than the " "current view no {}".format(viewNo, self.viewNo)) return False def get_msgs_for_lagged_nodes(self) -> List[int]: raise NotImplementedError def send(self, msg): """ Send a message to the node on which this replica resides. :param msg: the message to send """ logger.debug("{}'s elector sending {}".format(self.name, msg)) self.outBox.append(msg) def start_election_for_instance(self, instance_id): """ Called when starting election for a particular protocol instance """ raise NotImplementedError
Python
0
@@ -436,124 +436,8 @@ lf)%0A - # TODO: How does primary decider ensure that a node does not have a%0A # primary while its catching up%0A @@ -1555,24 +1555,32 @@ maries(self) + -%3E None :%0A %22%22 @@ -1593,14 +1593,25 @@ -Choose +Start election of the @@ -1658,53 +1658,15 @@ nce -in the system%0A using a PrimaryDecider. + %0A @@ -1804,15 +1804,32 @@ r -eturn 0 +aise NotImplementedError %0A%0A @@ -1878,492 +1878,763 @@ -if viewNo %3E self.viewNo:%0A self.viewNo = viewNo%0A self.previous_master_primary = self.node.master_primary_name%0A for replica in self.replicas:%0A replica.primaryName = None%0A return True%0A else:%0A logger.warning(%22Provided view no %7B%7D is not greater than the %22%0A %22current view no %7B%7D%22.format(viewNo, self.viewNo))%0A return False%0A%0A def get_msgs_for_lagged_nodes(self) -%3E List%5Bint%5D: +%22%22%22%0A Notifies primary decider about the fact that view changed to let it%0A prepare for election, which then will be started from outside by %0A calling decidePrimaries() %0A %22%22%22%0A if viewNo %3C= self.viewNo:%0A logger.warning(%22Provided view no %7B%7D is not greater than the %22%0A %22current view no %7B%7D%22.format(viewNo, self.viewNo))%0A return False%0A self.viewNo = viewNo%0A self.previous_master_primary = self.node.master_primary_name%0A for replica in self.replicas:%0A replica.primaryName = None%0A return True%0A%0A def get_msgs_for_lagged_nodes(self) -%3E List%5Bobject%5D:%0A %22%22%22%0A Returns election messages from the last view change %0A %22%22%22 %0A
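The last hunk is easiest to read unencoded. Reconstructed from the diff (indentation inferred from the %0A-encoded lines), the reworked viewChanged becomes:
    def viewChanged(self, viewNo: int):
        """
        Notifies primary decider about the fact that view changed to let it
        prepare for election, which then will be started from outside by
        calling decidePrimaries()
        """
        if viewNo <= self.viewNo:
            logger.warning("Provided view no {} is not greater than the "
                           "current view no {}".format(viewNo, self.viewNo))
            return False
        self.viewNo = viewNo
        self.previous_master_primary = self.node.master_primary_name
        for replica in self.replicas:
            replica.primaryName = None
        return True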
8f78c04f6e2f21deb02a285fc78c5da907f0287b
Delete extra print()
nn/file/cnn_dailymail_rc.py
nn/file/cnn_dailymail_rc.py
import functools import numpy as np import tensorflow as tf from .. import flags from ..flags import FLAGS class _RcFileReader: def __init__(self): # 0 -> null, 1 -> unknown self._word_indices = flags.word_indices def read(self, filename_queue): key, value = tf.WholeFileReader().read(filename_queue) return (key, *self._read_record(value)) def _read_record(self, string): def read_record(string): context, question, answer = string.split("\n\n")[1:4] context = self._map_words_to_indices(context) question = self._map_words_to_indices(question) return (context, question, self._map_word_to_index(answer), len(context), len(question)) context, question, answer, context_length, question_length = tf.py_func( read_record, [string], [tf.int32, tf.int32, tf.int32, tf.int32, tf.int32], name="read_rc_data") context_length.set_shape([]) question_length.set_shape([]) print(tf.reshape(context, [context_length]).get_shape()) return (tf.reshape(context, [context_length]), tf.reshape(question, [question_length]), tf.reshape(answer, [])) def _map_word_to_index(self, word): return self._word_indices[word] if word in self._word_indices else 1 # unknown def _map_words_to_indices(self, document): return np.array([self._map_word_to_index(word) for word in document.split()], dtype=np.int32) def read_files(filename_queue): tensors = _RcFileReader().read(filename_queue) return tf.contrib.training.bucket_by_sequence_length( tf.shape(tensors[1])[0], list(tensors), FLAGS.batch_size, [int(num) for num in FLAGS.length_boundaries.split(",")], num_threads=FLAGS.num_threads_per_queue, capacity=FLAGS.queue_capacity, dynamic_pad=True, allow_smaller_final_batch=True)[1]
Python
0.000003
@@ -1000,70 +1000,8 @@ %5D)%0A%0A - print(tf.reshape(context, %5Bcontext_length%5D).get_shape())%0A%0A
9a59705e3d85a93a8e82a22cfdde66600d18e650
Version bump
projector/__init__.py
projector/__init__.py
""" Project management Django application with task tracker and repository backend integration. """ VERSION = (0, 1, 9) __version__ = '.'.join((str(each) for each in VERSION[:4])) def get_version(): """ Returns shorter version (digit parts only) as string. """ return '.'.join((str(each) for each in VERSION[:3]))
Python
0.000001
@@ -115,9 +115,17 @@ 1, -9 +10, 'dev' )%0A%0A_
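A quick worked example of what the bump means for the two derived strings, given the slicing already in the old contents (__version__ joins VERSION[:4], get_version() joins VERSION[:3]):
VERSION = (0, 1, 10, 'dev')
'.'.join(str(each) for each in VERSION[:4])   # __version__   -> '0.1.10.dev'
'.'.join(str(each) for each in VERSION[:3])   # get_version() -> '0.1.10'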
55eac8bed7e08c245642c1292ebc644fcbd8e12a
Add jobs serializers' tests
polyaxon/api/jobs/serializers.py
polyaxon/api/jobs/serializers.py
from rest_framework import fields, serializers from rest_framework.exceptions import ValidationError from db.models.jobs import Job, JobStatus from libs.spec_validation import validate_job_spec_config class JobStatusSerializer(serializers.ModelSerializer): uuid = fields.UUIDField(format='hex', read_only=True) job = fields.SerializerMethodField() class Meta: model = JobStatus exclude = ('id',) def get_job(self, obj): return obj.job.uuid.hex class JobSerializer(serializers.ModelSerializer): uuid = fields.UUIDField(format='hex', read_only=True) user = fields.SerializerMethodField() project = fields.SerializerMethodField() project_name = fields.SerializerMethodField() started_at = fields.DateTimeField(read_only=True) finished_at = fields.DateTimeField(read_only=True) class Meta: model = Job fields = ( 'uuid', 'unique_name', 'user', 'sequence', 'description', 'created_at', 'updated_at', 'last_status', 'started_at', 'finished_at', 'is_running', 'is_done', 'is_clone', 'project', 'project_name',) def get_user(self, obj): return obj.user.username def get_project(self, obj): return obj.project.uuid.hex def get_project_name(self, obj): return obj.project.unique_name class JobDetailSerializer(JobSerializer): original = fields.SerializerMethodField() resources = fields.SerializerMethodField() class Meta(JobSerializer.Meta): fields = JobSerializer.Meta.fields + ( 'original', 'original_job', 'description', 'config', 'resources',) extra_kwargs = {'original_job': {'write_only': True}} def get_original(self, obj): return obj.original_job.unique_name if obj.original_job else None def get_resources(self, obj): return obj.resources.to_dict() if obj.resources else None class JobCreateSerializer(serializers.ModelSerializer): user = fields.SerializerMethodField() class Meta: model = Job fields = ('user', 'description', 'config',) def get_user(self, obj): return obj.user.username def validate_config(self, config): """We only validate the config if passed. Also we use the JobSpecification to check if this config was intended as job. """ # config is optional if not config: return config spec = validate_job_spec_config(config) if spec.is_job: # Resume normal creation return config # Raise an error to tell the user to use job creation instead raise ValidationError('Current job creation could not be performed.\n' 'The reason is that the specification sent correspond ' 'to a `{}`.\n'.format(spec.kind)) def validate(self, attrs): if self.initial_data.get('check_specification') and not attrs.get('config'): raise ValidationError('Experiment expects a `config`.') return attrs def create(self, validated_data): """Check the params or set the value from the specification.""" if not validated_data.get('declarations') and validated_data.get('config'): config = validate_job_spec_config(validated_data['config']) validated_data['declarations'] = config.declarations return super(JobCreateSerializer, self).create(validated_data=validated_data)
Python
0
@@ -2370,87 +2370,8 @@ %22%22%22%0A - # config is optional%0A if not config:%0A return config%0A%0A @@ -2806,628 +2806,4 @@ d))%0A -%0A def validate(self, attrs):%0A if self.initial_data.get('check_specification') and not attrs.get('config'):%0A raise ValidationError('Experiment expects a %60config%60.')%0A return attrs%0A%0A def create(self, validated_data):%0A %22%22%22Check the params or set the value from the specification.%22%22%22%0A if not validated_data.get('declarations') and validated_data.get('config'):%0A config = validate_job_spec_config(validated_data%5B'config'%5D)%0A validated_data%5B'declarations'%5D = config.declarations%0A return super(JobCreateSerializer, self).create(validated_data=validated_data)%0A
16bf079d1b139db08988fdb3cc1ff818cecfc12e
Add ModelTranslationAdminMixin.
linguist/admin.py
linguist/admin.py
# -*- coding: utf-8 -*- from django.contrib import admin from .models import Translation class TranslationAdmin(admin.ModelAdmin): pass admin.site.register(Translation, TranslationAdmin)
Python
0
@@ -95,43 +95,201 @@ ass -TranslationAdmin(admin.ModelAdmin): +ModelTranslationAdminMixin(object):%0A %22%22%22%0A Mixin for model admin classes.%0A %22%22%22%0A pass%0A%0A%0A%0Aclass TranslationAdmin(admin.ModelAdmin):%0A %22%22%22%0A Translation model admin options.%0A %22%22%22 %0A
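The module is small enough that the whole post-commit file can be reconstructed from the diff (blank-line counts approximate):
# -*- coding: utf-8 -*-
from django.contrib import admin

from .models import Translation


class ModelTranslationAdminMixin(object):
    """
    Mixin for model admin classes.
    """
    pass


class TranslationAdmin(admin.ModelAdmin):
    """
    Translation model admin options.
    """
    pass


admin.site.register(Translation, TranslationAdmin)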
9515271143c0ae9cbcde98b981f665e8ccda5955
Clean up jupyter testing.
positivity_check/jupiter_test.py
positivity_check/jupiter_test.py
#%% import pandas as pd import collections import matplotlib.pyplot as plt import os import sys import pprint import re module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) from positivity_check.positivity_check import UserText office_file = os.getcwd() + "\\positivity_check\\resources\\the-office-lines-scripts.csv" columns = pd.read_csv(office_file, nrows=1).columns print(columns) data = pd.read_csv(office_file) def count_lines(): df = pd.DataFrame(data=data, columns=columns) char_count = collections.Counter(df['speaker']) epi_count = collections.Counter(df['episode']) season_count = collections.Counter(df['season']) counts = {} counts["char_count"] = collections.OrderedDict(char_count.most_common(10)) counts["season_count"] = dict(season_count) counts["epi_count"] = dict(epi_count) return counts def lines_per(): df = pd.DataFrame(data=data, columns=columns) jim_lines = df[(df['season'] == 1) & (df['episode'] == 1) & (df['speaker'] == 'Jim')]['line_text'] return jim_lines def remove_stage_directions_and_punctuation(text): without_directions = re.sub("[\(\[].*?[\)\]]", "", text) result = re.sub('[^A-Za-z0-9 ]+', '', without_directions) return result # Make Season[Episode][Character] data structure main_characters = ["Michael", "Jim", "Pam", "Dwight", "Ryan", "Andy", "Robert"] recurring_characters = ["Jan", "Roy", "Stanley", "Kevin", "Meredith", "Angela", "Oscar", "Phyllis", "Kelly", "Toby", "Creed", "Gabe", "Holly", "Nellie", "Clark", "Pete", "Erin"] other_characters = ["Todd", "David", "Karen", "Charles", "Jo", "Deangelo", "Val", "Cathy"] df = pd.DataFrame(data=data, columns=columns) # seasons = dict.fromkeys(set(data['season'])) seasons = {1:None} for season in seasons: episodes = df[ (df['season']) == season] seasons[season] = dict.fromkeys(set(episodes['episode'])) for episode in seasons[season]: seasons[season][episode] = dict.fromkeys(main_characters) for char in seasons[season][episode]: seasons[season][episode][char] = {'total_text': "", 'text_value_counts': []} for line in df[ (df['season'] == season) & (df['episode'] == episode) & (df['speaker'] == char)]['line_text']: # print(line) seasons[season][episode][char]['total_text'] += " " + remove_stage_directions_and_punctuation(line) analyzed_chars = {} for char in seasons[1][1]: analyzed_chars[char] = UserText(seasons[1][1][char]['total_text']) # pprint.pprint(seasons[1][1]['Jim']['total_text']) # analyzed_text = UserText(seasons[1][1]['Jim']['total_text']) analyzed_chars['Jim'].print_stats() # Line counting, season counting results # counts = count_lines() # counts = pd.Series(count_lines()['char_count']) # counts.plot(kind='bar', rot=-45) # counts = pd.Series(count_lines()['season_count']) # counts.plot(kind='bar', rot=0) counts = pd.Series(analyzed_chars['Jim'].word_values_count) counts.plot(kind='bar', rot=0, logy=True, figsize=(12, 8), title="Distribution of Positive and Negative Words") plt.xlabel('Word Values') plt.ylabel('Occurrence Rate')
Python
0
@@ -1,9 +1,10 @@ # + %25%25%0A%0Aimpo @@ -476,16 +476,17 @@ _file)%0A%0A +%0A def coun @@ -906,16 +906,17 @@ counts%0A%0A +%0A def line @@ -1027,32 +1027,51 @@ 'episode'%5D == 1) +%0A & (df%5B'speaker' @@ -1118,16 +1118,17 @@ _lines%0A%0A +%0A def remo @@ -1312,16 +1312,17 @@ result%0A%0A +%0A # Make S @@ -1530,16 +1530,40 @@ %22Oscar%22, +%0A %22Phylli @@ -1689,16 +1689,36 @@ %22Karen%22, +%0A %22Charle @@ -1863,16 +1863,17 @@ ns = %7B1: + None%7D%0A%0Af @@ -1908,25 +1908,24 @@ isodes = df%5B - (df%5B'season' @@ -2194,16 +2194,33 @@ har%5D = %7B +%0A 'total_t @@ -2281,17 +2281,16 @@ e in df%5B - (df%5B'sea @@ -2475,16 +2475,38 @@ += %22 %22 + + %5C%0A remove_ @@ -3171,16 +3171,28 @@ (12, 8), +%0A title=%22
cfeb26b8c591b6d61f3184de74b2a37a2c2c21cc
Fix `lintreview register`
lintreview/cli.py
lintreview/cli.py
import argparse import lintreview.github as github from lintreview.web import app def main(): parser = create_parser() args = parser.parse_args() args.func(args) def register_hook(args): credentials = None if args.login_user and args.login_pass: credentials = { 'GITHUB_USER': args.login_user, 'GITHUB_PASSWORD': args.login_pass } github.register_hook(app, args.user, args.repo, credentials) def remove_hook(args): print 'unregister' print args def create_parser(): desc = """ Command line utilities for lintreview. """ parser = argparse.ArgumentParser(description=desc) commands = parser.add_subparsers( title="Subcommands", description="Valid subcommands") desc = """ Register webhooks for a given user & repo The installed webhook will be used to trigger lint reviews as pull requests are opened/updated. """ register = commands.add_parser('register', help=desc) register.add_argument( '-u', '--user', dest='login_user', help="The user that has admin rights to the repo " "you are adding hooks to. Useful when the user " "in settings is not the administrator of " "your repositories.") register.add_argument( '-p', '--password', dest='login_pass', help="The password of the admin user.") register.add_argument('user', help="The user or organization the repo is under.") register.add_argument('repo', help="The repository to install a hook into.") register.set_defaults(func=register_hook) desc = """ Unregister webhooks for a given user & repo. """ remove = commands.add_parser('unregister', help=desc) remove.add_argument( '-u', '--user', dest='login_user', help="The user that has admin rights to the repo you " "are removing hooks from. Useful when the " "user in settings is not the administrator of " "your repositories.") remove.add_argument( '-p', '--password', dest='login_pass', help="The password of the admin user.") remove.add_argument('user', help="The user or organization the repo is under.") remove.add_argument('repo', help="The repository to remove a hook from.") remove.set_defaults(func=remove_hook) return parser if __name__ == '__main__': main()
Python
0
@@ -45,16 +45,42 @@ github%0A%0A +from flask import url_for%0A from lin @@ -102,16 +102,16 @@ ort app%0A - %0A%0Adef ma @@ -419,71 +419,496 @@ %7D%0A - github.register_hook(app, args.user, args.repo, credentials +%0A with app.app_context():%0A if credentials:%0A credentials%5B'GITHUB_URL'%5D = app.config%5B'GITHUB_URL'%5D%0A gh = github.get_client(%0A credentials,%0A args.user,%0A args.repo)%0A else:%0A gh = github.get_client(%0A app.config,%0A args.user,%0A args.repo)%0A endpoint = url_for('start_review', _external=True)%0A%0A github.register_hook(gh, endpoint, args.user, args.repo )%0A%0A%0A
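Reconstructed from the hunks (indentation inferred), register_hook now builds an authenticated client inside an application context and derives the callback URL with url_for:
from flask import url_for

def register_hook(args):
    credentials = None
    if args.login_user and args.login_pass:
        credentials = {
            'GITHUB_USER': args.login_user,
            'GITHUB_PASSWORD': args.login_pass
        }

    with app.app_context():
        if credentials:
            credentials['GITHUB_URL'] = app.config['GITHUB_URL']
            gh = github.get_client(credentials, args.user, args.repo)
        else:
            gh = github.get_client(app.config, args.user, args.repo)
        endpoint = url_for('start_review', _external=True)

        github.register_hook(gh, endpoint, args.user, args.repo)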
01d65552b406ef21a5ab4f53fd20cdd9ed6c55f8
support github ping events
lintreview/web.py
lintreview/web.py
import logging import pkg_resources from flask import Flask, request, Response from lintreview.config import load_config from lintreview.github import get_client from lintreview.github import get_lintrc from lintreview.tasks import process_pull_request from lintreview.tasks import cleanup_pull_request config = load_config() app = Flask("lintreview") app.config.update(config) log = logging.getLogger(__name__) version = pkg_resources.get_distribution('lintreview').version @app.route("/ping") def ping(): return "lint-review: %s pong\n" % (version,) @app.route("/review/start", methods=["POST"]) def start_review(): try: action = request.json["action"] pull_request = request.json["pull_request"] number = pull_request["number"] base_repo_url = pull_request["base"]["repo"]["git_url"] head_repo_url = pull_request["head"]["repo"]["git_url"] user = pull_request["base"]["repo"]["owner"]["login"] repo = pull_request["base"]["repo"]["name"] except Exception as e: log.error("Got an invalid JSON body. '%s'", e) return Response(status=403, response="You must provide a valid JSON body\n") log.info("Received GitHub pull request notification for " "%s %s, (%s) from: %s", base_repo_url, number, action, head_repo_url) if action not in ("opened", "synchronize", "reopened", "closed"): log.info("Ignored '%s' action." % action) return Response(status=204) if action == "closed": return close_review(user, repo, pull_request) gh = get_client(app.config, user, repo) try: lintrc = get_lintrc(gh) log.debug("lintrc file contents '%s'", lintrc) except Exception as e: log.warn("Cannot download .lintrc file for '%s', " "skipping lint checks.", base_repo_url) log.warn(e) return Response(status=204) try: log.info("Scheduling pull request for %s/%s %s", user, repo, number) process_pull_request.delay(user, repo, number, lintrc) except: log.error('Could not publish job to celery. Make sure its running.') return Response(status=500) return Response(status=204) def close_review(user, repo, pull_request): try: log.info("Scheduling cleanup for %s/%s", user, repo) cleanup_pull_request.delay(user, repo, pull_request['number']) except: log.error('Could not publish job to celery. ' 'Make sure its running.') return Response(status=204)
Python
0
@@ -618,24 +618,134 @@ t_review():%0A + event = request.headers.get('X-Github-Event')%0A if event == 'ping':%0A return Response(status=200)%0A%0A try:%0A
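The hunk adds a guard at the top of the handler; reconstructed from the diff:
@app.route("/review/start", methods=["POST"])
def start_review():
    event = request.headers.get('X-Github-Event')
    if event == 'ping':
        return Response(status=200)

    try:
        action = request.json["action"]
        # ... rest of the handler unchanged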
f4a7200decbff0cb1a2ddde0b3f044da5d6c5250
Return repository collaborators as User instances.
github2/repositories.py
github2/repositories.py
from github2.core import BaseData, GithubCommand, Attribute, DateAttribute class Repository(BaseData): name = Attribute("Name of repository.") description = Attribute("Repository description.") forks = Attribute("Number of forks of this repository.") watchers = Attribute("Number of people watching this repository.") private = Attribute("If True, the repository is private.") url = Attribute("Canonical URL to this repository") fork = Attribute("If True, this is a fork of another repository.") owner = Attribute("Username of the user owning this repository.") homepage = Attribute("Homepage for this project.") master_branch = Attribute("Default branch, if set.") integration_branch = Attribute("Integration branch, if set.") open_issues = Attribute("List of open issues for this repository.") created_at = DateAttribute("Datetime the repository was created.") pushed_at = DateAttribute("Datetime of the last push to this repository") has_downloads = Attribute("If True, this repository has downloads.") has_wiki = Attribute("If True, this repository has a wiki.") has_issues = Attribute("If True, this repository has an issue tracker.") language = Attribute("Primary language for the repository.") def _project(self): return self.owner + "/" + self.name project = property(_project) def __repr__(self): return "<Repository: %s>" % (self._project()) class Repositories(GithubCommand): domain = "repos" def search(self, query): """Get all repositories that match term. :param str query: term to search issues for """ return self.make_request("search", query, filter="repositories") def show(self, project): """Get repository object for project. :param str project: GitHub project """ return self.get_value("show", project, filter="repository", datatype=Repository) def pushable(self): """Return a list of repos you can push to that are not your own. .. versionadded:: 0.3.0 """ return self.get_values("pushable", filter="repositories", datatype=Repository) def list(self, user=None): """Return a list of all repositories for a user. .. deprecated: 0.4.0 Previous releases would attempt to display repositories for the logged-in user when ``user`` wasn't supplied. This functionality is brittle and will be removed in a future release! :param str user: Github user name to list repositories for """ user = user or self.request.username return self.get_values("show", user, filter="repositories", datatype=Repository) def watch(self, project): """Watch a project :param str project: GitHub project """ return self.make_request("watch", project) def unwatch(self, project): """Unwatch a project :param str project: GitHub project """ return self.make_request("unwatch", project) def fork(self, project): """Fork a project :param str project: GitHub project """ return self.get_value("fork", project, filter="repository", datatype=Repository) def create(self, project, description=None, homepage=None, public=True): """Create a repository :param str project: new project name :param str description: optional project description :param str homepage: optional project homepage :param bool public: whether to make a public project """ repo_data = {"name": project, "description": description, "homepage": homepage, "public": str(int(public))} return self.get_value("create", post_data=repo_data, filter="repository", datatype=Repository) def delete(self, project): """Delete a repository :param str project: project name to delete """ # Two-step delete mechanism. 
We must echo the delete_token value back # to GitHub to actually delete a repository result = self.make_request("delete", project, method="POST") self.make_request("delete", project, post_data=result) def set_private(self, project): """Mark repository as private :param str project: project name to set as private """ return self.make_request("set/private", project) def set_public(self, project): """Mark repository as public :param str project: project name to set as public """ return self.make_request("set/public", project) def list_collaborators(self, project): """Lists all the collaborators in a project :param str project: GitHub project """ return self.make_request("show", project, "collaborators", filter="collaborators") def add_collaborator(self, project, username): """Adds an add_collaborator to a repo :param str project: Github project :param str username: Github user to add as collaborator """ return self.make_request("collaborators", project, "add", username) def remove_collaborator(self, project, username): """Removes an add_collaborator from a repo :param str project: Github project :param str username: Github user to add as collaborator """ return self.make_request("collaborators", project, "remove", username, method="POST") def network(self, project): """Get network data for project :param str project: Github project """ return self.make_request("show", project, "network", filter="network") def languages(self, project): """Get programming language data for project :param str project: Github project """ return self.make_request("show", project, "languages", filter="languages") def tags(self, project): """Get tags for project :param str project: Github project """ return self.make_request("show", project, "tags", filter="tags") def branches(self, project): """Get branch names for project :param str project: Github project """ return self.make_request("show", project, "branches", filter="branches") def watchers(self, project): """Get list of watchers for project :param str project: Github project """ return self.make_request("show", project, "watchers", filter="watchers") def watching(self, for_user=None): """Lists all the repos a user is watching :param str for_user: optional Github user name to list repositories for """ for_user = for_user or self.request.username return self.get_values("watched", for_user, filter="repositories", datatype=Repository) def list_contributors(self, project): """Lists all the contributors in a project :param str project: Github project """ return self.make_request("show", project, "contributors", filter="contributors")
Python
0
@@ -69,16 +69,49 @@ ribute%0A%0A +from github2.users import User%0A%0A%0A class Re @@ -7396,36 +7396,34 @@ return self. -make_req +get_val ues -t (%22show%22, pro @@ -7463,32 +7463,36 @@ + filter=%22contribu @@ -7492,14 +7492,29 @@ ontributors%22 +, datatype=User )%0A
7594763e5e6167c15fa7898b13283e875c13c099
Update BotPMError.py
resources/Dependencies/DecoraterBotCore/BotPMError.py
resources/Dependencies/DecoraterBotCore/BotPMError.py
# coding=utf-8 """ DecoraterBotCore ~~~~~~~~~~~~~~~~~~~ Core to DecoraterBot :copyright: (c) 2015-2017 Decorater :license: MIT, see LICENSE for more details. """ import discord __all__ = ['BotPMError'] class BotPMError: """ Class for PMing bot errors. """ def __init__(self, bot): self.bot = bot def construct_reply(self, message): """Constructs a bot reply.""" svr_name = message.channel.server.name cnl_name = message.channel.name msginfo = 'Missing the Send Message Permissions in the ' \ '{0} server on the {1} channel.' unabletosendmessageerror = msginfo.format(svr_name, cnl_name) return unabletosendmessageerror async def resolve_send_message_error(self, ctx): """ Resolves errors when sending messages. :param ctx: Message Context. :return: Nothing. """ await self.resolve_send_message_error_old( ctx.message) async def resolve_send_message_error_old(self, message): """ Resolves errors when sending messages. :param message: Message. :return: Nothing. """ unabletosendmessageerror = self.construct_reply( message) try: await bot.send_message( message.author, content=unabletosendmessageerror) except discord.errors.Forbidden: return
Python
0.000001
@@ -1280,16 +1280,21 @@ await +self. bot.send
bec6505195543536cd3952966fcb31702a3c6166
Add set header
gpm/utils/git_client.py
gpm/utils/git_client.py
import os from gitdb import GitDB from git import Repo, Git from github import Github, GithubObject from gpm.utils.operation import LocalOperation from gpm.utils.log import Log from gpm.utils.console import gets from gpm.const.status import Status from gpm.utils.conf import SYSConf import getpass class GitClient(LocalOperation): _GITIGNORE_NAME = ".gitignore" def __init__(self, config = None): self._repo = None self._origin = None self._github = None self._config = config self.__uname = None self.__password = None self.__github_url = None @property def github_url(self): if not self.__github_url: self.__github_url = self._config.git_url or "[email protected]:%s/%s" % (self.user_account[0], self._config.name) return self.__github_url @property def user_account(self): sys_conf = SYSConf() self.__uname = self.__uname or gets("Input GitHub user name", sys_conf.author) self.__password = self.__password or getpass.getpass("Input GitHub password") return self.__uname, self.__password @property def github(self): if not self._github: self._github = GitHubClient(self.user_account[0], self.user_account[1]) return self._github @property def repo(self): if not self._repo: self._repo = Repo(self.rel2abs(), odbt=GitDB) return self._repo @property def origin(self): if not self._origin: self._origin = self.repo.remotes[0] return self._origin def init(self, name = None, path = None): name = name or self._config.name path = path or self.rel2abs() repo_path = os.path.join(path, name) repo = Repo.init(repo_path, odbt=GitDB) self._repo = repo return repo.bare def add(self, paths): _paths = [] if not isinstance(paths, list): paths = [paths] for path in paths: _paths.append(self.rel2abs(path)) return self.repo.index.add(_paths) def commit(self, msg): return self.repo.index.commit(msg) def clone(self, url = None, to_path = None, branch = None): Log.info(Status["STAT_GET_PACKAGE"] % url) g = Git(to_path) g.clone(url or self.github_url) if branch: g.checkout(branch) return True def pull(self): self.origin.pull() def push(self): self.origin.push() def _add_remote(self, name, url): return self.repo.create_remote(name=name, url=url) def set_header(self): repo = self.repo.create_head('master') #repo.set_tracking_branch(self.origin.refs.master) def publish(self, name = "origin"): try: self._add_remote(name, self.github_url) except: pass self._create_remote(self._config.name, description = self._config.description or GithubObject.NotSet) self.set_header() self.push() def tag(self, path): return self.repo.tag(path) def _create_remote(self, name, *args, **kwargs): self.github.create_repo(name, *args, **kwargs) def create_gitignore(self): language = self._config.language if language: content = self.github.get_gitignore_template(language) LocalOperation.add_file(self._GITIGNORE_NAME, content) def safe_urljoin(self, *args): url = "" for section in args: section = section if section[-1] != "/" else section[:-1] url += section + "/" return url ################################## # # # GitHub Client # # # ################################## class GitHubClient(object): _API_GOOD = "good" def __init__(self, name, password): self.__username = name self.__password = password self._github = None self._user = None def __verify_login(self, obj): if obj.get_api_status().status == self._API_GOOD: return True return False @property def user(self): if not self._user: self._user = self.github.get_user() return self._user @property def github(self): if not self._github: self._github = Github(self.__username, self.__password) if not 
self.__verify_login(self._github): Log.fatal(Status["STAT_LOGIN_GITHUB_FAILED"]) return self._github def create_repo(self, name, *args, **kwargs): return self.user.create_repo(name=name, *args, **kwargs) def get_gitignore_template(self, name): return self.github.get_gitignore_template(name)
Python
0.000001
@@ -1624,16 +1624,97 @@ origin%0A%0A + @property%0A def branch(self):%0A return self.repo.active_branch.name%0A%0A def @@ -2762,104 +2762,60 @@ -repo = self.repo.create_head('master')%0A #repo.set_tracking_branch(self.origin.refs.master +self.run(%22git config --global push.default matching%22 )%0A%0A
87b3122ef1210d419bf58341962c5bdb09b6dbda
Remove traces of pdb
nsls2_build_tools/mirror.py
nsls2_build_tools/mirror.py
#!/usr/bin/env conda-execute """ CLI to mirror all files in a package from one conda channel to another """ # conda execute # env: # - anaconda-client # # run_with: python import os from argparse import ArgumentParser from pprint import pprint import re import sys import subprocess import pdb import binstar_client def Popen(cmd): """Returns stdout and stderr Parameters ---------- cmd : list List of strings to be sent to subprocess.Popen Returns ------- stdout : """ # capture the output with subprocess.Popen try: proc = subprocess.Popen(cmd, stderr=subprocess.PIPE) except subprocess.CalledProcessError as cpe: print(cpe) # pdb.set_trace() stdout, stderr = proc.communicate() if stdout: stdout = stdout.decode() if stderr: stderr = stderr.decode() return stdout, stderr, proc.returncode def cli(): p = ArgumentParser("Mirror packages from one channel to a different " "channel for a given anaconda.org site with an " "anaconda token. Note: will also work with " "the BINSTAR_TOKEN environmental variable set or if " "you have logged in to anaconda via the `anaconda " "login` command built in to anaconda-client") p.add_argument( 'packages', nargs='*', help="List of package names to mirror from one channel to another" ) p.add_argument( '--list', action='store_true', help='List all the packages on --from-user and then exit' ) p.add_argument( '--from-owner', nargs='?', help=("anaconda user to mirror packages from. Also acceptable to " "pass in user/channel. channel will default to main unless " "explicitly provided") ) p.add_argument( '--to-owner', nargs='?', help=("anaconda user to mirror packages to. Also acceptable to " "pass in user/channel. channel will default to main unless " "explicitly provided") ) p.add_argument( '--site', nargs='?', help="anaconda api site. only relevant if you are not using anaconda.org", default="https://api.anaconda.org" ) p.add_argument( '--token', nargs="?", help=("anaconda token used to authenticate you to the given anaconda " "site. 
Required for uploading unless you are logged in (via " "`anaconda login`)"), ) p.add_argument( '--dry-run', action='store_true', help=("Figure out which packages would be copied, print it out and " "then exit") ) args = p.parse_args() args.to_label = 'main' args.from_label = 'main' print(args) if args.token is None: args.token = os.environ.get('BINSTAR_TOKEN') try: args.from_owner, args.from_label = args.from_owner.split('/') except ValueError: # no extra channel information was passed pass try: args.to_owner, args.to_label = args.to_owner.split('/') except ValueError: # no extra channel information was passed pass cli = binstar_client.utils.get_server_api(token=args.token, site=args.site) packages_on_source = cli.show_channel(args.from_label, args.from_owner) files_on_source = [f['full_name'] for f in packages_on_source['files']] if args.list: print(""" Listing all files on {}/{}/{} """.format(args.site, args.from_owner, args.from_label)) pprint(files_on_source) sys.exit(0) matched = [f for f in files_on_source for p in args.packages if p in f] print(""" Packages that match {} on {}/{}/{} """.format(args.packages, args.site, args.from_owner, args.from_label)) pprint(matched) packages_on_destination = cli.show_channel(args.to_label, args.to_owner) files_on_destination = [f['full_name'] for f in packages_on_destination['files']] to_copy = [f for f in matched if f not in files_on_destination] print(""" Packages that match {} and do not already exist on {}/{}/{} """.format(args.packages, args.site, args.to_owner, args.to_label)) pprint(to_copy) if args.dry_run: print(""" Exiting because --dry-run flag is set """) sys.exit(0) # spec = '{}/{}/{}'.format(args.from_owner, args.from_label, to_copy[0]) for full_name in to_copy: cmd = ['anaconda', 'copy', '--to-owner', args.to_owner, '--to-label', args.to_label, full_name] print(' '.join(cmd)) Popen(cmd) if __name__ == "__main__": cli()
Python
0.000002
@@ -281,19 +281,8 @@ cess -%0Aimport pdb %0A%0Aim @@ -684,34 +684,8 @@ pe)%0A - # pdb.set_trace()%0A
cb7f6efbbbe640a2c360f7dc93cb2bc87b2e0ab2
fix example
entity_extract/examples/pos_extraction.py
entity_extract/examples/pos_extraction.py
#from entity_extract.extractor.extractors import PosExtractor from entity_extract.extractor.utilities import SentSplit, Tokenizer from entity_extract.extractor.extractors import PosExtractor from entity_extract.extractor.pos_tagger import PosTagger #p = PosExtractor() sents = p.SentPlit('This is a sentence about the pie in the sky. If would be interesting. If only there was') for sent in sents: tokens = Tokenizer.tokenize(sent) tags = PosTagger(tokens) print tags
Python
0.0001
@@ -244,16 +244,111 @@ Tagger%0A%0A +# Initialize Services%0AsentSplitter = SentSplit()%0Atokenizer = Tokenizer()%0Atagger = PosTagger()%0A%0A #p = Pos @@ -371,15 +371,23 @@ s = -p.SentP +sentSplitter.sp lit( @@ -511,17 +511,17 @@ okens = -T +t okenizer @@ -547,25 +547,26 @@ tags = -PosT +t agger +.tag (tokens)
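The example is short enough to reconstruct in full after the patch (string literals kept verbatim, including the repository's own wording):
#from entity_extract.extractor.extractors import PosExtractor
from entity_extract.extractor.utilities import SentSplit, Tokenizer
from entity_extract.extractor.extractors import PosExtractor
from entity_extract.extractor.pos_tagger import PosTagger

# Initialize Services
sentSplitter = SentSplit()
tokenizer = Tokenizer()
tagger = PosTagger()

#p = PosExtractor()
sents = sentSplitter.split('This is a sentence about the pie in the sky. If would be interesting. If only there was')

for sent in sents:
    tokens = tokenizer.tokenize(sent)
    tags = tagger.tag(tokens)
    print tags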
8dc5b661149fe075d703042cb32af7bbc0bd5d4a
Switch encoding.py to python3 type hints.
encoding.py
encoding.py
"""Script for encoding a payload into an image.""" import argparse import pathlib from PIL import Image, ImageMath import utilities def argument_parser(): # type: () -> argparse.ArgumentParser """Returns a configured argparser.ArgumentParser for this program.""" parser = argparse.ArgumentParser( description='Encode SECRETS into a picture', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( 'host_image', type=pathlib.Path, help='The image that will hide the information.') parser.add_argument( 'payload_image', type=pathlib.Path, help='The image that will be hidden within the host image.') parser.add_argument( '--significant_digits', type=int, default=1, help='The number of least significant digits available to encode over.') parser.add_argument( '--display', action='store_true', default=False, help='Display the encoded image generated by this program.') parser.add_argument( '--save', action='store_true', help='Save the encoded image generated by this program.') parser.add_argument( '--output_dir', type=pathlib.Path, default='.', help=( 'A specific location to which the processed image will be saved. ' 'If not specified, the current working directory will be used.')) return parser def encode(host, payload, n_significant_digits): # type: (PIL.Image, PIL.Image, int) -> PIL.Image """Encode a payload into an image (using the last n_significant_digits).""" output_rgb_channels = [] for host_channel, payload_channel in zip(host.split(), payload.split()): # Mask out all but the least significant byte, encoding payload there mask = utilities.bit_mask(n_significant_digits) expression = ( "convert(" "(host & (0xff - {mask})) | (payload & {mask}), 'L')".format( mask=mask)) output_rgb_channels.append( ImageMath.eval( expression, host=host_channel, payload=payload_channel)) return Image.merge('RGB', output_rgb_channels) def main(): args = argument_parser().parse_args() host = Image.open(args.host_image) payload = Image.open(args.payload_image) encoded = encode(host, payload, args.significant_digits) # Display the encoded image if args.display: encoded.show() # Save the encoded image, if the user wants us to if args.save: user_response = ( utilities.query_user( 'GONNA SAVE ENCODED IMAGE to "{0:s}"; GAR, IS THAT K???'.format( str(args.output_dir.absolute())))) if user_response: p = args.host_image # Short reference to the host_image path filename = '{0:s}{1:s}{2:s}'.format(p.stem, '.encoded', p.suffix) encoded.save( args.output_dir.joinpath(filename), format='png', quality=100) if __name__ == '__main__': main()
Python
0
@@ -155,24 +155,8 @@ er() -:%0A # type: () -%3E @@ -178,16 +178,17 @@ ntParser +: %0A %22%22%22 @@ -1465,32 +1465,39 @@ %0Adef encode(host +: Image , payload, n_sig @@ -1489,16 +1489,23 @@ payload +: Image , n_sign @@ -1522,45 +1522,9 @@ gits -):%0A # type: (PIL.Image, PIL.Image, +: int @@ -1532,17 +1532,14 @@ -%3E -PIL. Image +: %0A
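After the commit, the comment-style hints become annotations; the two touched signatures reconstruct from the diff to (bodies unchanged):
def argument_parser() -> argparse.ArgumentParser:
    """Returns a configured argparser.ArgumentParser for this program."""

def encode(host: Image, payload: Image, n_significant_digits: int) -> Image:
    """Encode a payload into an image (using the last n_significant_digits)."""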
9bc9137ec22b45b0611b2d786451db890f46f424
Add `initial` parameter to es.__init__
entities.py
entities.py
# vim: set et ts=4 sw=4 fdm=marker """ MIT License Copyright (c) 2016 Jesse Hogan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from pdb import set_trace; B=set_trace from random import randint class entities(object): def __init__(self): self.clear() def clear(self): self._ls=[] def __iter__(self): for t in self._list: yield t def getrandom(self, returnIndex=False): if self.isempty: return None ix = randint(0, self.ubound) if returnIndex: return self[ix], ix else: return self[ix] def where(self, fn): es = entities() for e in self: if fn(e): es += e return es def sort(self, key): self._ls.sort(key=key) def remove(self, e): if callable(e): rms = self.where(e) else: rms = [e] for i, e1 in enumerate(self): for e2 in rms: if e1 is e2: del self._list[i] break def reversed(self): r = type(self)() for e in reversed(self._ls): r += e return r @property def ubound(self): if self.isempty: return None return self.count - 1 def insert(self, ix, e): self.insertBefore(ix, e) def insertBefore(self, ix, e): self._ls.insert(ix, e) def insertAfter(self, ix, e): self._ls.insert(ix + 1, e) def move(self, srcix, dstix): # TODO: This is untested if srcix == dstix: raise Exception('Source and destination are the same: {}'.format((srcix, dstix))) e = self.pop(srcix) self.insert(dstix, e) def shift(self): return self._ls.pop(0) def unshift(self, t): # TODO: Return entities object to indicate what was unshifted return self._ls.insert(0, t) def pop(self, ix=None): if ix == None: return self._ls.pop() return self._ls.pop(ix) def push(self, e): self += e def __lshift__(self, a): self.unshift(a) def append(self, obj, uniq=False, r=None): if not r: r = [] if isinstance(obj, entity): t = obj elif isinstance(obj, entities): for t in obj: if uniq: for t1 in self: if t is t1: break else: self.append(t, r=r) continue break else: self.append(t, r=r) return r else: raise ValueError('Unsupported object appended') if uniq: for t1 in self: if t is t1: return r r.append(t) for t in r: self._list.append(t) return r def __iadd__(self, t): self.append(t) return self def __iand__(self, t): self.append(t, uniq=True) return self def add(self, ts): self += ts return self def __add__(self, t): return self.add(t) @property def _list(self): if not hasattr(self, '_ls'): self._ls = [] return self._ls @property def count(self): return len(self._list) def __len__(self): return self.count @property def isempty(self): return self.count == 0 @property def hasone(self): return self.count == 1 @property def ispopulated(self): return not self.isempty def __str__(self): if not self.isempty: 
r='' for i, t in enumerate(self): if i > 0: r += "\n" r += str(t) return r return '' def __setitem__(self, key, item): self._ls[key]=item def __getitem__(self, key): if type(key) == int or type(key) == slice: return self._list[key] for e in self._list: if hasattr(e, 'id'): if e.id == key: return e elif hasattr(e, 'name'): if e.name == key: return e def getindex(self, e): """ Return the first index of e in the collection. This is similar to list.index except here we use the `is` operator for comparison instead of the `==` operator.""" for ix, e1 in enumerate(self): if e is e1: return ix raise ValueError("'{}' is not in the collection".format(e)) @property def first(self): return self[0] @property def second(self): return self[1] @second.setter def second(self, v): self[1] = v @property def third(self): return self[2] @property def fourth(self): return self[3] @property def fifth(self): return self[4] @property def last(self): return self[-1] @last.setter def last(self, v): self[-1] = v @property def brokenrules(self): r = brokenrules() for ent in self: r += ent.brokenrules return r @property def isvalid(self): return self.brokenrules.isempty class entity(): def __init__(self): pass def add(self, t): th = entities() th += self th += t return th def isin(self, es): """Test if self is in entities object `es`. This is like the `in` operator (__contains__()) except it tests for object identity (with `is`) instead of object equality (`==` or __eq__()). """ for e in es: if self is e: return True return False def __add__(self, t): return self.add(t) @property def brokenrules(self): return brokenrules() @property def isvalid(self): return self.brokenrules.isempty class brokenrules(entities): def append(self, o, r=None): if isinstance(o, str): o = brokenrule(o) super().append(o, r) class brokenrule(entity): def __init__(self, msg): self.message = msg def __str__(self): return self.message class event(entities): def __call__(self, src, e): for callable in self: callable(src, e) def append(self, fn): if not callable(fn): raise ValueError('Event must be callable') self._list.append(fn) class eventargs(entity): pass
Python
0.000002
@@ -1207,32 +1207,46 @@ ef __init__(self +, initial=None ):%0A self. @@ -1252,16 +1252,77 @@ .clear() +%0A if initial != None:%0A self.append(initial) %0A%0A de
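The patched constructor, reconstructed from the diff, plus a hypothetical usage line (the entities(...) call below is illustrative only, not from the repository):
    def __init__(self, initial=None):
        self.clear()
        if initial != None:
            self.append(initial)

# illustrative usage: seed the collection at construction time
# es = entities(some_entity_or_entities)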
015fcfaaed0a3ff54801f5821df4f5527255ab06
Update SSL.py
gevent_openssl/SSL.py
gevent_openssl/SSL.py
"""gevent_openssl.SSL - gevent compatibility with OpenSSL.SSL. """ import sys import socket import OpenSSL.SSL class Connection(object): def __init__(self, context, sock): self._context = context self._sock = sock self._connection = OpenSSL.SSL.Connection(context, sock) self._makefile_refs = 0 def __getattr__(self, attr): if attr not in ('_context', '_sock', '_connection', '_makefile_refs'): return getattr(self._connection, attr) def __wait_sock_io(self, sock, io_func, *args, **kwargs): timeout = self._sock.gettimeout() or 0.1 fd = self._sock.fileno() while True: try: return io_func(*args, **kwargs) except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantX509LookupError): sys.exc_clear() _, _, errors = select.select([fd], [], [fd], timeout) if errors: break except OpenSSL.SSL.WantWriteError: sys.exc_clear() _, _, errors = select.select([], [fd], [fd], timeout) if errors: break def accept(self): sock, addr = self._sock.accept() client = OpenSSL.SSL.Connection(sock._context, sock) return client, addr def do_handshake(self): return self.__wait_sock_io(self._sock, self._connection.do_handshake) def connect(self, *args, **kwargs): return self.__wait_sock_io(self._sock, self._connection.connect, *args, **kwargs) def send(self, data, flags=0): try: return self.__wait_sock_io(self._sock, self._connection.send, data, flags) except OpenSSL.SSL.SysCallError as e: if e[0] == -1 and not data: # errors when writing empty strings are expected and can be ignored return 0 raise def recv(self, bufsiz, flags=0): pending = self._connection.pending() if pending: return self._connection.recv(min(pending, bufsiz)) try: return self.__wait_sock_io(self._sock, self._connection.recv, bufsiz, flags) except OpenSSL.SSL.ZeroReturnError: return '' def read(self, bufsiz, flags=0): return self.recv(bufsiz, flags) def write(self, buf, flags=0): return self.sendall(buf, flags) def close(self): if self._makefile_refs < 1: self._connection = None if self._sock: socket.socket.close(self._sock) else: self._makefile_refs -= 1 def makefile(self, mode='r', bufsize=-1): self._makefile_refs += 1 return socket._fileobject(self, mode, bufsize, close=True)
Python
0.000001
@@ -112,16 +112,19 @@ %0A%0Aclass +SSL Connecti @@ -134,16 +134,52 @@ object): +%0A %22%22%22OpenSSL Connection Wapper%22%22%22 %0A%0A de @@ -545,31 +545,19 @@ f __ +io wait -_sock_io(self, sock +(self , io @@ -1394,33 +1394,15 @@ f.__ +io wait -_sock_io(self._sock, +( self @@ -1495,33 +1495,15 @@ f.__ +io wait -_sock_io(self._sock, +( self @@ -1620,33 +1620,15 @@ f.__ +io wait -_sock_io(self._sock, +( self @@ -2081,33 +2081,15 @@ f.__ +io wait -_sock_io(self._sock, +( self @@ -2187,16 +2187,246 @@ eturn '' +%0A except OpenSSL.SSL.SysCallError as e:%0A if e%5B0%5D == -1 and 'Unexpected EOF' in e%5B1%5D:%0A # errors when reading empty strings are expected and can be ignored%0A return ''%0A raise %0A%0A de
b0a4683670f05ed068a702144814f310586331fb
fix models error
src/public/models.py
src/public/models.py
# coding=utf-8
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin

# todo: Group设置:供应商组,采购商组?


class Org(models.Model):
    """组织"""
    name = models.CharField('名称', max_length=50)
    code = models.CharField('编码', max_length=50)
    location = models.CharField('位置', max_length=200, null=True)
    telephone = models.CharField('手机', max_length=30, null=True)
    phone = models.CharField('固话', max_length=30, null=True)
    url = models.URLField('链接地址', null=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.name


class UserManager(BaseUserManager):
    def create_user(self, email, username, telephone, password=None):
        """
        Creates and saves a User with the given email, date of
        birth and password.
        """
        if not email:
            raise ValueError('Users must have an email address')

        user = self.model(
            email=self.normalize_email(email),
            username=username,
            telephone=telephone,
        )

        user.set_password(password)
        user.save(using=self._db)
        return user

    # todo: bug
    # $ python manage.py createsuperuser
    # $ ... TypeError: create_superuser() takes at least 4 arguments (3 given)
    def create_superuser(self, email, username, password, telephone=""):
        """
        Creates and saves a superuser with the given email, username and password.
        """
        user = self.create_user(email=email,
                                username=username,
                                password=password,
                                telephone=telephone
                                )
        user.is_admin = True
        user.save(using=self._db)
        return user


class User(AbstractBaseUser, PermissionsMixin):
    org = models.ForeignKey(Org, null=True)
    #注意:不继承PermissionsMixin类,是无法实现使用Django Group功能的,本人的项目需要使用所以继承该类。
    email = models.EmailField(verbose_name='email address', max_length=255, null=True, unique=True)
    private_email = models.EmailField(verbose_name='email address', max_length=255, null=True, unique=True)
    # 车牌号码
    car_no = models.CharField(max_length=50, null=True)
    # 用户登录名
    username = models.CharField(max_length=100, unique=True, db_index=True)
    # 英文名
    first_name = models.CharField(max_length=100, db_index=True)
    # 英文姓
    last_name = models.CharField(max_length=100, null=True, db_index=True)
    # 中文姓名
    cn_name = models.CharField(max_length=100, unique=True, null=True, db_index=True)
    avatar = models.URLField(blank=True)
    telephone = models.CharField(null=True, max_length=50)
    created_at = models.DateTimeField(null=True, auto_now_add=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    qq = models.CharField(null=True, max_length=20)
    idcard_no = models.CharField(null=True, max_length=50)
    hired_at = models.DateTimeField(null=True, auto_now_add=True)
    birthday = models.DateField(null=True)
    gender = models.IntegerField(null=True, default=1)
    # 离职
    # todo: ->BoolField
    quited = models.IntegerField(null=True, default=0)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = []

    def get_full_name(self):
        # The user is identified by their email address
        return self.email

    def get_short_name(self):
        # The user is identified by their email address
        return self.username

    # On Python 3: def __str__(self):
    def __unicode__(self):
        return self.username

    def has_perm(self, perm, obj=None):
        """Does the user have a specific permission?"""
        # Simplest possible answer: Yes, always
        return True

    def has_module_perms(self, app_label):
        """Does the user have permissions to view the app `app_label`?"""
        # Simplest possible answer: Yes, always
        return True

    @property
    def is_staff(self):
        """Is the user a member of staff?"""
        # Simplest possible answer: All admins are staff
        return self.is_admin


class Calendar(models.Model):
    """日历"""
    year = models.SmallIntegerField('年')
    month = models.SmallIntegerField('月')
    day = models.SmallIntegerField('日')
    # 1=是节假日
    is_holiday = models.SmallIntegerField('是节假日', default=0)
    holiday_mark = models.CharField('节假日说明', null=True, max_length=50)

    def get_full_datetime(self):
        # todo: 返回datetime()格式
        return '%s-%s-%s' % (self.year, self.month, self.day)

    def __unicode__(self):
        return self.get_full_datetime()


class Conf(models.Model):
    """配置"""
    name = models.CharField('名称', max_length=50)
    content = models.CharField('内容', max_length=100)
    desc = models.CharField('配置说明', max_length=50)
    created_at = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.name
Python
0.000001
@@ -4351,36 +4351,31 @@ ay = models. -SmallInteger +Boolean Field('%E6%98%AF%E8%8A%82%E5%81%87%E6%97%A5' @@ -4384,17 +4384,21 @@ default= -0 +False )%0A ho
78ef8bbb721d6673ba576726c57dfae963153153
fix bugs in evaluate.py
evaluate.py
evaluate.py
from envs import create_env

import numpy as np
import time
import argparse


def evaluate_loop(env, network, max_episodes, args):
    sleep_time = args.sleep_time
    render = args.render
    verbose = args.verbose

    last_state = env.reset()
    last_features = network.get_initial_features()
    n_episode, step = 0, 0
    episode_reward = np.zeros((max_episodes,), dtype='float32')
    episode_length = np.zeros((max_episodes,), dtype='float32')

    print('evaluating for {} episodes...'.format(max_episodes))
    while n_episode < max_episodes:
        fetched = network.act(last_state, *last_features)
        action = fetched[0]

        state, reward, terminal, _ = env.step(action.argmax())
        if render:
            env.render()

        episode_reward[n_episode] += reward
        if verbose:
            print("#step = {}, action = {}".format(step, action.argmax()))
            print("reward = {}".format(reward))

        if terminal:
            print("#episode = {}, #step = {}, reward sum = {}".format(n_episode, step, episode_reward[n_episode]))
            episode_length[n_episode] = step
            env.reset()
            step = 0
            n_episode += 1
        else:
            step += 1

        time.sleep(sleep_time)

    print('evaluation done.')
    print('avg score = {}'.format(episode_reward.mean()))
    print('avg episode length = {}'.format(episode_length.mean()))


def main(args):
    env_id = args.env_id
    ckpt_dir = args.ckpt_dir
    max_episodes = args.max_episodes

    # env
    env = create_env(env_id, 0, 1)
    if args.render:
        env.render()
        # work-around to the nasty env.render() failing issue when working with tensorflow
        # see https://github.com/openai/gym/issues/418
    import tensorflow as tf
    from model import Convx2LSTMActorCritic

    # model
    with tf.variable_scope("global"):
        network = Convx2LSTMActorCritic(env.observation_space.shape, env.action_space.n)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver()

    # load model parameters
    checkpoint = tf.train.get_checkpoint_state(ckpt_dir)
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("checkpoint loaded:", checkpoint.model_checkpoint_path)
    else:
        raise Exception('cannot find checkpoint path')

    # run evaluating
    with sess.as_default():
        evaluate_loop(env, network, max_episodes, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('--env-id', default="BreakoutDeterministic-v3", help='Environment id')
    parser.add_argument('--ckpt-dir', default="save/breakout/train", help='Checkpoint directory path')
    parser.add_argument('--max-episodes', default=2, type=int, help='Number of episodes to evaluate')
    parser.add_argument('--sleep-time', default=0.0, type=float, help='sleeping time')
    parser.add_argument('--render', action='store_true', help='render screen')
    parser.add_argument('--verbose', action='store_true', help='verbose')
    args = parser.parse_args()

    main(args=args)
Python
0.000001
@@ -617,16 +617,26 @@ action +, features = fetch @@ -640,16 +640,29 @@ tched%5B0%5D +, fetched%5B2:%5D %0A%0A @@ -972,16 +972,113 @@ rminal:%0A + last_state = env.reset()%0A last_features = network.get_initial_features()%0A%0A @@ -1233,32 +1233,8 @@ tep%0A - env.reset()%0A @@ -1283,32 +1283,101 @@ 1%0A else:%0A + last_state = state%0A last_features = features%0A%0A step @@ -1993,16 +1993,40 @@ # model%0A + sess = tf.Session()%0A with @@ -2148,32 +2148,8 @@ .n)%0A - sess = tf.Session()%0A @@ -2211,37 +2211,8 @@ nit) -%0A saver = tf.train.Saver() %0A%0A @@ -2294,16 +2294,16 @@ pt_dir)%0A - if c @@ -2350,16 +2350,49 @@ t_path:%0A + saver = tf.train.Saver()%0A
2e6c80717099fb6c6ca59d9d6193807b1aabfa8b
Update docstring
git_update/actions.py
git_update/actions.py
"""Git repo actions.""" import logging import os import pathlib import click from git import InvalidGitRepositoryError, Repo from git.exc import GitCommandError LOG = logging.getLogger(__name__) def crawl(path): """Crawl the path for possible Git directories.""" main_dir = pathlib.Path(path) if not main_dir.is_dir(): main_dir = main_dir.parent main_dir = main_dir.resolve() LOG.info('Finding directories in %s', main_dir) dir_list = [directory for directory in main_dir.iterdir() if directory.is_dir() and directory.parts[-1] != '.git'] LOG.debug('List of directories: %s', dir_list) for directory in dir_list: update_repo(os.path.join(main_dir, directory)) def check_changes(current, fetch_info_list, branch_list): """Check for changes in local branches and remote. Args: current: Dict(reference: commit) from before `git pull` operation. fetch_info_list: List of remote references from `git pull`. branch_list: List of branches in repository. """ log = logging.getLogger(__name__) for fetch_info in fetch_info_list: log.debug('Checking for change in %s', fetch_info.name) try: if current[fetch_info.ref] != fetch_info.commit: log.info('%s has updates, %s..%s', fetch_info.name, current[fetch_info.ref], fetch_info.commit) except KeyError: log.info('New reference %s', fetch_info.name) for branch in branch_list: log.debug('Checking for change in %s', branch.name) if current[branch] != branch.commit: log.info('%s updated, %s..%s', branch.name, current[branch], branch.commit) return True def update_repo(directory): """Update a repository. Returns: False if bad repository. True if everything worked. """ log = logging.getLogger(__name__) try: repo = Repo(directory) current = {ref: ref.commit for ref in repo.refs} click.secho('Updating {0}'.format(repo.git_dir), fg='blue') remote = repo.remote() fetch_info_list = remote.pull() except InvalidGitRepositoryError: log.warning('%s is not a valid repository.', directory) return False except ValueError: log.warning('Check remotes for %s: %s', directory, repo.remotes) return False except GitCommandError as error: log.fatal('Pull failed. %s', error) return False check_changes(current, fetch_info_list, repo.branches) return True
Python
0
@@ -260,16 +260,76 @@ ctories. +%0A%0A Args:%0A path (str): Original path to crawl.%0A %22%22%22%0A
ef1f303072307f259e8555e0148c29677b4f7d6f
Fix approve permissions typing
idb/ipc/approve.py
idb/ipc/approve.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from typing import Set, Dict  # noqa F401

from idb.grpc.types import CompanionClient
from idb.grpc.idb_pb2 import ApproveRequest


MAP = {  # type: Dict[str, ApproveRequest.Permission]
    "photos": ApproveRequest.PHOTOS,
    "camera": ApproveRequest.CAMERA,
    "contacts": ApproveRequest.CONTACTS,
}


async def client(
    client: CompanionClient, bundle_id: str, permissions: Set[str]
) -> None:
    print(f"Sending {[MAP[permission] for permission in permissions]}")
    await client.stub.approve(
        ApproveRequest(
            bundle_id=bundle_id,
            permissions=[MAP[permission] for permission in permissions],
        )
    )
Python
0.000009
@@ -122,21 +122,13 @@ Dict - # noqa F401 +, Any %0A%0Afr @@ -221,20 +221,8 @@ %0AMAP - = %7B # type : Di @@ -234,33 +234,15 @@ r, A -pproveRequest.Permission%5D +ny%5D = %7B %0A @@ -442,16 +442,16 @@ et%5Bstr%5D%0A + ) -%3E Non @@ -457,80 +457,8 @@ ne:%0A - print(f%22Sending %7B%5BMAP%5Bpermission%5D for permission in permissions%5D%7D%22)%0A
025ad18a2b6e483c2d44fe84d1b9d1ad0a7288b6
add njobs cli option
mriqc/classifier/cli.py
mriqc/classifier/cli.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date:   2015-11-19 16:44:27
# @Last Modified by:   oesteban
# @Last Modified time: 2017-01-13 15:45:09

"""
mriqc_fit command line interface definition

"""
from __future__ import absolute_import, division, print_function, unicode_literals
from sys import version_info
import os.path as op
from fcntl import flock, LOCK_EX, LOCK_UN
import warnings

PY3 = version_info[0] > 2

from sklearn.metrics.base import UndefinedMetricWarning
warnings.simplefilter("once", UndefinedMetricWarning)

cached_warnings = []


def warn_redirect(message, category, filename, lineno, file=None, line=None):
    from mriqc import logging
    LOG = logging.getLogger('mriqc.warnings')

    if category not in cached_warnings:
        LOG.debug('captured warning (%s): %s', category, message)
        cached_warnings.append(category)


def main():
    """Entry point"""
    import yaml
    from io import open
    from argparse import ArgumentParser
    from argparse import RawTextHelpFormatter
    from pkg_resources import resource_filename as pkgrf
    from .cv import CVHelper
    from mriqc import logging, LOG_FORMAT

    warnings.showwarning = warn_redirect

    parser = ArgumentParser(description='MRIQC Cross-validation',
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('training_data', help='input data')
    parser.add_argument('training_labels', help='input data')
    parser.add_argument('--test-data', help='test data')
    parser.add_argument('--test-labels', help='test labels')

    g_input = parser.add_argument_group('Inputs')
    g_input.add_argument('-P', '--parameters', action='store',
                         default=pkgrf('mriqc', 'data/grid_nested_cv.yml'))
    g_input.add_argument('-C', '--classifier', action='store', nargs='*',
                         choices=['svc_linear', 'svc_rbf', 'rfc', 'all'],
                         default=['svc_rbf'])
    g_input.add_argument('--cv-inner', action='store', default=10,
                         help='inner loop of cross-validation')
    g_input.add_argument('--cv-outer', action='store', default='loso',
                         help='outer loop of cross-validation')
    g_input.add_argument('--create-split', action='store_true', default=False,
                         help='create a data split for the validation set')
    g_input.add_argument('--nperm', action='store', default=5000, type=int,
                         help='number of permutations')

    g_input.add_argument(
        '-S', '--score-types', action='store', nargs='*', default=['accuracy'],
        choices=[
            'accuracy', 'adjusted_rand_score', 'average_precision', 'f1', 'f1_macro',
            'f1_micro', 'f1_samples', 'f1_weighted', 'log_loss', 'mean_absolute_error',
            'mean_squared_error', 'median_absolute_error', 'precision',
            'precision_macro', 'precision_micro', 'precision_samples',
            'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro',
            'recall_samples', 'recall_weighted', 'roc_auc'])

    g_input.add_argument('--log-file', action='store', default='mriqcfit.log')
    g_input.add_argument('--log-level', action='store', default='INFO',
                         choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'])
    g_input.add_argument('-o', '--output-file', action='store', default='cv_result.csv',
                         help='the output table with cross validated scores')

    opts = parser.parse_args()

    filelogger = logging.getLogger()
    fhl = logging.FileHandler(opts.log_file)
    fhl.setFormatter(fmt=logging.Formatter(LOG_FORMAT))
    filelogger.addHandler(fhl)
    filelogger.setLevel(opts.log_level)

    parameters = None
    if opts.parameters is not None:
        with open(opts.parameters) as paramfile:
            parameters = yaml.load(paramfile)

    cvhelper = CVHelper(opts.training_data, opts.training_labels,
                        scores=opts.score_types, param=parameters,
                        n_perm=opts.nperm)

    cvhelper.cv_inner = read_cv(opts.cv_inner)
    cvhelper.cv_outer = read_cv(opts.cv_outer)

    # Run inner loop before setting held-out data, for hygene
    cvhelper.fit()

    with open(opts.output_file, 'a' if PY3 else 'ab') as outfile:
        flock(outfile, LOCK_EX)
        save_headers = op.getsize(opts.output_file) == 0
        cvhelper.cv_scores_df[['clf', 'accuracy', 'roc_auc']].to_csv(
            outfile, index=False, header=save_headers)
        flock(outfile, LOCK_UN)


def read_cv(value):
    from numbers import Number

    try:
        value = int(value)
    except ValueError:
        pass

    if isinstance(value, Number):
        if value > 0:
            return {'type': 'kfold', 'n_splits': value}
        else:
            return None

    return {'type': 'loso'}


if __name__ == '__main__':
    main()
Python
0.000001
@@ -161,14 +161,14 @@ 13 1 -5:45:0 +6:31:3 9%0A%0A%22 @@ -3490,24 +3490,138 @@ d scores')%0A%0A + g_input.add_argument('--njobs', action='store', default=-1,%0A help='number of jobs')%0A%0A%0A opts = p @@ -4069,16 +4069,35 @@ _labels, + n_jobs=opts.njobs, %0A
61cb93a348879a36902159a6055b7e32b959e1c1
Fix data formatting bugs.
projects/tpoafptarbmit/scrape.py
projects/tpoafptarbmit/scrape.py
#!/usr/bin/env python3

from urllib.parse import parse_qsl
import json
import os
import os.path
import re
import sys

from bs4 import BeautifulSoup
import requests
import shapely.geometry


ROUTE_BASE_URL = 'http://www.thepassageride.com/Routes/'
OUTPUT_DIR = 'data/'


def fetch_text(url):
    r = requests.get(url)
    if r.status_code != 200:
        r.raise_for_status()
    return r.text


def scrape_route_list(html):
    print('Fetching route list...', end='')
    routes = []
    soup = BeautifulSoup(html, 'html.parser')
    for link in soup.select('#wikitext a[href*="/Routes/"]'):
        href = link.get('href')
        routes.append(
            {'name': link.text, 'number': int(href.strip(ROUTE_BASE_URL)), 'url': href,}
        )
    print('done (%d routes)' % len(routes))
    return routes


def fetch_route_description(route_url):
    print('\t%s' % route_url)
    html = fetch_text(route_url)
    soup = BeautifulSoup(html, 'html.parser')
    description = [p.prettify() for p in soup.select('#wikitext p')]
    map_url = soup.select_one('#wikitext a[href*="gmap-pedometer"]')
    if map_url is not None:
        map_url = map_url.get('href')
    return {
        'map_url': map_url,
        'description': '\n'.join(description),
    }


def fetch_route_map(map_url):
    print('\t%s' % map_url, end='')
    _, map_id = map_url.split('?r=')
    path = '/getRoute.php' if int(map_id) <= 5_000_000 else '/gp/ajaxRoute/get'
    r = requests.post('https://www.gmap-pedometer.com' + path, {'rId': map_id})
    if r.status_code != 200:
        r.raise_for_status()
    data = parse_qsl(r.text)
    polyline = [x[1] for x in data if x[0] == 'polyline'][0]
    coords = []
    points = polyline.split('a')
    for i in range(0, len(points) - 1, 2):
        # lat, lon
        coords.append(
            [float(points[i + 1]), float(points[i]),]
        )
    return coords


def route_to_geojson(route_meta, coords):
    return {
        'type': 'Feature',
        'geometry': {'type': 'LineString', 'coordinates': [coords]},
        'properties': route_meta,
    }


def simplify_route(coords):
    # This seems to be a good trade off between accurately
    # representing the full route and limiting the data.
    tolerance = 0.001
    line = shapely.geometry.LineString(coords)
    return line.simplify(tolerance).coords


def to_file_name(route):
    name = route['name'].lower()
    name = re.sub('[^a-z0-9]', '_', name)
    name = re.sub('_+', '_', name)
    return '%3d_%s.geojson' % (route['number'], name)


def main():
    os.makedirs(os.path.join(OUTPUT_DIR, "routes"), exist_ok=True)

    html = fetch_text(ROUTE_BASE_URL)
    routes = []
    for r in scrape_route_list(html):
        print('#%d "%s"' % (r['number'], r['name']))
        desc = fetch_route_description(r['url'])
        if desc['map_url'] is not None:
            coords = fetch_route_map(desc['map_url'])
        else:
            coords = []

        full_geo = route_to_geojson({**r, **desc}, coords)

        # Full resolution for the individual route file, low
        # resolution for the overview file.
        f = os.path.join(OUTPUT_DIR, 'routes', to_file_name(r))
        with open(f, 'w') as fp:
            json.dump(full_geo, fp, indent=4)

        simple_coords = simplify_route(coords) if coords else []
        simple_geo = route_to_geojson({**r, **desc}, simple_coords)
        routes.append(simple_geo)
        print(
            ' ... done (%d coords, simplified to %d)'
            % (len(coords), len(simple_coords))
        )

    collection = {'type': 'FeatureCollection', 'features': routes}
    print('Dumping full resolution to file...')
    with open(OUTPUT_DIR, 'index.geojson', 'w') as fp:
        json.dump(collection, fp, indent=4)

    print('All done!')


if __name__ == '__main__':
    main()
Python
0
@@ -2035,16 +2035,14 @@ s': -%5B coords -%5D %7D,%0A @@ -2306,16 +2306,21 @@ return +list( line.sim @@ -2338,24 +2338,25 @@ ance).coords +) %0A%0A%0Adef to_fi @@ -3683,16 +3683,29 @@ th open( +os.path.join( OUTPUT_D @@ -3723,16 +3723,17 @@ geojson' +) , 'w') a
7cb9703b1af4138e8f1a036245125d723add55a3
Fix error handling to return sensible HTTP error codes.
grano/views/__init__.py
grano/views/__init__.py
from colander import Invalid
from flask import request

from grano.core import app
from grano.lib.serialisation import jsonify

from grano.views.base_api import blueprint as base_api
from grano.views.entities_api import blueprint as entities_api
from grano.views.relations_api import blueprint as relations_api
from grano.views.schemata_api import blueprint as schemata_api
from grano.views.sessions_api import blueprint as sessions_api
from grano.views.projects_api import blueprint as projects_api
from grano.views.accounts_api import blueprint as accounts_api
from grano.views.files_api import blueprint as files_api
from grano.views.imports_api import blueprint as imports_api
from grano.views.pipelines_api import blueprint as pipelines_api
from grano.views.log_entries_api import blueprint as log_entries_api
from grano.views.permissions_api import blueprint as permissions_api
from grano.views.auth import check_auth


@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(410)
@app.errorhandler(500)
def handle_exceptions(exc):
    if not hasattr(exc, 'get_description'):
        message = exc.get_description(request.environ)
        message = message.replace('<p>', '').replace('</p>', '')
        body = {
            'status': exc.code,
            'name': exc.name,
            'message': message
        }
        headers = exc.get_headers(request.environ)
    else:
        body = {
            'status': 500,
            'name': exc.__class__.__name__,
            'message': unicode(exc)
        }
        headers = {}
    return jsonify(body, status=exc.code, headers=headers)


@app.errorhandler(Invalid)
def handle_invalid(exc):
    body = {
        'status': 400,
        'name': 'Invalid Data',
        'message': unicode(exc),
        'errors': exc.asdict()
    }
    return jsonify(body, status=400)


app.register_blueprint(base_api)
app.register_blueprint(entities_api)
app.register_blueprint(relations_api)
app.register_blueprint(schemata_api)
app.register_blueprint(sessions_api)
app.register_blueprint(projects_api)
app.register_blueprint(accounts_api)
app.register_blueprint(files_api)
app.register_blueprint(permissions_api)
app.register_blueprint(imports_api)
app.register_blueprint(pipelines_api)
app.register_blueprint(log_entries_api)
Python
0
@@ -47,16 +47,62 @@ request +%0Afrom werkzeug.exceptions import HTTPException %0A%0Afrom g @@ -1118,42 +1118,37 @@ if -not hasattr(exc, 'get_descri +isinstance(exc, HTTPExce ption -' ):%0A @@ -1634,24 +1634,34 @@ status= -exc.code +body.get('status') ,%0A
b4f0bbb8e9fd198cfa60daa3a01a4a48a0fd18af
Replace assertFalse/assertTrue(a in b)
sahara/tests/unit/plugins/storm/test_config_helper.py
sahara/tests/unit/plugins/storm/test_config_helper.py
# Copyright 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from testtools import testcase

from sahara.plugins.storm import config_helper as s_config
from sahara.plugins.storm import plugin as s_plugin


class TestStormConfigHelper(testcase.TestCase):
    def test_generate_storm_config(self):
        STORM_092 = '0.9.2'
        STORM_101 = '1.0.1'
        STORM_110 = '1.1.0'
        tested_versions = []
        master_hostname = "s-master"
        zk_hostnames = ["s-zoo"]
        configs_092 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_092)
        self.assertTrue('nimbus.host' in configs_092.keys())
        self.assertFalse('nimbus.seeds' in configs_092.keys())
        tested_versions.append(STORM_092)
        configs_101 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_101)
        self.assertFalse('nimbus.host' in configs_101.keys())
        self.assertTrue('nimbus.seeds' in configs_101.keys())
        self.assertTrue('client.jartransformer.class' in configs_101.keys())
        self.assertEqual(configs_101['client.jartransformer.class'],
                         'org.apache.storm.hack.StormShadeTransformer')
        tested_versions.append(STORM_101)
        configs_110 = s_config.generate_storm_config(
            master_hostname, zk_hostnames, STORM_110)
        self.assertFalse('nimbus.host' in configs_110.keys())
        self.assertTrue('nimbus.seeds' in configs_110.keys())
        self.assertTrue('client.jartransformer.class' in configs_110.keys())
        self.assertEqual(configs_110['client.jartransformer.class'],
                         'org.apache.storm.hack.StormShadeTransformer')
        tested_versions.append(STORM_110)
        storm = s_plugin.StormProvider()
        self.assertEqual(storm.get_versions(), tested_versions)
Python
0.000311
@@ -1141,36 +1141,34 @@ self.assert -True +In ('nimbus.host' i @@ -1157,35 +1157,33 @@ In('nimbus.host' - in +, configs_092.key @@ -1198,37 +1198,37 @@ self.assert -False +NotIn ('nimbus.seeds' @@ -1218,35 +1218,33 @@ n('nimbus.seeds' - in +, configs_092.key @@ -1409,37 +1409,37 @@ self.assert -False +NotIn ('nimbus.host' i @@ -1428,35 +1428,33 @@ In('nimbus.host' - in +, configs_101.key @@ -1469,36 +1469,34 @@ self.assert -True +In ('nimbus.seeds' @@ -1486,35 +1486,33 @@ n('nimbus.seeds' - in +, configs_101.key @@ -1527,36 +1527,34 @@ self.assert -True +In ('client.jartran @@ -1559,35 +1559,33 @@ ansformer.class' - in +, configs_101.key @@ -1903,13 +1903,13 @@ sert -False +NotIn ('ni @@ -1918,19 +1918,17 @@ us.host' - in +, configs @@ -1959,20 +1959,18 @@ f.assert -True +In ('nimbus @@ -1976,19 +1976,17 @@ s.seeds' - in +, configs @@ -2021,12 +2021,10 @@ sert -True +In ('cl @@ -2049,19 +2049,17 @@ r.class' - in +, configs
d98ac8c127caf4b70c8b0da9b6b6415c47f0f3eb
remove cruft
munge/codec/__init__.py
munge/codec/__init__.py
import os
import imp

__all__ = ['django', 'mysql', 'json', 'yaml']

__codecs = {}


# TODO move to .load?
def _do_find_import(directory, skiplist=None, suffixes=None):
    # explicitly look for None, suffixes=[] might be passed to not load anything
    if suffixes is None:
        suffixes = [t[0] for t in imp.get_suffixes()]

    loaded = dict()
    for module in os.listdir(directory):
        name, ext = os.path.splitext(module)
        if name in loaded:
            continue

        if name in skiplist:
            continue

        if ext in suffixes:
            #print "finding %s in %s" % (name, directory)
            #mod = imp.load_module(name, *imp.find_module(name, [directory]))
            try:
                imp_args = imp.find_module(name, [directory])
                mod = imp.load_module(name, *imp_args)
                loaded[name] = mod.__file__
            finally:
                try:
                    imp_args[0].close()
                except Exception:
                    pass

    return loaded

def find_import():

    this = os.path.split(__file__)
    this_dir = this[0]
    # remove trailing c if cached bytecode
    #this_file = this[1].rstrip('c')
    _do_find_import(this_dir, ('all', '__init__'))


def add_codec(exts, cls):
    if not isinstance(exts, tuple):
        exts = tuple(exts)
    # check for dupe extensions
    dupe_exts = set(ext for k in __codecs.keys() for ext in k).intersection(exts)
    if dupe_exts:
        raise ValueError("duplicate extension %s" % str(dupe_exts))
    __codecs[exts] = cls


def get_codecs():
    return __codecs


def list_codecs():
    return [ext[0] for ext in get_codecs().keys()]


def get_codec(tag, codecs=get_codecs()):
    for exts, cls in codecs.items():
        if tag in exts:
            return cls


def find_datafile(name, search_path=('.'), codecs=get_codecs()):
    """
    find all matching data files in search_path
    search_path: path of directories to load from
    codecs: allow to override from list of installed

    returns array of tuples (codec_object, filename)
    """
    rv = []
    if isinstance(search_path, basestring):
        search_path = [search_path]
    #print "search path ", str(search_path)

    ext = os.path.splitext(name)[1][1:]
    cls = get_codec(ext)
    if cls:
        for each in search_path:
            fq_filename = os.path.join(each, name)
            if os.path.exists(fq_filename):
                rv.append((cls, fq_filename))

    for exts, obj in codecs.items():
        for ext in exts:
            filename = "%s.%s" % (name, ext)
            for each in search_path:
                fq_filename = os.path.join(each, filename)
                if os.path.exists(fq_filename):
                    rv.append((obj, fq_filename))
    return rv


def load_datafile(name, search_path=('.'), codecs=get_codecs(), **kwargs):
    """
    find datafile and load them from codec
    TODO only does the first one

    kwargs:
        default = if passed will return that on failure instead of throwing
    """
    mod = find_datafile(name, search_path, codecs)
    if not mod:
        if 'default' in kwargs:
            return kwargs['default']
        raise IOError("file %s not found in search path %s" %(name, str(search_path)))
    (codec, datafile) = mod[0]
    return codec().load(open(datafile))
Python
0
@@ -82,1174 +82,8 @@ %7D%0A%0A%0A -# TODO move to .load?%0Adef _do_find_import(directory, skiplist=None, suffixes=None):%0A # explicitly look for None, suffixes=%5B%5D might be passed to not load anything%0A if suffixes is None:%0A suffixes = %5Bt%5B0%5D for t in imp.get_suffixes()%5D%0A%0A loaded = dict()%0A for module in os.listdir(directory):%0A name, ext = os.path.splitext(module)%0A if name in loaded:%0A continue%0A%0A if name in skiplist:%0A continue%0A%0A if ext in suffixes:%0A #print %22finding %25s in %25s%22 %25 (name, directory)%0A #mod = imp.load_module(name, *imp.find_module(name, %5Bdirectory%5D))%0A try:%0A imp_args = imp.find_module(name, %5Bdirectory%5D)%0A mod = imp.load_module(name, *imp_args)%0A loaded%5Bname%5D = mod.__file__%0A finally:%0A try:%0A imp_args%5B0%5D.close()%0A except Exception:%0A pass%0A%0A return loaded%0A%0Adef find_import():%0A%0A this = os.path.split(__file__)%0A this_dir = this%5B0%5D%0A # remove trailing c if cached bytecode%0A #this_file = this%5B1%5D.rstrip('c')%0A _do_find_import(this_dir, ('all', '__init__'))%0A%0A%0A def
603c36aec2a4704bb4cf41c224194a5f83f9babe
Set the module as auto_install
sale_payment_method_automatic_workflow/__openerp__.py
sale_payment_method_automatic_workflow/__openerp__.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    Author: Guewen Baconnier
#    Copyright 2015 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{'name': 'Sale Payment Method - Automatic Reconcile',
 'version': '1.0',
 'author': ['Camptocamp', 'Akretion'],
 'license': 'AGPL-3',
 'category': 'Generic Modules/Others',
 'depends': ['sale_payment_method',
             'sale_automatic_workflow'],
 'website': 'http://www.camptocamp.com',
 'data': [],
 'test': [],
 'installable': True,
 'auto_install': False,
 }
Python
0.000001
@@ -1291,12 +1291,11 @@ l': -Fals +Tru e,%0A
0d4ad25bb01dc1e303ebaf940853f1b18532b3cd
update hook
muxiwebsite/__init__.py
muxiwebsite/__init__.py
# coding: utf-8

"""
    muxiwebsite: 木犀团队的官网
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    木犀团队是华中师范大学自由的学生互联网团队,分为

    web(前端、后台),设计, 安卓 组

    木犀官网是木犀团队的官方网站:
    功能模块:

        1.muxi: 木犀官网 木犀的简介信息
        2.blog: 木犀博客 木犀团队的博客
        3.book: 木犀图书 木犀图书管理
        4.share: 木犀分享 木犀内部的分享小站

    管理模块:
        backend: 木犀统一管理后台

    ~我们在路上, 前方不会太远~。
"""

from flask import Flask, Markup, redirect, url_for, render_template, request
from flask_sqlalchemy import SQLAlchemy
import flask_login as login
from flask_login import LoginManager
from flask_pagedown import PageDown
from basedir import basedir
import flask_admin as admin
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView

import markdown
import os

# the root path of xueer
# __filename__ 就是占位
muxi_root_path = os.path.abspath(os.path.dirname("__filename__"))

# 实例创建+蓝图注册
app = Flask(__name__)

# 配置(通用)
app.config['SECRET_KEY'] = "I hate flask!"
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("MUXI_WEBSITE_SQL") or "sqlite:///" + os.path.join(basedir, 'muxi_data.sqlite')  # 系统相应替换
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['WHOOSH_BASE'] = "search.db"
app.config['MAX_SEARCH_RESULTS'] = 5  # 图书搜索每页最多加载5个搜索结果
app.config['MUXI_ADMIN'] = 'neo1218'
app.config["SHARE_PER_PAGE"] = 5
app.config["MUXI_SHARES_PER_PAGE"] = 10
app.config["SHARE_HOT_PER_PAGE"] = 3
app.config['MUXI_USERS_PER_PAGE'] = 10
app.config['BLOG_PER_PAGE'] = 10
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SERVER_NAME'] = os.environ.get("MUXI_WEBSITE_SERVERNAME")

# 初始化扩展(app全局属性)
db = SQLAlchemy(app)
login_manager = LoginManager(app)
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
pagedown = PageDown(app)


# Index
def is_mobie():
    platform = request.user_agent.platform
    if platform in ["android", "iphone", "ipad"]:
        return True
    else:
        return False


@app.route('/')
def index():
    flag = is_mobie()
    if flag:
        return render_template("index_m.html")
    else:
        return render_template('index_d.html')


@app.route('/deploy')
def deploy():
    os.system('sudo kill `sudo lsof -t -i:9001`;git pull;uwsgi --ini app.ini&')
    return "deployed"


@app.route('/test/')
def test():
    return '<h1>test 23</h1>'


@app.route('/join')
def join():
    flag = is_mobie()
    if flag:
        return render_template("index_m.html")
    else:
        return render_template('join_d.html')


class MyAdminIndexView(admin.AdminIndexView):
    """rewrite is_authenticated method"""
    def is_accessible(self):
        # return login.current_user.is_authenticated
        return login.current_user.is_admin()

    def inaccessible_callback(self, name, **kwargs):
        return redirect(url_for('auth.login'))


admin = Admin(
    app, name="木muxi犀",
    template_mode="bootstrap3",
    index_view=MyAdminIndexView(),
    base_template='my_master.html'
)

from .models import User, Share, Blog, Book, Comment
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Book, db.session))
admin.add_view(ModelView(Share, db.session))
admin.add_view(ModelView(Comment, db.session))
admin.add_view(ModelView(Blog, db.session))


# jinja2 filters
@app.template_filter('neomarkdown')
def neomarkdown(markdown_content):
    """
    jinja2 markdown filter
    :param markdown_content: markdown
    :return: text
    """
    content = Markup(markdown.markdown(markdown_content))
    return content


# 蓝图注册
from .book import books
app.register_blueprint(books)

from .share import shares
app.register_blueprint(shares)

from .auth import auth
app.register_blueprint(auth, url_prefix='/auth')

from .blog import blogs
app.register_blueprint(blogs)

from profile import profile
app.register_blueprint(profile, url_prefix="/profile")

from api import api
app.register_blueprint(api, url_prefix="/api")
Python
0
@@ -2199,12 +2199,45 @@ -i: -9001 +5555%60;sudo kill %60sudo lsof -t -i:5555 %60;gi @@ -2346,11 +2346,8 @@ test - 23 %3C/h1
a90142fb0ae6282fda3cadaa31a4f8ce73af1352
modify __init__.py
muxiwebsite/__init__.py
muxiwebsite/__init__.py
# coding: utf-8

"""
    muxiwebsite: 木犀团队的官网
    ~~~~~~~~~~~~~~~~~~~~~~~~~
    木犀团队是华中师范大学自由的学生互联网团队,分为
    web(前端、后台),设计, 安卓 组
    木犀官网是木犀团队的官方网站:
    功能模块:
        1.muxi: 木犀官网 木犀的简介信息
        2.blog: 木犀博客 木犀团队的博客
        3.book: 木犀图书 木犀图书管理
        4.share: 木犀分享 木犀内部的分享小站
    管理模块:
        backend: 木犀统一管理后台
    ~我们在路上, 前方不会太远~。
"""

from flask import Flask, Markup, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
import flask_login as login
from flask_login import LoginManager
from flask_pagedown import PageDown
from basedir import basedir
import flask_admin as admin
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView

import markdown
import os

# the root path of xueer
# __filename__ 就是占位
muxi_root_path = os.path.abspath(os.path.dirname("__filename__"))

# 实例创建+蓝图注册
app = Flask(__name__)

# 配置(通用)
app.config['SECRET_KEY'] = "I hate flask!"
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + os.path.join(basedir, 'muxi_data.sqlite')  # 系统相应替换
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['WHOOSH_BASE'] = "search.db"
app.config['MAX_SEARCH_RESULTS'] = 5  # 图书搜索最多加载5个搜索结果
app.config['MUXI_ADMIN'] = 'neo1218'
app.config["SHARE_PER_PAGE"] = 5
app.config["MUXI_SHARES_PER_PAGE"] = 10
app.config["SHARE_HOT_PER_PAGE"] = 3
app.config['MUXI_USERS_PER_PAGE'] = 10

# 初始化扩展(app全局属性)
db = SQLAlchemy(app)
login_manager = LoginManager(app)
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
pagedown = PageDown(app)


class MyAdminIndexView(admin.AdminIndexView):
    """rewrite is_authenticated method"""
    def is_accessible(self):
        # return login.current_user.is_authenticated
        return login.current_user.is_admin()

    def inaccessible_callback(self, name, **kwargs):
        return redirect(url_for('auth.login'))


admin = Admin(
    app, name="木muxi犀",
    template_mode="bootstrap3",
    index_view=MyAdminIndexView(),
    base_template='my_master.html'
)

from .models import User, Share, Blog, Book, Comment
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Book, db.session))
admin.add_view(ModelView(Share, db.session))
admin.add_view(ModelView(Comment, db.session))
admin.add_view(ModelView(Blog, db.session))


# jinja2 filters
@app.template_filter('neomarkdown')
def neomarkdown(markdown_content):
    """
    jinja2 markdown filter
    :param markdown_content: markdown
    :return: text
    """
    content = Markup(markdown.markdown(markdown_content))
    return content


# 蓝图注册
from .book import books
app.register_blueprint(books, url_prefix='/book')

from .muxi import muxi
app.register_blueprint(muxi, url_prefix='/muxi')

from .share import shares
app.register_blueprint(shares, url_prefix='/share')

from .auth import auth
app.register_blueprint(auth, url_prefix='/auth')

from .blog import blogs
app.register_blueprint(blogs, url_prefix='/blog')

from profile import profile
app.register_blueprint(profile, url_prefix="/profile")

from api import api
app.register_blueprint(api, url_prefix="/api")
Python
0.000162
@@ -68,16 +68,17 @@ ~~~~~~~%0A +%0A %E6%9C%A8%E7%8A%80%E5%9B%A2%E9%98%9F @@ -98,16 +98,17 @@ %E8%81%94%E7%BD%91%E5%9B%A2%E9%98%9F%EF%BC%8C%E5%88%86%E4%B8%BA%0A +%0A @@ -127,16 +127,17 @@ %E8%AE%A1%EF%BC%8C %E5%AE%89%E5%8D%93 %E7%BB%84%0A +%0A %E6%9C%A8%E7%8A%80%E5%AE%98%E7%BD%91 @@ -154,24 +154,25 @@ :%0A %E5%8A%9F%E8%83%BD%E6%A8%A1%E5%9D%97:%0A +%0A 1.mu @@ -292,16 +292,17 @@ %E5%86%85%E9%83%A8%E7%9A%84%E5%88%86%E4%BA%AB%E5%B0%8F%E7%AB%99%0A +%0A %E7%AE%A1%E7%90%86%E6%A8%A1%E5%9D%97 @@ -330,16 +330,17 @@ %E7%8A%80%E7%BB%9F%E4%B8%80%E7%AE%A1%E7%90%86%E5%90%8E%E5%8F%B0%0A +%0A ~%E6%88%91%E4%BB%AC%E5%9C%A8 @@ -3112,8 +3112,10 @@ =%22/api%22) +%0A%0A
27b0e86f15a2f89b2f8715dffa5cade17b7f5adf
Update singletons.py
omstd_lefito/lib/singletons.py
omstd_lefito/lib/singletons.py
# -*- coding: utf-8 -*-

__ALL__ = ["Displayer", "IntellCollector"]


# -------------------------------------------------------------------------
class Displayer:
    """Output system"""
    instance = None

    # ---------------------------------------------------------------------
    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = object.__new__(cls, *args, **kwargs)
            cls.__initialized = False
        return cls.instance

    # ---------------------------------------------------------------------
    def config(self, **kwargs):
        self.out_file = kwargs.get("out_file", None)
        self.out_screen = kwargs.get("out_screen", True)
        self.verbosity = kwargs.get("verbosity", 0)
        if self.out_file:
            self.out_file_handler = open(self.out_file, "w")

    # ---------------------------------------------------------------------
    def display(self, message):
        if self.verbosity > 0:
            self.__display(message)

    # ---------------------------------------------------------------------
    def display_verbosity(self, message):
        if self.verbosity > 1:
            self.__display(message)

    # ---------------------------------------------------------------------
    def display_more_verbosity(self, message):
        if self.verbosity > 2:
            self.__display(message)

    # ---------------------------------------------------------------------
    def __display(self, message):
        if self.out_screen:
            print(message)
        if self.out_file_handler:
            self.out_file_handler.write(message)

    # ---------------------------------------------------------------------
    def __init__(self):
        if not self.__initialized:
            self.__initialized = True
            self.out_file = None
            self.out_file_handler = None
            self.out_screen = True
            self.verbosity = 0


# -------------------------------------------------------------------------
class IntellCollector:
    """gathered data container"""
    instance = None

    # ---------------------------------------------------------------------
    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = object.__new__(cls, *args, **kwargs)
            cls.__initialized = False
        return cls.instance

    # ---------------------------------------------------------------------
    def config(self, **kwargs):
        self.target = kwargs.get("target", None)

    # ---------------------------------------------------------------------
    def gather(self, params):
        out = Displayer()
        if params.url is not None:
            self.target = params.url
        else:
            self.target = str(input("url: "))
        originalreq = dorequest(self.target, params)
        m = re.search(b"(charset=(?P<value>.*)\")", originalreq['body'])
        if m:
            self.charset = m.group('value').decode()
        self.originalreq_lines = [x.decode(self.charset) for x in originalreq['body'].splitlines()]
        self.originalhead = originalreq['head']
        out.display(originalreq['head'])
        self.originalsess = getsess(originalreq['head'])
        self.parsedurl = urlparse(self.target)
        self.parametros = self.parsedurl.query.split('&')

    # ---------------------------------------------------------------------
    def show(self):
        out = Displayer()
        out.display("target: %s" % str(self.target))
        out.display("originalreq_lines: %s" % str(self.originalreq_lines))
        out.display("originalhead: %s" % str(self.originalhead))
        out.display("originalsess: %s" % str(self.originalsess))
        out.display("parsedurl: %s" % str(self.parsedurl))
        out.display("parametros: %s" % str(self.parametros))
        out.display("charset: %s" % str(self.charset))

    # ---------------------------------------------------------------------
    def __init__(self):
        if not self.__initialized:
            self.__initialized = True
            self.target = None
            self.originalreq_lines = []
            self.originalhead = None
            self.originalsess = None
            self.parsedurl = None
            self.parametros = []
            self.charset = 'utf-8'
Python
0.000001
@@ -2535,24 +2535,570 @@ et%22, None)%0A%0A + # --------------------------------------------------------------------------%0A def getsess(self):%0A out = Displayer()%0A if 'set-cookie' in self.originalhead:%0A out.display(self.originalhead%5B'set-cookie'%5D)%0A m = re.search(%22(PHPSESSID=(?P%3Cvalue%3E.*);)%22, self.originalhead%5B'set-cookie'%5D)%0A if m:%0A out.display(m.group('value'))%0A self.originalsess = m.group('value')%0A else:%0A self.originalsess = ''%0A else:%0A self.originalsess = ''%0A%0A # ------ @@ -3748,50 +3748,16 @@ elf. -originalsess = getsess(originalreq%5B'head'%5D +getsess( )%0A
989988aa604b5a125c765294080777c57ec6c535
Fix bug OppsDetail, add channel_long_slug
opps/articles/views/generic.py
opps/articles/views/generic.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.contrib.sites.models import get_current_site
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django import template
from django.conf import settings

from opps.articles.utils import set_context_data
from opps.channels.models import Channel


class OppsList(ListView):
    context_object_name = "context"
    paginate_by = settings.OPPS_PAGINATE_BY
    limit = settings.OPPS_VIEWS_LIMIT
    slug = None

    def get_context_data(self, **kwargs):
        return set_context_data(self, OppsList, **kwargs)

    @property
    def template_name(self):
        domain_folder = self.type
        if self.site.id > 1:
            domain_folder = "{0}/{1}".format(self.site, self.type)
        return '{0}/{1}.html'.format(domain_folder, self.long_slug)

    @property
    def queryset(self):
        self.site = get_current_site(self.request)
        try:
            self.long_slug = self.kwargs.get(
                'channel__long_slug',
                Channel.objects.get_homepage(site=self.site).long_slug)
        except AttributeError:
            self.long_slug = None
            return None

        self.channel = get_object_or_404(Channel,
                                         site=self.site,
                                         long_slug=self.long_slug,
                                         date_available__lte=timezone.now(),
                                         published=True)

        self.channel_long_slug = [self.long_slug]
        self.channel_long_slug.append(
            [children.long_slug for children in self.channel.get_children()])

        self.article = self.model.objects.filter(
            site=self.site,
            channel_long_slug__in=self.channel_long_slug,
            date_available__lte=timezone.now(),
            published=True)[:self.limit]
        return self.article


class OppsDetail(DetailView):
    context_object_name = "context"
    limit = settings.OPPS_VIEWS_LIMIT

    def get_context_data(self, **kwargs):
        return set_context_data(self, OppsDetail, **kwargs)

    @property
    def template_name(self):
        domain_folder = self.type
        if self.site.id > 1:
            domain_folder = "{0}/{1}".format(self.site, self.type)

        try:
            _template = '{0}/{1}/{2}.html'.format(
                domain_folder, self.long_slug, self.slug)
            template.loader.get_template(_template)
        except template.TemplateDoesNotExist:
            _template = '{0}/{1}.html'.format(domain_folder, self.long_slug)

        return _template

    @property
    def queryset(self):
        self.site = get_current_site(self.request)
        self.slug = self.kwargs.get('slug')
        try:
            self.long_slug = self.kwargs.get(
                'channel__long_slug',
                Channel.objects.get_homepage(site=self.site).long_slug)
        except AttributeError:
            self.long_slug = None
            return None

        self.article = self.model.objects.filter(
            site=self.site,
            channel_long_slug=self.long_slug,
            slug=self.slug,
            date_available__lte=timezone.now(),
            published=True)
        return self.article
Python
0
@@ -2078,16 +2078,43 @@ WS_LIMIT +%0A channel_long_slug = %5B%5D %0A%0A de @@ -2839,16 +2839,17 @@ 'slug')%0A +%0A
5878a9f6102acc466a95d286931fed494c81e571
Add debug
pskb_website/views.py
pskb_website/views.py
""" Main views of PSKB app """ from functools import wraps from flask import redirect, url_for, session, request, render_template, flash, json, g from . import app from . import remote from . import models def login_required(f): @wraps(f) def decorated_function(*args, **kwargs): if 'github_token' not in session: # Save off the page so we can redirect them to what they were # trying to view after logging in. session['previously_requested_page'] = request.url return redirect(url_for('login')) return f(*args, **kwargs) return decorated_function @app.route('/') def index(): # FIXME: This should only fetch the most recent x number. articles = models.get_available_articles(published=True) text = models.read_file('welcome.md', rendered_text=True) g.index_active = True return render_template('index.html', articles=articles, welcome_text=text) @app.route('/login') def login(): return render_template('login.html') @app.route('/faq') def faq(): g.faq_active = True text = models.read_file('faq.md', rendered_text=True) return render_template('faq.html', body=text) @app.route('/github_login') def github_login(): return remote.github.authorize(callback=url_for('authorized', _external=True)) @app.route('/logout') @login_required def logout(): session.pop('github_token', None) session.pop('login', None) session.pop('name', None) return redirect(url_for('index')) @app.route('/github/authorized') def authorized(): resp = remote.github.authorized_response() if resp is None: return 'Access denied: reason=%s error=%s' % ( request.args['error'], request.args['error_description']) session['github_token'] = (resp['access_token'], '') url = session.pop('previously_requested_page', None) if url is not None: return redirect(url) return redirect(url_for('user_profile')) @app.route('/user/<author_name>', methods=['GET']) @app.route('/user/', defaults={'author_name': None}) def user_profile(author_name): if author_name is None: user = models.find_user() if user.name: session['name'] = user.name if user.login: session['login'] = user.login if 'name' not in session: session['name'] = user.login else: user = models.find_user(author_name) articles = models.get_articles_for_author(user.login) g.profile_active = True return render_template('profile.html', user=user, articles=articles) @app.route('/write/<path:article_path>/', methods=['GET']) @app.route('/write/', defaults={'article_path': None}) @login_required def write(article_path): article = None branch_article = False g.write_active = True if article_path is not None: article = models.read_article(article_path, rendered_text=False) if article.sha is None: article.sha = '' user = models.find_user(session['login']) if user is None: flash('Cannot save unless logged in') return render_template('index.html'), 404 if user.login != article.author_name: branch_article = True return render_template('editor.html', article=article, branch_article=branch_article) @app.route('/review/<path:article_path>', methods=['GET']) @app.route('/review/', defaults={'article_path': None}, methods=['GET']) def review(article_path): if article_path is None: g.review_active = True articles = models.get_available_articles(published=False) return render_template('review.html', articles=articles) g.write_active = True branch = request.args.get('branch', 'master') article = models.read_article(article_path, branch=branch) if article is None: flash('Failing reading article') return redirect(url_for('index')) login = session.get('login', None) # Only allow editing if user is logged in and it's the 
master branch (i.e. # they can branch from it) or it's their own branch. if (login and branch == 'master') or login == branch: allow_edits = True else: allow_edits = False # Use http as canonical protocol for url to avoid having two separate # comment threads for an article. Disqus uses this variable to save # comments. canonical_url = request.base_url.replace('https://', 'http://') return render_template('article.html', article=article, allow_edits=allow_edits, canonical_url=canonical_url) @app.route('/save/', methods=['POST']) @login_required def save(): user = models.find_user(session['login']) if user is None: flash('Cannot save unless logged in') return render_template('index.html'), 404 # Data is stored in form with input named content which holds json. The # json has the 'real' data in the 'content' key. content = json.loads(request.form['content'])['content'] path = request.form['path'] title = request.form['title'] sha = request.form['sha'] if path: message = 'Updates to %s' % (title) else: message = 'New article %s' % (title) article = models.branch_or_save_article(title, path, message, content, user.login, user.email, sha) # Successful creation if article: return redirect(url_for('review', article_path=article.path, branch=article.branch)) flash('Failed creating article on github') return redirect(url_for('index'))
Python
0.000003
@@ -2439,24 +2439,70 @@ thor_name)%0A%0A + print 'session', session%5B'github_token'%5D%0A%0A articles
da9a82aedac233cd5411c3edd7be6ac0b0957838
Fix return issue of testexecutor.
ptest/testexecutor.py
ptest/testexecutor.py
from datetime import datetime
import threading
import traceback

from ptest import plistener, plogger, screencapturer
from enumeration import PDecoratorType, TestCaseStatus
from plogger import pconsole
from testsuite import test_suite, NoTestCaseAvailableForThisThread

__author__ = 'karl.gong'


class TestExecutor(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.__properties = {}

    def run(self):
        while True:
            try:
                test_case = test_suite.pop_test_case()
            except NoTestCaseAvailableForThisThread:
                break
            test_case_full_name = test_case.full_name
            logger_filler = "-" * (100 - len(test_case_full_name) - 6)
            before_method = test_case.before_method
            test = test_case.test
            after_method = test_case.after_method

            plistener.test_listener.on_test_case_start(test_case)
            test_case.start_time = datetime.now()
            is_before_method_failed = False

            # before method
            if before_method is not None:
                self.update_properties(running_test_case_fixture=before_method)
                before_method.start_time = datetime.now()
                try:
                    before_method.run()
                except Exception as e:
                    # before method failed
                    is_before_method_failed = True
                    plogger.warn("Failed with following message:\n%s\n%s" % (e.message, traceback.format_exc()))
                    screencapturer.take_screen_shot()
                before_method.end_time = datetime.now()

            # test case
            self.update_properties(running_test_case_fixture=test)
            test.start_time = datetime.now()
            if is_before_method_failed:
                # skip test case
                plogger.warn("@%s failed, so skipped." % PDecoratorType.BeforeMethod)
                pconsole.warning("%s%s|SKIP|" % (test_case_full_name, logger_filler))
                test_case.status = TestCaseStatus.SKIPPED
                test_case.skip_message = "@%s failed, so skipped." % PDecoratorType.BeforeMethod
            else:
                # run test case
                try:
                    test.run()
                    pconsole.info("%s%s|PASS|" % (test_case_full_name, logger_filler))
                    test_case.status = TestCaseStatus.PASSED
                except Exception as e:
                    plogger.warn("Failed with following message:\n%s\n%s" % (e.message, traceback.format_exc()))
                    screencapturer.take_screen_shot()
                    pconsole.warning("%s%s|FAIL|" % (test_case_full_name, logger_filler))
                    test_case.status = TestCaseStatus.FAILED
                    test_case.failure_message = e.message
                    test_case.failure_type = e.__class__.__name__
                    test_case.stack_trace = traceback.format_exc()
            test.end_time = datetime.now()

            # after method
            if after_method is not None:
                self.update_properties(running_test_case_fixture=after_method)
                after_method.start_time = datetime.now()
                if not is_before_method_failed or after_method.always_run:
                    # run after method
                    try:
                        after_method.run()
                    except Exception as e:
                        plogger.warn("Failed with following message:\n%s\n%s" % (e.message, traceback.format_exc()))
                        screencapturer.take_screen_shot()
                else:
                    # skip after method
                    plogger.warn("@%s failed, so skipped." % PDecoratorType.BeforeMethod)
                after_method.end_time = datetime.now()

            test_case.end_time = datetime.now()
            plistener.test_listener.on_test_case_finish(test_case)

    def update_properties(self, **kwargs):
        self.__properties.update(kwargs)

    def get_property(self, key):
        try:
            return self.__properties[key]
        except KeyError:
            return None


def update_properties(**kwargs):
    threading.currentThread().update_properties(**kwargs)


def get_property(key):
    threading.currentThread().get_property(key)


def get_name():
    threading.currentThread().getName()
Python
0.000001
@@ -4304,24 +4304,31 @@ y(key):%0A +return threading.cu @@ -4376,24 +4376,31 @@ name():%0A +return threading.cu
4a6060f476aebac163dbac8f9822539596379c0a
Use current_app.babel_instance instead of babel
welt2000/__init__.py
welt2000/__init__.py
from flask import Flask, request, session
from flask.ext.babel import Babel
from babel.core import negotiate_locale

from welt2000.__about__ import (
    __title__, __summary__, __uri__, __version__, __author__, __email__,
    __license__,
)  # noqa


app = Flask(__name__)
app.secret_key = '1234567890'

babel = Babel(app)

translations = ['en']
translations.extend(map(str, babel.list_translations()))


@app.template_global()
@babel.localeselector
def get_locale():
    lang = session.get('lang')
    if lang and lang in translations:
        return lang

    preferred = map(lambda l: l[0], request.accept_languages)
    return negotiate_locale(preferred, translations)


from welt2000 import views  # noqa
Python
0.000018
@@ -34,16 +34,29 @@ session +, current_app %0Afrom fl @@ -335,151 +335,174 @@ p)%0A%0A -translations = %5B'en'%5D%0Atranslations.extend(map(str, babel.list_translations()))%0A%0A%[email protected]_global()%[email protected]%0Adef get_locale(): +%[email protected]_global()%[email protected]%0Adef get_locale():%0A available = %5B'en'%5D%0A available.extend(map(str, current_app.babel_instance.list_translations()))%0A %0A @@ -553,28 +553,25 @@ lang in -translations +available :%0A @@ -686,28 +686,25 @@ ferred, -translations +available )%0A%0A%0Afrom
58ec62fe47bf6e7acb3302a29fd0df48c4342cec
Enable break and continue in templates
logya/template.py
logya/template.py
# -*- coding: utf-8 -*-
import io
import os

from jinja2 import Environment, BaseLoader, TemplateNotFound, escape


def filesource(logya_inst, name, lines=None):
    """Read and return source of text files.

    A template function that reads the source of the given file and returns it.
    The text is escaped so it can be rendered safely on a Web page.

    The lines keyword argument is used to limit the number of lines returned.

    A use case is for documentation projects to show the source code used to
    render the current example.
    """

    fname = os.path.join(logya_inst.dir_site, name)
    with io.open(fname, 'r', encoding='utf-8') as f:
        if lines is None:
            content = f.read()
        else:
            content = ''.join(f.readlines()[:lines])
    return escape(content)


def get_doc(logya_inst, url):
    """Get document located at given URL."""

    return logya_inst.docs.get(url)


class Template():
    """Class to handle templates."""

    def __init__(self, logya_inst):
        """Initialize template environment."""

        self.vars = {}
        self.dir_templates = logya_inst.dir_templates
        self.env = Environment(loader=TemplateLoader(self.dir_templates))

        # self.env.trim_blocks = True

        # add filesource global to allow for including the source of a file
        self.env.globals['filesource'] = lambda x, lines=None: filesource(
            logya_inst, x, lines=lines)
        self.env.globals['get_doc'] = lambda x: get_doc(logya_inst, x)


class TemplateLoader(BaseLoader):
    """Class to handle template Loading."""

    def __init__(self, path):
        """Set template path."""

        self.path = path

    def get_source(self, environment, template):
        """Set template source."""

        path = os.path.join(self.path, template)
        if not os.path.exists(path):
            raise TemplateNotFound(template)
        mtime = os.path.getmtime(path)
        with io.open(path, 'r', encoding='utf-8') as f:
            source = f.read()
        return source, path, lambda: mtime == os.path.getmtime(path)
Python
0
@@ -1210,16 +1210,124 @@ ates))%0A%0A + # Enable break and continue in templates%0A self.env.add_extension('jinja2.ext.loopcontrols')%0A%0A
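The loopcontrols extension added by this hunk is what makes {% break %} and {% continue %} legal inside template loops. A self-contained illustration of the feature (not taken from the record):

from jinja2 import Environment

# Enabling the extension at construction time is equivalent to the
# add_extension() call the diff makes on an existing environment.
env = Environment(extensions=['jinja2.ext.loopcontrols'])
template = env.from_string(
    '{% for n in numbers %}'
    '{% if n > 2 %}{% break %}{% endif %}'
    '{{ n }} '
    '{% endfor %}')
print(template.render(numbers=[1, 2, 3, 4]))  # -> '1 2 '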
fcbc7a5a1551c1148ea747a51976c2047a3850a2
Clean up code a bit
graph/weight-graph.py
graph/weight-graph.py
#!/usr/bin/env python3 import sys import tkinter as tk import tkplot import threading from queue import Queue, Empty import serial import struct from collections import namedtuple import time import csv import math def execute_delayed(root, generator): """For each yielded value wait the given amount of time (in seconds) without pausing the Tkinter main loop. See 'slowmotion' in http://effbot.org/zone/tkinter-generator-polyline.htm """ try: root.after(int(next(generator) * 1000), execute_delayed, root, generator) except StopIteration: pass class Status(namedtuple('Status', ['local_time', 'weight_reading'])): @property def temp(self): return self.weight_reading START_TIME = time.time() def local_time(): return time.time() - START_TIME class Arduino: def __init__(self, filename): self.filename = filename self.status = Queue() self.command = Queue() self.thread = threading.Thread(target=self.interact, daemon=True) self.started = threading.Event() self.last_status = None self._power = None def line_status(self, line): t_ms = 0 pwm = 0 setpoint = 0 temp_outside = 0 weight_reading = float(line.strip()) return Status(local_time(), weight_reading) def interact(self): with open(self.filename, 'wb') as f: self.serial = serial.Serial('/dev/arduino', 9600) try: self.started.set() while True: try: while True: None self.serial.write(self.command.get_nowait()) except Empty: pass line = self.serial.readline() try: status = self.line_status(line) except ValueError: continue f.write(line) f.flush() self.status.put_nowait(status) self.last_status = status finally: self.started.clear() def iter_status(self): assert(self.started.is_set()) try: while True: status = self.status.get_nowait() yield status except Empty: pass def __str__(self): return "<{} {}>".format(self.__class__.__name__, self.last_status if self.started.is_set() else '(stopped)') @property def calibrationWeight(self): assert(self.started.is_set()) return self._power @calibrationWeight.setter def calibrationWeight(self, calibrationWeight): assert(self.started.is_set()) assert(0 <= calibrationWeight <= 2**24) command = struct.pack('4sc', str.encode(str(int(calibrationWeight))), b'\n') self.command.put(command) def start(self): self.thread.start() self.started.wait() class HeatPlot(tkplot.TkPlot): def __init__(self, root): tkplot.TkPlot.__init__(self, root, (9, 6)) self.plot = self.figure.add_subplot(111) self.plot.set_xlabel("Time (s)") self.plot.set_ylabel("Weight (g)") self.plot.set_xlim(0, 1) self.plot.set_ylim(0, 110) self.weight_reading_line, = self.plot.plot([], [], label="Weight, g") self.plot.legend(handles=[self.weight_reading_line], bbox_to_anchor=(0.3, 1)) self.figure.tight_layout() def update(self, status): time = [s.local_time for s in status] weight_reading = [s.weight_reading for s in status] if time: self.plot.set_xlim(min(time), max(time)) self.plot.set_ylim(0, max(110, round(max(weight_reading) / 50.0 + 0.5) * 50 + 10)) self.weight_reading_line.set_xdata(time) self.weight_reading_line.set_ydata(weight_reading) self.figure.canvas.draw() class Krosnis: def __init__(self, root, experiment): self.root = root self.root.title("Scales - {}".format(experiment)) self.experiment = experiment self.update_period = 1.0 self.plot = HeatPlot(self.root) self.plot.pack(fill=tk.BOTH, expand=1) self.toolbar = tk.Frame(self.root) self.toolbar.pack(fill=tk.X) self.label = tk.Label(self.toolbar) self.label.pack(side=tk.RIGHT, fill=tk.BOTH, expand=1) self.power_val = tk.StringVar() self.power_val.set('0') self.power = tk.Entry(self.toolbar, 
textvariable=self.power_val) self.power.bind('<Return>', self.set_calibrationWeight) self.power.pack(side=tk.LEFT) self.power.focus_set() self.set_power = tk.Button(self.toolbar, text='Set weight', command=self.set_calibrationWeight) self.set_power.pack(side=tk.LEFT) # self.setpoint_val = tk.StringVar() # self.setpoint_val.set('0.0') # self.setpoint = tk.Entry(self.toolbar, textvariable=self.setpoint_val) # self.setpoint.bind('<Return>', self.set_setpoint) # self.setpoint.pack(side=tk.LEFT) # self.setpoint.focus_set() # self.set_setpoint = tk.Button(self.toolbar, text='Set temperature', command=self.set_setpoint) # self.set_setpoint.pack(side=tk.LEFT) self.arduino = Arduino("experiments/{}_raw.csv".format(experiment)) self.every_status = [] self.th0 = 0 def set_status(self, status): self.label.config(text=status) def set_calibrationWeight(self, event=None): self.arduino.calibrationWeight = float(self.power_val.get()) # def set_setpoint(self, event=None): # self.arduino.setpoint = float(self.setpoint_val.get()) def start(self): _self = self def shell(): self = _self threading.Thread(target=shell, daemon=True).start() execute_delayed(self.root, self.sample()) def time_deviation(self): if self.every_status: t0 = self.every_status[0].time t0_local = self.every_status[0].local_time t_sum = 0 for s in self.every_status: t_sum += (s.time - t0) - (s.local_time - t0_local) return t_sum / len(self.every_status) else: return 0 def control(self): pass def sample(self): self.arduino.start() with open("experiments/{}.csv".format(self.experiment), 'w') as f: csvf = csv.writer(f) csvf.writerow(Status._fields) while True: try: for s in self.arduino.iter_status(): if self.th0 is None: self.th0 = s.temp csvf.writerow(s) self.set_status(str(s)) self.every_status.append(s) f.flush() self.plot.update(self.every_status) self.control() yield self.update_period except Exception as e: print(e) def run(experiment): root = tk.Tk() root.geometry("1000x700") win = Krosnis(root, experiment) win.start() tk.mainloop() if __name__ == "__main__": run(sys.argv[1])
Python
0.000002
@@ -1161,97 +1161,8 @@ e):%0A - %0A t_ms = 0%0A pwm = 0%0A setpoint = 0%0A temp_outside = 0%0A%0A
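The hunk only removes dead code: the unused locals in line_status go away, leaving:

def line_status(self, line):
    weight_reading = float(line.strip())
    return Status(local_time(), weight_reading)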
86465840261a8af2a777c87a6f09a3b4f5390c73
Check that at least source/dest are bound before attempting to plot
graphistry/plotter.py
graphistry/plotter.py
import random import string import json import pandas import pygraphistry import util class Plotter(object): def __init__(self): # Bindings self.edges = None self.nodes = None self.source = None self.destination = None self.node = None self.edge_title = None self.edge_label = None self.edge_color = None self.edge_weight = None self.point_title = None self.point_label = None self.point_color = None self.point_size = None # Settings self.height = 500 self.url_params = {'info': 'true'} def bind(self, source=None, destination=None, node=None, edge_title=None, edge_label=None, edge_color=None, edge_weight=None, point_title=None, point_label=None, point_color=None, point_size=None): self.source = source or self.source self.destination = destination or self.destination self.node = node or self.node self.edge_title = edge_title or self.edge_title self.edge_label = edge_label or self.edge_label self.edge_color = edge_color or self.edge_color self.edge_weight = edge_weight or self.edge_weight self.point_title = point_title or self.point_title self.point_label = point_label or self.point_label self.point_color = point_color or self.point_color self.point_size = point_size or self.point_size return self def nodes(nodes): self.nodes = nodes return self def edges(edges): self.edges = edges return self def graph(ig): self.edges = ig self.nodes = None return self def settings(self, height=None, url_params={}): self.height = height or self.height self.url_params = dict(self.url_params, **url_params) return self def plot(self, graph=None, nodes=None): if graph is None: if self.edges is None: raise ValueError('Must specify graph/edges') g = self.edges else: g = graph n = self.nodes if nodes is None else nodes dataset = self._plot_dispatch(g, n) if dataset is None: raise TypeError('Expected Pandas dataframe or Igraph graph') dataset_name = pygraphistry.PyGraphistry._etl(json.dumps(dataset)) viz_url = pygraphistry.PyGraphistry._viz_url(dataset_name, self.url_params) if util.in_ipython() is True: from IPython.core.display import HTML return HTML(self._iframe(viz_url)) else: print 'Url: ', viz_url import webbrowser webbrowser.open(viz_url) return self def pandas2igraph(self, edges): import igraph eattribs = edges.columns.values.tolist() eattribs.remove(self.source) eattribs.remove(self.destination) cols = [self.source, self.destination] + eattribs etuples = [tuple(x) for x in edges[cols].values] return igraph.Graph.TupleList(etuples, directed=True, edge_attrs=eattribs, vertex_name_attr=self.node) def igraph2pandas(self, ig): def get_edgelist(ig): idmap = dict(enumerate(ig.vs[self.node])) for e in ig.es: t = e.tuple yield dict({self.source: idmap[t[0]], self.destination: idmap[t[1]]}, **e.attributes()) edata = get_edgelist(ig) ndata = [v.attributes() for v in ig.vs] nodes = pandas.DataFrame(ndata, columns=ig.vs.attributes()) cols = [self.source, self.destination] + ig.es.attributes() edges = pandas.DataFrame(edata, columns=cols) return (edges, nodes) def _plot_dispatch(self, graph, nodes): if isinstance(graph, pandas.core.frame.DataFrame): return self._pandas2dataset(graph, nodes) try: import igraph if isinstance(graph, igraph.Graph): (e, n) = self.igraph2pandas(graph) return self._pandas2dataset(e, n) except ImportError: pass return None def _pandas2dataset(self, edges, nodes): elist = edges.reset_index() if self.edge_color: elist['edgeColor'] = elist[self.edge_color] if self.edge_label: elist['edgeLabel'] = elist[self.edge_label] if self.edge_title: elist['edgeTitle'] = elist[self.edge_title] if 
self.edge_weight: elist['edgeWeight'] = elist[self.edge_weight] if nodes is None: self.node = '__nodeid__' nodes = pandas.DataFrame() nodes[self.node] = pandas.concat([edges[self.source], edges[self.destination]], ignore_index=True).drop_duplicates() self.point_title = self.node nlist = nodes.reset_index() if self.point_color: nlist['pointColor'] = nlist[self.point_color] if self.point_label: nlist['pointLabel'] = nlist[self.point_label] if self.point_title: nlist['pointTitle'] = nlist[self.point_title] if self.point_size: nlist['pointSize'] = nlist[self.point_size] return self._make_dataset(elist.to_dict(orient='records'), nlist.to_dict(orient='records')) def _make_dataset(self, elist, nlist=None): name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) bindings = {'idField': self.node, 'destinationField': self.destination, 'sourceField': self.source} dataset = {'name': pygraphistry.PyGraphistry._dataset_prefix + name, 'bindings': bindings, 'type': 'edgelist', 'graph': elist} if nlist: dataset['labels'] = nlist return dataset def _iframe(self, url): tag = '<iframe src="%s" style="width:100%%; height:%dpx; border: 1px solid #DDD"></iframe>' return tag % (url, self.height)
Python
0
@@ -2161,24 +2161,303 @@ e else nodes +%0A%0A if self.source is None or self.destination is None:%0A raise ValueError('Source/destination must be bound before plotting.')%0A if n is not None and self.node is None:%0A raise ValueError('Node identifier must be bound when using node dataframe') %0A dat
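Applied to plot(), the hunk inserts guard clauses immediately after the node frame is resolved, so an unbound source/destination fails fast with a clear message instead of producing a broken dataset later:

n = self.nodes if nodes is None else nodes

if self.source is None or self.destination is None:
    raise ValueError('Source/destination must be bound before plotting.')
if n is not None and self.node is None:
    raise ValueError('Node identifier must be bound when using node dataframe')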
6f0740fbd94acc2398f0628552a6329c2a90a348
Allow start and end arguments to take inputs of multiple words such as 'New York'
greengraph/command.py
greengraph/command.py
from argparse import ArgumentParser from matplotlib import pyplot as plt from graph import Greengraph def process(): parser = ArgumentParser( description="Produce graph quantifying the amount of green land between two locations") parser.add_argument("--start", required=True, help="The starting location ") parser.add_argument("--end", required=True, help="The ending location") parser.add_argument("--steps", help="The number of steps between the starting and ending locations, defaults to 10") parser.add_argument("--out", help="The output filename, defaults to graph.png") arguments = parser.parse_args() mygraph = Greengraph(arguments.start, arguments.end) if arguments.steps: data = mygraph.green_between(arguments.steps) else: data = mygraph.green_between(10) plt.plot(data) # TODO add a title and axis labels to this graph if arguments.out: plt.savefig(arguments.out) else: plt.savefig("graph.png") if __name__ == "__main__": process()
Python
0.000065
@@ -278,33 +278,44 @@ , required=True, + nargs=%22+%22, %0A - @@ -401,16 +401,27 @@ ed=True, + nargs=%22+%22, %0A @@ -759,16 +759,17 @@ ()%0A%0A +# mygraph @@ -1073,32 +1073,32 @@ .out)%0A else:%0A - plt.save @@ -1113,16 +1113,66 @@ ph.png%22) +%0A print arguments.start%0A print arguments.end %0A%0Aif __n
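nargs="+" tells argparse to gather one or more whitespace-separated tokens into a list, which is how a shell-split place name like New York survives parsing. A standalone illustration (argument values invented):

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--start", required=True, nargs="+")
parser.add_argument("--end", required=True, nargs="+")

args = parser.parse_args(["--start", "New", "York", "--end", "London"])
print(args.start)  # ['New', 'York'] -- callers must join the tokens themselves

Note the hunk also comments out the Greengraph construction and adds two debug prints, which looks like work-in-progress left in the commit.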
75a47485629725f9035c7a4aa7c154ce30de3b5e
Add new allowed host
greenland/settings.py
greenland/settings.py
""" Django settings for greenland project. Generated by 'django-admin startproject' using Django 1.9.5. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'lc3^jbn=netrea_9o+1+gt-1@r#w$y758%&2%_d-=tg#o89r^x' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'maps' ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', # 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'greenland.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'greenland.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases if 'PORT' in os.environ: DATABASES = {} DATABASES['default'] = dj_database_url.config(conn_max_age=600) else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
Python
0
@@ -849,16 +849,41 @@ OSTS = %5B +'greenland.herokuapp.com' %5D%0A%0A%0A# Ap
3fe0a520a458a575117fc8d809f21efd133d2887
Add license file
wikilink/__init__.py
wikilink/__init__.py
""" wiki-link ~~~~~~~~ wiki-link is a web-scraping application to find minimum number of links between two given wiki pages. :copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved. :license: Apache License 2.0. """ __all__ = ["wiki_link"] __author__ = "Tran Ly Vu ([email protected])" __version__ = "1.0.0" __copyright__ = "Copyright (c) 2016 - 2018 Tran Ly Vu. All Rights Reserved." __license__ = "Apache License 2.0"
Python
0
@@ -82,16 +82,17 @@ m number + %0A%09of lin
7a9f3f6cc880d2bcf0cdac8b5193b471eb2b9095
Refactor Adapter pattern
structural/adapter.py
structural/adapter.py
""" Convert the interface of a class into another interface clients expect. Adapter lets classes work together that couldn't otherwise because of incompatible interfaces. """ import abc class Target(metaclass=abc.ABCMeta): """ Define the domain-specific interface that Client uses. """ def __init__(self): self._adaptee = Adaptee() @abc.abstractmethod def request(self): pass class Adapter(Target): """ Adapt the interface of Adaptee to the Target interface. """ def request(self): self._adaptee.specific_request() class Adaptee: """ Define an existing interface that needs adapting. """ def specific_request(self): pass def main(): adapter = Adapter() adapter.request() if __name__ == "__main__": main()
Python
0
@@ -312,24 +312,33 @@ _init__(self +, adaptee ):%0A s @@ -356,17 +356,15 @@ e = -A +a daptee -() %0A%0A @@ -734,16 +734,40 @@ main():%0A + adaptee = Adaptee()%0A adap @@ -780,16 +780,23 @@ Adapter( +adaptee )%0A ad
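After this hunk the adaptee is injected through the constructor instead of being created inside Target.__init__. The post-diff wiring, reconstructed in full:

import abc

class Target(metaclass=abc.ABCMeta):
    def __init__(self, adaptee):
        self._adaptee = adaptee

    @abc.abstractmethod
    def request(self):
        pass

class Adapter(Target):
    def request(self):
        self._adaptee.specific_request()

class Adaptee:
    def specific_request(self):
        pass

def main():
    adaptee = Adaptee()        # construction now happens at the call site
    adapter = Adapter(adaptee)
    adapter.request()

Constructor injection decouples Target from the concrete Adaptee, so a test double can be passed in without touching the class.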
e8ec681a7bd6485874114b2eec4050e931fd7ee6
update UserKarma for the new row structure
hamper/plugins/karma.py
hamper/plugins/karma.py
import operator import re from collections import defaultdict from datetime import datetime from hamper.interfaces import ChatCommandPlugin, Command from hamper.utils import ude, uen from sqlalchemy import Column, DateTime, Integer, String from sqlalchemy.ext.declarative import declarative_base SQLAlchemyBase = declarative_base() class Karma(ChatCommandPlugin): '''Give, take, and scoreboard Internet Points''' """ Hamper will look for lines that end in ++ or -- and modify that user's karma value accordingly NOTE: The user is just a string, this really could be anything...like potatoes or the infamous cookie clicker.... """ name = 'karma' priority = -2 short_desc = 'karma - Give or take karma from someone' long_desc = ('username++ - Give karma\n' 'username-- - Take karma\n' '!karma --top - Show the top 5 karma earners\n' '!karma --bottom - Show the bottom 5 karma earners\n' '!karma username - Show the user\'s karma count\n') gotta_catch_em_all = r"""# 3 or statement ( # Starting with a (, look for anything within # parens that end with 2 or more + or - (?=\()[^\)]+\)(\+\++|--+) | # Looking from the start of the line until 2 or # more - or + are found. No whitespace in this # grouping ^[^\s]+(\+\++|--+) | # Finally group any non-whitespace groupings # that end with 2 or more + or - [^\s]+?(\+\++|--+)((?=\s)|(?=$)) ) """ regstr = re.compile(gotta_catch_em_all, re.X) def setup(self, loader): super(Karma, self).setup(loader) self.db = loader.db SQLAlchemyBase.metadata.create_all(self.db.engine) def message(self, bot, comm): """ Check for strings ending with 2 or more '-' or '+' """ super(Karma, self).message(bot, comm) # No directed karma giving or taking if not comm['directed'] and not comm['pm']: msg = comm['message'].strip().lower() # use the magic above words = self.regstr.findall(msg) # Do things to people karmas = self.modify_karma(words) # Notify the users they can't modify their own karma if comm['user'] in karmas.keys(): bot.reply(comm, "Nice try, no modifying your own karma") # Commit karma changes to the db self.update_db(comm["user"], karmas) def modify_karma(self, words): """ Given a regex object, look through the groups and modify karma as necessary """ # 'user': karma k = defaultdict(int) if words: # For loop through all of the group members for word_tuple in words: word = word_tuple[0] ending = word[-1] # This will either end with a - or +, if it's a - subract 1 # kara, if it ends with a +, add 1 karma change = -1 if ending == '-' else 1 # Now strip the ++ or -- from the end if '-' in ending: word = word.rstrip('-') elif '+' in ending: word = word.rstrip('+') # Check if surrounded by parens, if so, remove them if word.startswith('(') and word.endswith(')'): word = word[1:-1] # Finally strip whitespace word = word.strip() # Add the user to the dict if word: k[word] += change return k def update_db(self, giver, receiverkarma): """ Record a the giver of karma, the receiver of karma, and the karma amount. Typically the count will be 1, but it can be any positive or negative integer. 
""" kt = self.db.session.query(KarmaTable) for receiver in receiverkarma: if receiver != giver: urow = KarmaTable(ude(giver), ude(receiver), receiverkarma[receiver]) self.db.session.add(urow) self.db.session.commit() class KarmaList(Command): """ Return the highest or lowest 5 receivers of karma """ regex = r'^karma --(top|bottom)$' LIMIT = 5 def command(self, bot, comm, groups): # We'll need all the rows kts = bot.factory.loader.db.session.query(KarmaTable).all() # From all the rows, tally the karma for each receiver receivers = defaultdict(int) for row in kts: receivers[row.receiver] += row.kcount rec_count = len(receivers.keys()) rec_sorted = sorted(receivers.iteritems(), key=operator.itemgetter(1)) # We should limit the list of users to at most self.LIMIT limit = self.LIMIT if rec_count >= self.LIMIT else rec_count if limit: if groups[0] == 'top': snippet = rec_sorted[-limit:] elif groups[0] == 'bottom': snippet = rec_sorted[0:limit] else: bot.reply( comm, r'Something went wrong with karma\'s regex' ) return for rec in snippet: bot.reply( comm, '%s\x0f: %d' % (uen(rec[0]), rec[1]), encode=False ) else: bot.reply(comm, r'No one has any karma yet :-(') class UserKarma(Command): """ Retrieve karma for a given user """ # !karma <username> regex = r'^karma\s+([^-].*)$' def command(self, bot, comm, groups): # Play nice when the user isn't in the db kt = bot.factory.loader.db.session.query(KarmaTable) thing = ude(groups[0].strip().lower()) user = kt.filter(KarmaTable.user == thing).first() if user: bot.reply( comm, '%s has %d points' % (uen(user.user), user.kcount), encode=False ) else: bot.reply( comm, 'No karma for %s ' % uen(thing), encode=False ) class KarmaTable(SQLAlchemyBase): """ Keep track of users karma in a persistant manner """ __tablename__ = 'karma' # Calling the primary key user, though, really, this can be any string id = Column(Integer, primary_key=True) giver = Column(String) receiver = Column(String) kcount = Column(Integer) datetime = Column(DateTime, default=datetime.utcnow()) def __init__(self, giver, receiver, kcount): self.giver = giver self.receiver = receiver self.kcount = kcount karma = Karma()
Python
0
@@ -6393,20 +6393,24 @@ -user +rec_list = kt.fi @@ -6425,18 +6425,22 @@ maTable. -us +receiv er == th @@ -6448,37 +6448,138 @@ ng). -first()%0A%0A if user: +all()%0A%0A if rec_list:%0A total = 0%0A for r in rec_list:%0A total += r.kcount %0A @@ -6658,31 +6658,21 @@ uen( -user.user), user.kcount +thing), total ),%0A
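Per the hunk, UserKarma.command stops fetching a single legacy row keyed on user and instead sums kcount over every row whose receiver matches, consistent with the new giver/receiver/kcount schema:

rec_list = kt.filter(KarmaTable.receiver == thing).all()

if rec_list:
    total = 0
    for r in rec_list:
        total += r.kcount
    bot.reply(comm, '%s has %d points' % (uen(thing), total), encode=False)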
37a5126f75ef64492c918d6e5f9b84641c7f03c8
fix import
otindex/query_study_helpers.py
otindex/query_study_helpers.py
# helper functions for the find_studies views from .models import ( DBSession, Study, Tree, Curator, Property, ) import simplejson as json import sqlalchemy import logging from sqlalchemy.dialects.postgresql import JSON,JSONB from sqlalchemy import Integer from sqlalchemy.exc import ProgrammingError from pyramid.httpexceptions import HTTPNotFound, HTTPBadRequest from .util import clean_dict_values _LOG = logging.getLogger(__name__) def is_deprecated_property(prop): deprecated_oti_properties = [ "ot:studyModified", "ot:focalCladeOTTId", "ot:studyLastEditor", "ot:focalCladeTaxonName", "ot:studyLabel", "ot:authorContributed", "ot:studyUploaded", "is_deprecated", "ot:candidateTreeForSynthesis" ] if prop in deprecated_oti_properties: return True else: return False # get all trees, no filtering def get_all_studies(verbose): resultlist = [] query_obj = get_study_query_object(verbose) # get results as dict, where keys are the labels set in # get_study_query_object for row in query_obj.all(): item = {} for k,v in row._asdict().items(): k = k.encode('utf-8') if isinstance(v, dict): v = dict([(kk.encode('utf-8'), vv.encode('utf-8')) for kk, vv in v.items()]) item[k]=v resultlist.append(item) return resultlist # given a property, returns the property with prefix def get_prop_with_prefix(prop): query_obj = DBSession.query(Property.prefix).filter( Property.property == prop ).first() if query_obj.prefix is None: return prop else: return query_obj.prefix+prop # get the list of searchable study properties # v3 list pruned down to only those implemented in v3 def get_study_property_list(prop_only=True): properties = util.get_study_properties() # now add the non-JSON properties properties.append("ntrees") properties.append("treebaseId") return properties # return the query object without any filtering # (query not yet executed) def get_study_query_object(verbose): query_obj = None if (verbose): # these need to have '^' at the start, becuase that is how they # appear in the JSON column labels = util.get_study_properties(decorated=False) clist = util.get_study_properties(decorated=True) # assigning labels like this makes it easy to build the response json # but can't directly access any particular item via the label, # i.e result.ot:studyId because of ':' in label query_obj = DBSession.query( Study.id.label('ot:studyId'), Study.data[(clist[0])].label(labels[0]), Study.data[(clist[1])].label(labels[1]), Study.data[(clist[2])].label(labels[2]), Study.data[(clist[3])].label(labels[3]), Study.data[(clist[4])].label(labels[4]), Study.data[(clist[5])].label(labels[5]), Study.data[(clist[6])].label(labels[6]), Study.data[(clist[7])].label(labels[7]), ) else: query_obj = DBSession.query(Study.id.label('ot:studyId')) return query_obj # find studies by curators; uses Study-Curator association table def query_studies_by_curator(query_obj,property_value): filtered = query_obj.filter( Study.curators.any(name=property_value) ) return filtered # looking for a value in a list, e.g. 
ot:tag def query_by_tag(query_obj,property_value): property_type = '^ot:tag' filtered = query_obj.filter( Study.data.contains({property_type:[property_value]}) ) return filtered def query_fulltext(query_obj,property_type,property_value): property_type = get_prop_with_prefix(property_type) # add wildcards to the property_value property_value = '%'+property_value+'%' filtered = query_obj.filter( Study.data[ property_type ].astext.ilike(property_value) ) return filtered # find studies in cases where the property_value is an int def query_studies_with_int_values(query_obj,property_type,property_value): property_type = get_prop_with_prefix(property_type) filtered = query_obj.filter( Study.data[ (property_type) ].astext.cast(sqlalchemy.Integer) == property_value ) return filtered # filter query to return only studies that match property_type and # property_value def query_studies(verbose,property_type,property_value): _LOG.debug("querying studies by {p} : {v}".format(p=property_type,v=property_value)) # get the base (unfiltered) query object query_obj = get_study_query_object(verbose) filtered = None # for studyId, use id column rather than ^ot:studyId json property if property_type == "ot:studyId": filtered = query_obj.filter(Study.id == property_value) # curator uses study-curator association table elif property_type == "ot:curatorName": filtered = query_studies_by_curator(query_obj,property_value) # year and focal clade are in json, need to cast an int to string elif property_type == "ot:studyYear" or property_type == "ot:focalClade": filtered = query_studies_with_int_values(query_obj,property_type,property_value) # property_type = get_prop_with_prefix(property_type) # str_value = str(property_value) # filtered = query_obj.filter( # Study.data[ # (property_type) # ].astext == str_value # ) # value of ot:studyPublication and ot:dataDeposit # is a dict with key '@href' elif property_type == "ot:studyPublication" or property_type == "ot:dataDeposit": property_type = get_prop_with_prefix(property_type) filtered = query_obj.filter( Study.data[ (property_type,'@href') ].astext == property_value ) elif property_type == "ot:studyPublicationReference" or property_type == "ot:comment": filtered = query_fulltext(query_obj,property_type,property_value) elif property_type == "treebaseId": filtered = query_obj.filter(Study.treebase_id == property_value) # tag is a list elif property_type == "ot:tag": filtered = query_by_tag(query_obj,property_value) # all other property types are strings contained in json else: property_type = get_prop_with_prefix(property_type) filtered = query_obj.filter( Study.data[ (property_type) ].astext == property_value ) # get results as dict, where keys are the labels set in # get_study_query_object resultlist = [] try: for row in filtered.all(): item = {} clean_dict_values(row._asdict(), item) resultlist.append(item) return resultlist except ProgrammingError as e: _LOG.exception('exception in dict creation'.format(e)) raise HTTPBadRequest()
Python
0.000001
@@ -419,16 +419,38 @@ t_values +, get_study_properties %0A%0A_LOG = @@ -1842,29 +1842,24 @@ roperties = -util. get_study_pr @@ -2274,21 +2274,16 @@ abels = -util. get_stud @@ -2332,13 +2332,8 @@ t = -util. get_
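The fix is purely an import one: the module used util.get_study_properties without ever importing util, so the name is pulled in from .util next to clean_dict_values and the stale prefixes are dropped:

from .util import clean_dict_values, get_study_properties

def get_study_property_list(prop_only=True):
    properties = get_study_properties()
    properties.append("ntrees")
    properties.append("treebaseId")
    return properties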
bc40db9fa1c4663db604cb7890de10ef91d6a65e
Use correct name
haproxystats/metrics.py
haproxystats/metrics.py
""" haproxstats.metrics ~~~~~~~~~~~~~~~~~~ This module provides the field names contained in the HAProxy statistics. """ DAEMON_METRICS = [ 'CompressBpsIn', 'CompressBpsOut', 'CompressBpsRateLim', 'ConnRate', 'ConnRateLimit', 'CumConns', 'CumReq', 'CumSslConns', 'CurrConns', 'CurrSslConns', 'Hard_maxcon', 'MaxConnRate', 'MaxSessRate', 'MaxSslConns', 'MaxSslRate', 'MaxZlibMemUsage', 'Maxconn', 'Maxpipes', 'Maxsock', 'Memmax_MB', 'PipesFree', 'PipesUsed', 'Run_queue', 'SessRate', 'SessRateLimit', 'SslBackendKeyRate', 'SslBackendMaxKeyRate', 'SslCacheLookups', 'SslCacheMisses', 'SslFrontendKeyRate', 'SslFrontendMaxKeyRate', 'SslFrontendSessionReuse_pct', 'SslRate', 'SslRateLimit', 'Tasks', 'Ulimit-n', 'Uptime_sec', 'ZlibMemUsage', ] DAEMON_AVG_METRICS = ['Idle_pct'] COMMON = [ 'bin', 'bout', 'dresp', 'hrsp_1xx', 'hrsp_2xx', 'hrsp_3xx', 'hrsp_4xx', 'hrsp_5xx', 'hrsp_other', 'rate', 'rate_max', 'scur', 'smax', 'stot' ] SERVER_METRICS = [ 'chkfail', 'cli_abrt', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'srv_abrt', 'wredis', 'wretr' ] + COMMON SERVER_AVG_METRICS = ['qtime', 'rtime', 'throttle', 'ttime', 'weight'] BACKEND_METRICS = [ 'chkdown', 'cli_abrt', 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'downtime', 'dreq', 'econ', 'eresp', 'lbtot', 'qcur', 'qmax', 'slim', 'srv_abrt', 'wredis', 'wretr', ] + COMMON BACKEND_AVG_METRICS = [ 'act', 'bck', 'rtime', 'ctime', 'qtime', 'ttime', 'weight' ] FRONTEND_METRICS = [ 'comp_byp', 'comp_in', 'comp_out', 'comp_rsp', 'dreq', 'ereq', 'rate_lim', 'req_rate', 'req_rate_max', 'req_tot', 'slim' ] + COMMON
Python
0
@@ -342,16 +342,17 @@ d_maxcon +n ',%0A '
3c63201d6113d01c870748f21be2501282a2316a
Remove unneeded import in gmail.py.
paas_manager/app/util/gmail.py
paas_manager/app/util/gmail.py
import sys import smtplib from email.mime.text import MIMEText from email.utils import formatdate import yaml from ... import config def create_message(from_addr, to_addr, subject, message, encoding): body = MIMEText(message, 'plain', encoding) body['Subject'] = subject body['From'] = from_addr body['To'] = to_addr body['Date'] = formatdate() return body def send_via_gmail(from_addr, to_addr, body): s = smtplib.SMTP('smtp.gmail.com', 587) s.ehlo() s.starttls() s.ehlo() s.login( config['gmail']['user'], config['gmail']['password']) s.sendmail(from_addr, [to_addr], body.as_string()) s.close() def gmail(message, to_addr): body = create_message( config['gmail']['user'], to_addr, '[Notification]', message, 'utf8') send_via_gmail(config['gmail']['user'], to_addr, body) return if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if (argc < 3): print('USAGE: python gmail.py address message') raise SystemExit(0) else: to_addr = argvs[1] message = argvs[2] gmail(message, to_addr)
Python
0
@@ -95,20 +95,8 @@ ate%0A -import yaml%0A from
f7922bef544044f62b49c25b69de5fae8fa6e458
Update cs2quiz1.py
cs2quiz1.py
cs2quiz1.py
# 13.5 pts of 15 for terminology # 20 pts of 25 for programming #Part 1: Terminology (15 points) #1 1pt) What is the symbol "=" used for? # assigning values, function calls to a variable # 1 pt right answer # #2 3pts) Write a technical definition for 'function' # A function is a named sequence of statements that perform some calculation and may return an output # 3 pts right answer # #3 1pt) What does the keyword "return" do? # returns some form of output from a function # 1 pt right answer # #4 5pts) We know 5 basic data types. Write the name for each one and provide two # examples of each below # 1: boolean: True, False # 2: string: "ASDASD", "sooosad" # 3: float: 1.23, 14264.80124 # 4: integer: 314, 0 # 5: tuple: True, "SDA", 12.456, 87 # 4pts, missed parenthesis on tuple #5 2pts) What is the difference between a "function definition" and a # "function call"? # a function definition defines what the function does and a function call calls the function to do what it was defined to do. # the main difference between the two is the definition has a ":" after it while the function call does not # a function must me defined before it can be called # 1.5 pts, mostly right answer, missed function name #6 3pts) What are the 3 phases that every computer program has? What happens in # each of them # 1: Input: user inputs something # 2: Processing/computation: computer does something with the input # 3: Output: computer returns some form of output # 3pts right answer #Part 2: Programming (25 points) #Write a program that asks the user for the areas of 3 circles. #It should then calculate the diameter of each and the sum of the diameters #of the 3 circles. #Finally, it should produce output like this: #Circle Diameter #c1 ... #c2 ... #c3 ... #TOTALS ... # Hint: Radius is the square root of the area divided by pi import math #1 pt for header line 1 pt correct #3 pt for correct formula 3 pt correct #1 pt for return value 1 pt correct #1 pt for parameter name 0 pt put x instead of area #1 pt for function name 1 pt correct def diameterfromarea(x): return math.sqrt(x/math.pi)*2 #1pt for header line 1 pt correct #1pt for parameter names 1 pt correct #1pt for return value 1 pt correct #1pt for correct output format 1 pt correct #3pt for correct use of format function 3 pts correct def output(c1, c2, c3, total): out = """ Circle Diameter c1 {} c2 {} c3 {} Totals {} """.format(c1, c2, c3, total) return out #1pt header line 1 pt correct #1pt getting input 1 pt got input #1pt converting input 1 pt converted input #1pt for calling output function 1 pt called output #2pt for correct diameter formula 2 pts correct #1pt for variable names 0 pt used single letter variable names def main(): #Input Section a = float(raw_input("Area of C1: ")) b = float(raw_input("Area of C2: ")) c = float(raw_input("Area of C3: ")) #Processings c1 = diameterfromarea(a) c2 = diameterfromarea(b) c3 = diameterfromarea(c) total = c1 + c2 + c3 #Output Section res = output(c1, c2, c3, total) print res #1pt for calling main 1 pt main called main() #1pt explanatory comments 1 pt added explanatory comments #1pt code format 1 pt code format correct #1pt script runs without errors 1 pt script runs no errors
Python
0
@@ -29,17 +29,17 @@ logy%0A# 2 -0 +3 pts of
f5aa2ab4796b258be54112ea87809e7ada4ee3e0
add advice shortlink
csp/urls.py
csp/urls.py
from django.conf.urls import patterns, include, url from django.contrib.staticfiles.urls import staticfiles_urlpatterns from django.views.decorators.csrf import csrf_exempt from django.views.generic import RedirectView from rest_framework.routers import SimpleRouter from crowdsourcing import views from crowdsourcing.viewsets.file import FileViewSet from crowdsourcing.viewsets.message import ConversationViewSet, MessageViewSet, RedisMessageViewSet, \ ConversationRecipientViewSet from crowdsourcing.viewsets.payment import ChargeViewSet, TransferViewSet from crowdsourcing.viewsets.project import * from crowdsourcing.viewsets.qualification import QualificationViewSet, RequesterACGViewSet, WorkerACEViewSet, \ QualificationItemViewSet from crowdsourcing.viewsets.rating import WorkerRequesterRatingViewset, RatingViewset from crowdsourcing.viewsets.task import TaskViewSet, TaskWorkerResultViewSet, TaskWorkerViewSet, \ ExternalSubmit, ReturnFeedbackViewSet from crowdsourcing.viewsets.template import TemplateViewSet, TemplateItemViewSet, TemplateItemPropertiesViewSet from crowdsourcing.viewsets.user import UserViewSet, UserProfileViewSet, UserPreferencesViewSet, CountryViewSet, \ CityViewSet from mturk import views as mturk_views from mturk.viewsets import MTurkAssignmentViewSet, MTurkConfig, MTurkAccountViewSet router = SimpleRouter(trailing_slash=True) router.register(r'profile', UserProfileViewSet) router.register(r'user', UserViewSet) router.register(r'preferences', UserPreferencesViewSet) router.register(r'worker-requester-rating', WorkerRequesterRatingViewset) router.register(r'rating', RatingViewset) router.register(r'project', ProjectViewSet) router.register(r'category', CategoryViewSet) router.register(r'country', CountryViewSet) router.register(r'city', CityViewSet) router.register(r'task', TaskViewSet) router.register(r'task-worker', TaskWorkerViewSet) router.register(r'task-worker-result', TaskWorkerResultViewSet) router.register(r'template', TemplateViewSet) router.register(r'template-item', TemplateItemViewSet) router.register(r'template-item-properties', TemplateItemPropertiesViewSet) router.register(r'return-feedback', ReturnFeedbackViewSet) router.register(r'conversation', ConversationViewSet) router.register(r'conversation-recipients', ConversationRecipientViewSet) router.register(r'message', MessageViewSet) router.register(r'inbox', RedisMessageViewSet, base_name='redis-message') router.register(r'file', FileViewSet) router.register(r'qualification', QualificationViewSet) router.register(r'requester-access-group', RequesterACGViewSet) router.register(r'worker-access-entry', WorkerACEViewSet) router.register(r'qualification-item', QualificationItemViewSet) router.register(r'charges', ChargeViewSet) router.register(r'transfers', TransferViewSet) mturk_router = SimpleRouter(trailing_slash=False) mturk_router.register(r'mturk', MTurkAssignmentViewSet) mturk_router.register(r'mturk-account', MTurkAccountViewSet) urlpatterns = patterns('', url(r'^api/auth/login/$', views.Login.as_view()), url(r'^api/auth/logout/$', views.Logout.as_view()), url(r'^api/oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')), url(r'^api/oauth2-ng/token', views.Oauth2TokenView.as_view()), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api/done/$', csrf_exempt(ExternalSubmit.as_view())), url(r'^api/', include(router.urls)), url(r'^mturk/task', mturk_views.mturk_index), url(r'^api/', include(mturk_router.urls)), url(r'^api/mturk/url', 
MTurkConfig.as_view({'get': 'get_mturk_url'})), url(r'^forum', RedirectView.as_view(url=settings.DISCOURSE_BASE_URL), name='forum'), url(r'^discourse/sso$', views.sso), url('^.*$', views.home, name='home'), ) urlpatterns += staticfiles_urlpatterns()
Python
0.000001
@@ -3823,24 +3823,236 @@ k_url'%7D)),%0A%0A + url(r'%5Eadvice', RedirectView.as_view(url='https://docs.google.com/forms/d/e/1FAIpQLScB5yz_2gdJOjSDu76gqDrMpUyiczQt-MTgtii4QLhuoP3YMA/viewform'),%0A name='advice'),%0A%0A
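The hunk registers an /advice shortlink through Django's RedirectView; decoding the diff's escaped newlines, the added pattern is:

url(r'^advice',
    RedirectView.as_view(url='https://docs.google.com/forms/d/e/1FAIpQLScB5yz_2gdJOjSDu76gqDrMpUyiczQt-MTgtii4QLhuoP3YMA/viewform'),
    name='advice'),

RedirectView needs no template or view code, which keeps one-off shortlinks like this down to a single urlconf entry.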
331c7a349e2d507ca52b24619ffc9cb1c4570e0b
Change default to *NOT* run image_harvest
harvester/run_ingest.py
harvester/run_ingest.py
""" Script to run the ingest process. Usage: $ python run_ingest.py user-email url_collection_api """ import sys import os from email.mime.text import MIMEText from dplaingestion.scripts import enrich_records from dplaingestion.scripts import save_records from dplaingestion.scripts import remove_deleted_records from dplaingestion.scripts import dashboard_cleanup from dplaingestion.scripts import check_ingestion_counts import logbook from harvester import fetcher from harvester.config import config as config_harvest from harvester.collection_registry_client import Collection from redis import Redis from redis.exceptions import ConnectionError as RedisConnectionError from rq import Queue from harvester import solr_updater from harvester import grab_solr_index import harvester.image_harvest EMAIL_RETURN_ADDRESS = os.environ.get('EMAIL_RETURN_ADDRESS', '[email protected]') # csv delim email addresses EMAIL_SYS_ADMIN = os.environ.get('EMAIL_SYS_ADMINS', None) IMAGE_HARVEST_TIMEOUT = 14400 def def_args(): import argparse parser = argparse.ArgumentParser(description='Harvest a collection') parser.add_argument('user_email', type=str, help='user email') parser.add_argument('url_api_collection', type=str, help='URL for the collection Django tastypie api resource') return parser def queue_image_harvest(redis_host, redis_port, redis_pswd, redis_timeout, collection_key, url_couchdb, object_auth=None): rQ = Queue(connection=Redis(host=redis_host, port=redis_port, password=redis_pswd, socket_connect_timeout=redis_timeout) ) job = rQ.enqueue_call(func=harvester.image_harvest.main, kwargs=dict(collection_key=collection_key, url_couchdb=url_couchdb, object_auth=object_auth), timeout=IMAGE_HARVEST_TIMEOUT) return job def main(user_email, url_api_collection, log_handler=None, mail_handler=None, dir_profile='profiles', profile_path=None, config_file='akara.ini', redis_host=None, redis_port=None, redis_pswd=None, redis_timeout=600, run_image_harvest=True): '''Runs a UCLDC ingest process for the given collection''' emails = [user_email] if EMAIL_SYS_ADMIN: emails.extend([u for u in EMAIL_SYS_ADMIN.split(',')]) if not mail_handler: mail_handler = logbook.MailHandler(EMAIL_RETURN_ADDRESS, emails, level='ERROR', bubble=True) mail_handler.push_application() if not(redis_host and redis_port and redis_pswd): config = config_harvest(config_file=config_file) try: collection = Collection(url_api_collection) except Exception, e: msg = 'Exception in Collection {}, init {}'.format(url_api_collection, str(e)) logbook.error(msg) raise e if not log_handler: log_handler = logbook.StderrHandler(level='DEBUG') log_handler.push_application() logger = logbook.Logger('run_ingest') ingest_doc_id, num_recs, dir_save, harvester = fetcher.main( emails, url_api_collection, log_handler=log_handler, mail_handler=mail_handler ) logger.info("INGEST DOC ID:{0}".format(ingest_doc_id)) logger.info('HARVESTED {0} RECORDS'.format(num_recs)) logger.info('IN DIR:{0}'.format(dir_save)) resp = enrich_records.main([None, ingest_doc_id]) if not resp == 0: logger.error("Error enriching records {0}".format(resp)) raise Exception('Failed during enrichment process: {0}'.format(resp)) logger.info('Enriched records') resp = save_records.main([None, ingest_doc_id]) if not resp == 0: logger.error("Error saving records {0}".format(str(resp))) raise Exception("Error saving records {0}".format(str(resp))) logger.info("SAVED RECS") resp = remove_deleted_records.main([None, ingest_doc_id]) if not resp == 0: logger.error("Error deleting records {0}".format(resp)) 
raise Exception("Error deleting records {0}".format(resp)) resp = check_ingestion_counts.main([None, ingest_doc_id]) if not resp == 0: logger.error("Error checking counts {0}".format(resp)) raise Exception("Error checking counts {0}".format(resp)) resp = dashboard_cleanup.main([None, ingest_doc_id]) if not resp == 0: logger.error("Error cleaning up dashboard {0}".format(resp)) raise Exception("Error cleaning up dashboard {0}".format(resp)) url_couchdb = harvester.config_dpla.get("CouchDb", "URL") # the image_harvest should be a separate job, with a long timeout if run_image_harvest: job = queue_image_harvest(config.redis_host, config.redis_port, config.redis_pswd, config.redis_timeout, collection_key=collection.provider, url_couchdb=url_couchdb, object_auth=collection.auth) logger.info("Started job for image_harvest:{}".format(job.result)) log_handler.pop_application() mail_handler.pop_application() if __name__ == '__main__': parser = def_args() args = parser.parse_args(sys.argv[1:]) if not args.user_email or not args.url_api_collection: parser.print_help() sys.exit(27) redis_host, redis_port, redis_pswd, redis_connect_timeout, id_ec2_ingest, id_ec2_solr_build, DPLA = config() print("HOST:{0} PORT:{1}".format(redis_host, redis_port, )) print "EMAIL", args.user_email, " URI: ", args.url_api_collection main(args.user_email, args.url_api_collection, redis_host=redis_host, redis_port=redis_port, redis_pswd=redis_pswd, redis_timeout=redis_connect_timeout)
Python
0.000002
@@ -2308,19 +2308,20 @@ harvest= -Tru +Fals e):%0A
4588a52ebfc3aee127a34a9e10067c0121c4f72e
add 'tab' and 'shift tab' for down/up movement
subiquity/ui/frame.py
subiquity/ui/frame.py
# Copyright 2015 Canonical, Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Base Frame Widget """ from urwid import Frame, WidgetWrap from subiquity.ui.anchors import Header, Footer, Body import logging log = logging.getLogger('subiquity.ui.frame') class SubiquityUI(WidgetWrap): def __init__(self, header=None, body=None, footer=None): self.header = header if header else Header() self.body = body if body else Body() self.footer = footer if footer else Footer() self.frame = Frame(self.body, header=self.header, footer=self.footer) super().__init__(self.frame) def set_header(self, title, excerpt): self.frame.header = Header(title, excerpt) def set_footer(self, message): self.frame.footer = Footer(message) def set_body(self, widget): self.frame.body = widget
Python
0
@@ -897,16 +897,77 @@ tWrap):%0A + key_conversion_map = %7B'tab': 'down', 'shift tab': 'up'%7D%0A%0A def @@ -1282,24 +1282,155 @@ elf.frame)%0A%0A + def keypress(self, size, key):%0A key = self.key_conversion_map.get(key, key)%0A return super().keypress(size, key)%0A%0A def set_
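Applying the hunk, SubiquityUI remaps Tab and Shift-Tab to urwid's down/up focus movement before delegating the keypress:

class SubiquityUI(WidgetWrap):
    key_conversion_map = {'tab': 'down', 'shift tab': 'up'}

    def keypress(self, size, key):
        key = self.key_conversion_map.get(key, key)
        return super().keypress(size, key)

dict.get(key, key) leaves every unmapped key untouched, so only the two listed keys change behaviour.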
791fb484937cabeb3a098bcd173db782efe53d7c
support filtering of Authors by organization and positions
authors/views.py
authors/views.py
from rest_framework import viewsets, permissions from . import serializers from . import models class AuthorViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Author.objects.all() serializer_class = serializers.AuthorSerializer filter_fields = () search_fields = ('first_name', 'last_name', 'organization', 'title', 'email', 'twitter', 'bio') ordering_fields = "__all__" class OrganizationViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Organization.objects.all() serializer_class = serializers.OrganizationSerializer filter_fields = () search_fields = ('name',) ordering_fields = "__all__" class PositionViewSet(viewsets.ModelViewSet): permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,) queryset = models.Position.objects.all() serializer_class = serializers.PositionSerializer filter_fields = () search_fields = ('name', 'description') ordering_fields = "__all__"
Python
0
@@ -321,32 +321,59 @@ ilter_fields = ( +'organization', 'positions' )%0A search_fie
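Filling in filter_fields exposes exact-match filtering on those relations. Assuming a django-filter backend is configured for the project (the settings are not shown in the record), requests along the lines of ?organization=<id>&positions=<id> become possible:

filter_fields = ('organization', 'positions')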
71265b648aa9410c0ec9aa250e50bf421dda23a4
Rename TestCase and test methods.
headers/cpp/ast_test.py
headers/cpp/ast_test.py
#!/usr/bin/env python # # Copyright 2008 Neal Norwitz # Portions Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """AST test.""" __author__ = '[email protected] (Neal Norwitz)' from test import test_support import unittest from cpp import ast from cpp import tokenize def _InstallGenericEqual(cls, attrs): """Add an __eq__ method to |cls| so objects can be compared for tests. Args: cls: Python class to add __eq__ method to attrs: string - space separated of attribute names to compare """ attrs = attrs.split() def __eq__(self, other): if not isinstance(other, cls): return False for a in attrs: if getattr(self, a) != getattr(other, a): return False return True cls.__eq__ = __eq__ def _InstallEqualMethods(): """Install __eq__ methods on the appropriate objects used for testing.""" _InstallGenericEqual(ast.Class, 'name bases templated_types') _InstallEqualMethods() def MakeBuilder(code_string): """Convenience function to make an AstBuilder from a code snippet..""" tokens = list(tokenize.GetTokens(code_string + '\n')) return ast.AstBuilder(tokens, '<test>') def Class(name, start=0, end=0, bases=None, body=None, templated_types=None, namespace=()): return ast.Class(start, end, name, bases, templated_types, body, namespace) class AstTest(unittest.TestCase): def test_ConvertBaseTokensToAST_Simple(self): builder = MakeBuilder('Bar') result = builder._ConvertBaseTokensToAST(builder.tokens) self.assertEqual(1, len(result)) self.assertEqual(Class('Bar'), result[0]) def test_ConvertBaseTokensToAST_Template(self): builder = MakeBuilder('Bar<Foo>') result = builder._ConvertBaseTokensToAST(builder.tokens) self.assertEqual(1, len(result)) self.assertEqual(Class('Bar', templated_types=[Class('Foo')]), result[0]) def test_ConvertBaseTokensToAST_TemplateWithMultipleArgs(self): builder = MakeBuilder('Bar<Foo, Blah, Bling>') result = builder._ConvertBaseTokensToAST(builder.tokens) self.assertEqual(1, len(result)) types = [Class('Foo'), Class('Blah'), Class('Bling')] self.assertEqual(Class('Bar', templated_types=types), result[0]) def test_ConvertBaseTokensToAST_TemplateWithMultipleTemplateArgsStart(self): builder = MakeBuilder('Bar<Foo<x>, Blah, Bling>') result = builder._ConvertBaseTokensToAST(builder.tokens) self.assertEqual(1, len(result)) types = [Class('Foo', templated_types=[Class('x')]), Class('Blah'), Class('Bling')] self.assertEqual(types[0], result[0].templated_types[0]) self.assertEqual(types[1], result[0].templated_types[1]) self.assertEqual(types[2], result[0].templated_types[2]) self.assertEqual(Class('Bar', templated_types=types), result[0]) def test_ConvertBaseTokensToAST_TemplateWithMultipleTemplateArgsMid(self): builder = MakeBuilder('Bar<Foo, Blah<x>, Bling>') result = builder._ConvertBaseTokensToAST(builder.tokens) self.assertEqual(1, len(result)) types = [Class('Foo'), Class('Blah', templated_types=[Class('x')]), Class('Bling')] self.assertEqual(Class('Bar', templated_types=types), result[0]) def test_ConvertBaseTokensToAST_TemplateWithMultipleTemplateArgsEnd(self): builder = 
MakeBuilder('Bar<Foo, Blah, Bling<x> >') result = builder._ConvertBaseTokensToAST(builder.tokens) self.assertEqual(1, len(result)) types = [Class('Foo'), Class('Blah'), Class('Bling', templated_types=[Class('x')])] self.assertEqual(Class('Bar', templated_types=types), result[0]) def test_main(): test_support.run_unittest(AstTest) if __name__ == '__main__': test_main()
Python
0.017761
@@ -1912,16 +1912,46 @@ %0A%0Aclass +AstBuilder_ConvertBaseTokensTo AstTest( @@ -1987,32 +1987,8 @@ test -_ConvertBaseTokensToAST_ Simp @@ -2207,32 +2207,8 @@ test -_ConvertBaseTokensToAST_ Temp @@ -2491,32 +2491,8 @@ test -_ConvertBaseTokensToAST_ Temp @@ -2832,32 +2832,8 @@ test -_ConvertBaseTokensToAST_ Temp @@ -3448,32 +3448,8 @@ test -_ConvertBaseTokensToAST_ Temp @@ -3867,32 +3867,8 @@ test -_ConvertBaseTokensToAST_ Temp @@ -4320,16 +4320,46 @@ nittest( +AstBuilder_ConvertBaseTokensTo AstTest)
8aa52ea8f07f922bc6d5952ca8ad56bedd042a1f
Bump version number.
nativeconfig/version.py
nativeconfig/version.py
VERSION = '2.3.0'
Python
0
@@ -10,9 +10,9 @@ '2. -3 +4 .0'%0A
1085c00f4372a0ab63cf8b597983adecf3f5ad76
Refactor conversion to float array.
svtools/breakpoint.py
svtools/breakpoint.py
import l_bp class Breakpoint: ''' Class for storing information about Breakpoints for merging ''' def __init__(self, line, percent_slop=0, fixed_slop=0): ''' Initialize with slop for probabilities ''' self.l = line (self.sv_type, self.chr_l, self.chr_r, self.strands, self.start_l, self.end_l, self.start_r, self.end_r, m) = l_bp.split_v(line) # TODO Handle missing PRPOS and PREND with intelligent message. Pull out into method. self.p_l = [float(x) for x in m['PRPOS'].split(',')] self.p_r = [float(x) for x in m['PREND'].split(',')] slop_prob = 1e-100 # FIXME This is a constant. Pull out to make more obvious if ((percent_slop > 0) or (fixed_slop > 0)): l_slop = int(max(percent_slop * (self.end_l - self.start_l + 1), fixed_slop)) r_slop = int(max(percent_slop * (self.end_r - self.start_r + 1), fixed_slop)) # pad each interval with slop_prob on each side. TODO This should be a method self.start_l = self.start_l - l_slop self.end_l = self.end_l + l_slop new_p_l = [slop_prob] * l_slop + self.p_l + [slop_prob] * l_slop self.start_r = self.start_r - r_slop self.end_r = self.end_r + r_slop new_p_r = [slop_prob] * r_slop + self.p_r + [slop_prob] * r_slop # chew off overhang if self.start_l or self.start_r less than 0 TODO This should also be a method if self.start_l < 0: new_p_l = new_p_l[-self.start_l:] self.start_l = 0 if self.start_r < 0: new_p_r = new_p_r[-self.start_r:] self.start_r = 0 # normalize so each probability curve sums to 1. TODO Should be a method sum_p_l = sum(new_p_l) self.p_l = [float(x)/sum_p_l for x in new_p_l] sum_p_r = sum(new_p_r) self.p_r = [float(x)/sum_p_r for x in new_p_r] def __str__(self): ''' Convert back to a string ''' return '\t'.join([str(x) for x in [self.chr_l, self.start_l, self.end_l, self.chr_r, self.start_r, self.end_r, self.sv_type, self.strands, self.p_l, self.p_r]]) def ovl(self, b): ''' Calculate overlapping cumulative probability value as weight? 0 if not overlapping. ''' if ((self.chr_l != b.chr_l) or (self.chr_r != b.chr_r) or (self.sv_type != b.sv_type)): return 0 #get left common interval c_start_l, c_end_l = max(self.start_l, b.start_l), min(self.end_l, b.end_l) #get right common interval c_start_r, c_end_r = max(self.start_r, b.start_r), min(self.end_r, b.end_r) c_l_len = c_end_l - c_start_l + 1 c_r_len = c_end_r - c_start_r + 1 if (c_l_len < 1) or (c_r_len < 1): return 0 # TODO This should probably be a method as well self_start_off_l = c_start_l - self.start_l b_start_off_l = c_start_l - b.start_l self_start_off_r = c_start_r - self.start_r b_start_off_r = c_start_r - b.start_r ovl_l = 0 for i in range(c_l_len): ovl_l += min(self.p_l[i + self_start_off_l], b.p_l[i + b_start_off_l]) ovl_r = 0 for i in range(c_r_len): ovl_r += min(self.p_r[i + self_start_off_r], b.p_r[i + b_start_off_r]) return ovl_l * ovl_r
Python
0
@@ -406,17 +406,16 @@ start_r, - %0A @@ -418,33 +418,32 @@ self.end_r, - %0A m) = l_ @@ -472,215 +472,103 @@ -# TODO Handle missing PRPOS and PREND with intelligent message. Pull out into method.%0A self.p_l = %5Bfloat(x) for x in m%5B'PRPOS'%5D.split(',')%5D%0A self.p_r = %5Bfloat(x) for x in m%5B'PREND'%5D.split(',')%5D +self.p_l = self.floats_from_tag(m, 'PRPOS')%0A self.p_r = self.floats_from_tag(m, 'PREND') %0A%0A @@ -2349,17 +2349,16 @@ f.end_r, - %0A @@ -3771,24 +3771,24 @@ rt_off_r%5D)%0A%0A - retu @@ -3804,8 +3804,250 @@ * ovl_r%0A +%0A @staticmethod%0A def floats_from_tag(info_dict, tag):%0A if tag in info_dict:%0A return %5Bfloat(x) for x in info_dict%5Btag%5D.split(',')%5D%0A else:%0A raise RuntimeError('Required tag %7B0%7D not found.'.format(tag))%0A
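The refactor extracts the two duplicated list comprehensions into a static helper, which also resolves the old TODO about missing PRPOS/PREND tags by raising explicitly. The method, as added by the diff:

@staticmethod
def floats_from_tag(info_dict, tag):
    if tag in info_dict:
        return [float(x) for x in info_dict[tag].split(',')]
    else:
        raise RuntimeError('Required tag {0} not found.'.format(tag))

with the call sites reduced to self.p_l = self.floats_from_tag(m, 'PRPOS') and self.p_r = self.floats_from_tag(m, 'PREND').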
fb223397ccdee519af7e17dc73db864fe0120e8b
Create a random HDFS folder for unit testing
fs/tests/test_hadoop.py
fs/tests/test_hadoop.py
""" fs.tests.test_hadoop: TestCases for the HDFS Hadoop Filesystem This test suite is skipped unless the following environment variables are configured with valid values. * PYFS_HADOOP_NAMENODE_ADDR * PYFS_HADOOP_NAMENODE_PORT [default=50070] * PYFS_HADOOP_NAMENODE_PATH [default="/"] All tests will be executed within a subdirectory "pyfs-hadoop" for safety. """ import os import unittest from fs.tests import FSTestCases, ThreadingTestCases from fs.path import * try: from fs import hadoop except ImportError: raise unittest.SkipTest("hadoop fs wasn't importable") class TestHadoopFS(unittest.TestCase, FSTestCases, ThreadingTestCases): def setUp(self): namenode_host = os.environ.get("PYFS_HADOOP_NAMENODE_ADDR") namenode_port = os.environ.get("PYFS_HADOOP_NAMENODE_PORT", "50070") base_path = os.environ.get("PYFS_HADOOP_NAMENODE_PATH", "/") if not namenode_host or not namenode_port or not base_path: raise unittest.SkipTest("Skipping HDFS tests due to lack of config") self.fs = hadoop.HadoopFS( namenode=namenode_host, port=namenode_port, base=base_path ) def tearDown(self): for dir_path in self.fs.ilistdir(dirs_only=True): if dir_path == "/": continue self.fs.removedir(dir_path, recursive=False, force=True) for file_path in self.fs.ilistdir(files_only=True): self.fs.remove(file_path) self.fs.close() @unittest.skip("HadoopFS does not support seek") def test_readwriteappendseek(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate(self): pass @unittest.skip("HadoopFS does not support truncate") def test_truncate_to_larger_size(self): pass @unittest.skip("HadoopFS does not support seek") def test_write_past_end_of_file(self): pass
Python
0
@@ -390,16 +390,28 @@
 unittest
+%0Aimport uuid
 %0A%0Afrom f
@@ -677,29 +677,50 @@
 def 
-setUp(self
+__init__(self, *args, **kwargs
 ):%0A
+%0A
         name
@@ -707,32 +707,37 @@
 args):%0A%0A        
+self.
 namenode_host = 
@@ -780,32 +780,37 @@
 _ADDR%22)%0A        
+self.
 namenode_port = 
@@ -852,16 +852,60 @@
 E_PORT%22,
+%0A                                           
 %2250070%22
@@ -906,24 +906,25 @@
 50070%22)%0A
+%0A
         base_pat
@@ -911,24 +911,29 @@
 %22)%0A%0A        
+self.
 base_path = 
@@ -931,16 +931,42 @@
 e_path = 
+os.path.join(%0A            
 os.envi
@@ -1006,26 +1006,30 @@
 H%22, %22/%22)
-%0A%0A        
+,%0A            
 if not n
@@ -1024,66 +1024,167 @@
-if not namenode_host or not namenode_port or not base_path
+%22pyfstest-%22 + str(uuid.uuid4())%0A        )%0A%0A        super(TestHadoopFS, self).__init__(*args, **kwargs)%0A%0A    def setUp(self):%0A%0A        if not self.namenode_host
 :%0A
@@ -1242,29 +1242,24 @@
 sts 
-due to lack of
+(missing
 config
+)
 %22)%0A%0A
@@ -1314,16 +1314,21 @@
 amenode=
+self.
 namenode
@@ -1351,16 +1351,21 @@
 port=
+self.
 namenode
@@ -1388,16 +1388,21 @@
 base=
+self.
 base_pat
db33f2d1e14c48cd2c73ae3e3c835fac54f39224
lower bool priority, raise int priority
sympy/core/sympify.py
sympy/core/sympify.py
"""sympify -- convert objects SymPy internal format""" # from basic import Basic, BasicType, S # from numbers import Integer, Real # from interval import Interval import decimal class SympifyError(ValueError): def __init__(self, expr, base_exc=None): self.expr = expr self.base_exc = base_exc def __str__(self): if self.base_exc is None: return "SympifyError: %s" % (self.expr,) return "Sympify of expression '%s' failed, because of exception being raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc)) def sympify(a, sympify_lists=False, locals= {}): """Converts an arbitrary expression to a type that can be used inside sympy. For example, it will convert python int's into instance of sympy.Rational, floats into intances of sympy.Real, etc. It is also able to coerce symbolic expressions which does inherit after Basic. This can be useful in cooperation with SAGE. It currently accepts as arguments: - any object defined in sympy (except maybe matrices [TODO]) - standard numeric python types: int, long, float, Decimal - strings (like "0.09" or "2e-19") If sympify_lists is set to True then sympify will also accept lists, tuples and sets. It will return the same type but with all of the entries sympified. If the argument is already a type that sympy understands, it will do nothing but return that value. This can be used at the begining of a function to ensure you are working with the correct type. >>> from sympy import * >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True """ if isinstance(a, Basic): return a if isinstance(a, BasicType): return a elif isinstance(a, bool): raise NotImplementedError("bool support") elif isinstance(a, (int, long)): return Integer(a) elif isinstance(a, (float, decimal.Decimal)): return Real(a) elif isinstance(a, complex): real, imag = map(sympify, (a.real, a.imag)) ireal, iimag = int(real), int(imag) if ireal + iimag*1j == a: return ireal + iimag*S.ImaginaryUnit return real + S.ImaginaryUnit * imag elif (a.__class__ in [list,tuple]) and len(a) == 2: # isinstance causes problems in the issue #432, so we use .__class__ return Interval(*a) elif isinstance(a, (list,tuple,set)) and sympify_lists: return type(a)([sympify(x, True) for x in a]) elif hasattr(a, "_sympy_"): # the "a" implements _sympy_() method, that returns a SymPy # expression (by definition), so we just use it return a._sympy_() else: # XXX this is here because of cyclic-import issues from sympy.matrices import Matrix from sympy.polynomials import Polynomial if isinstance(a, Polynomial): return a if isinstance(a, Matrix): raise NotImplementedError('matrix support') if not isinstance(a, str): # At this point we were given an arbitrary expression # which does not inherit from Basic and doesn't implement # _sympy_ (which is a canonical and robust way to convert # anything to SymPy expression). # # As a last chance, we try to take "a"'s normal form via str() # and try to parse it. If it fails, then we have no luck and # return an exception a = str(a) try: import ast_parser return ast_parser.SymPyParser(local_dict=locals).parse_expr(a) except Exception, exc: raise SympifyError(a, exc) raise SympifyError("%r is NOT a valid SymPy expression" % a)
Python
0.999987
@@ -1975,88 +1975,8 @@
 n a%0A
-    elif isinstance(a, bool):%0A        raise NotImplementedError(%22bool support%22)%0A
     
@@ -2365,16 +2365,96 @@
 * imag%0A
+    elif isinstance(a, bool):%0A        raise NotImplementedError(%22bool support%22)%0A
     elif
4dac601f24556f3949e7a0e711d1a1b56215beac
Bump version for release.
jsmin/__init__.py
jsmin/__init__.py
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Dave St.Germain
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.


import sys
is_3 = sys.version_info >= (3, 0)
if is_3:
    import io
else:
    import StringIO
    try:
        import cStringIO
    except ImportError:
        cStringIO = None


__all__ = ['jsmin', 'JavascriptMinify']
__version__ = '2.0.7'


def jsmin(js):
    """
    returns a minified version of the javascript string
    """
    if not is_3:
        if cStringIO and not isinstance(js, unicode):
            # strings can use cStringIO for a 3x performance
            # improvement, but unicode (in python2) cannot
            klass = cStringIO.StringIO
        else:
            klass = StringIO.StringIO
    else:
        klass = io.StringIO
    ins = klass(js)
    outs = klass()
    JavascriptMinify(ins, outs).minify()
    return outs.getvalue()


class JavascriptMinify(object):
    """
    Minify an input stream of javascript, writing
    to an output stream
    """

    def __init__(self, instream=None, outstream=None):
        self.ins = instream
        self.outs = outstream

    def minify(self, instream=None, outstream=None):
        if instream and outstream:
            self.ins, self.outs = instream, outstream
        self.is_return = False
        self.return_buf = ''

        def write(char):
            # all of this is to support literal regular expressions.
            # sigh
            if char in 'return':
                self.return_buf += char
                self.is_return = self.return_buf == 'return'
            self.outs.write(char)
            if self.is_return:
                self.return_buf = ''

        read = self.ins.read

        space_strings = "abcdefghijklmnopqrstuvwxyz"\
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\"
        starters, enders = '{[(+-', '}])+-"\''
        newlinestart_strings = starters + space_strings
        newlineend_strings = enders + space_strings
        do_newline = False
        do_space = False
        doing_single_comment = False
        previous_before_comment = ''
        doing_multi_comment = False
        in_re = False
        in_quote = ''
        quote_buf = []

        previous = read(1)
        next1 = read(1)
        if previous == '/':
            if next1 == '/':
                doing_single_comment = True
            elif next1 == '*':
                doing_multi_comment = True
                previous = next1
                next1 = read(1)
            else:
                write(previous)
        elif not previous:
            return
        elif previous >= '!':
            if previous in "'\"":
                in_quote = previous
            write(previous)
            previous_non_space = previous
        else:
            previous_non_space = ' '
        if not next1:
            return

        while 1:
            next2 = read(1)
            if not next2:
                last = next1.strip()
                if not (doing_single_comment or doing_multi_comment)\
                    and last not in ('', '/'):
                    if in_quote:
                        write(''.join(quote_buf))
                    write(last)
                break

            if doing_multi_comment:
                if next1 == '*' and next2 == '/':
                    doing_multi_comment = False
                    next2 = read(1)
            elif doing_single_comment:
                if next1 in '\r\n':
                    doing_single_comment = False
                    while next2 in '\r\n':
                        next2 = read(1)
                        if not next2:
                            break
                    if previous_before_comment in ')}]':
                        do_newline = True
                    elif previous_before_comment in space_strings:
                        write('\n')
            elif in_quote:
                quote_buf.append(next1)

                if next1 == in_quote:
                    numslashes = 0
                    for c in reversed(quote_buf[:-1]):
                        if c != '\\':
                            break
                        else:
                            numslashes += 1
                    if numslashes % 2 == 0:
                        in_quote = ''
                        write(''.join(quote_buf))
            elif next1 in '\r\n':
                if previous_non_space in newlineend_strings \
                    or previous_non_space > '~':
                    while 1:
                        if next2 < '!':
                            next2 = read(1)
                            if not next2:
                                break
                        else:
                            if next2 in newlinestart_strings \
                                or next2 > '~' or next2 == '/':
                                do_newline = True
                            break
            elif next1 < '!' and not in_re:
                if (previous_non_space in space_strings \
                    or previous_non_space > '~') \
                    and (next2 in space_strings or next2 > '~'):
                    do_space = True
                elif previous_non_space in '-+' and next2 == previous_non_space:
                    # protect against + ++ or - -- sequences
                    do_space = True
                elif self.is_return and next2 == '/':
                    # returning a regex...
                    write(' ')
            elif next1 == '/':
                if do_space:
                    write(' ')
                if in_re:
                    if previous != '\\' or next2 in 'gimy':
                        in_re = False
                    write('/')
                elif next2 == '/':
                    doing_single_comment = True
                    previous_before_comment = previous_non_space
                elif next2 == '*':
                    doing_multi_comment = True
                    previous = next1
                    next1 = next2
                    next2 = read(1)
                else:
                    in_re = previous_non_space in '(,=:[?!&|' or self.is_return # literal regular expression
                    write('/')
            else:
                if do_space:
                    do_space = False
                    write(' ')
                if do_newline:
                    write('\n')
                    do_newline = False

                write(next1)
                if not in_re and next1 in "'\"":
                    in_quote = next1
                    quote_buf = []

            previous = next1
            next1 = next2

            if previous >= '!':
                previous_non_space = previous
# sigh if char in 'return': self.return_buf += char self.is_return = self.return_buf == 'return' self.outs.write(char) if self.is_return: self.return_buf = '' read = self.ins.read space_strings = "abcdefghijklmnopqrstuvwxyz"\ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\" starters, enders = '{[(+-', '}])+-"\'' newlinestart_strings = starters + space_strings newlineend_strings = enders + space_strings do_newline = False do_space = False doing_single_comment = False previous_before_comment = '' doing_multi_comment = False in_re = False in_quote = '' quote_buf = [] previous = read(1) next1 = read(1) if previous == '/': if next1 == '/': doing_single_comment = True elif next1 == '*': doing_multi_comment = True previous = next1 next1 = read(1) else: write(previous) elif not previous: return elif previous >= '!': if previous in "'\"": in_quote = previous write(previous) previous_non_space = previous else: previous_non_space = ' ' if not next1: return while 1: next2 = read(1) if not next2: last = next1.strip() if not (doing_single_comment or doing_multi_comment)\ and last not in ('', '/'): if in_quote: write(''.join(quote_buf)) write(last) break if doing_multi_comment: if next1 == '*' and next2 == '/': doing_multi_comment = False next2 = read(1) elif doing_single_comment: if next1 in '\r\n': doing_single_comment = False while next2 in '\r\n': next2 = read(1) if not next2: break if previous_before_comment in ')}]': do_newline = True elif previous_before_comment in space_strings: write('\n') elif in_quote: quote_buf.append(next1) if next1 == in_quote: numslashes = 0 for c in reversed(quote_buf[:-1]): if c != '\\': break else: numslashes += 1 if numslashes % 2 == 0: in_quote = '' write(''.join(quote_buf)) elif next1 in '\r\n': if previous_non_space in newlineend_strings \ or previous_non_space > '~': while 1: if next2 < '!': next2 = read(1) if not next2: break else: if next2 in newlinestart_strings \ or next2 > '~' or next2 == '/': do_newline = True break elif next1 < '!' and not in_re: if (previous_non_space in space_strings \ or previous_non_space > '~') \ and (next2 in space_strings or next2 > '~'): do_space = True elif previous_non_space in '-+' and next2 == previous_non_space: # protect against + ++ or - -- sequences do_space = True elif self.is_return and next2 == '/': # returning a regex... write(' ') elif next1 == '/': if do_space: write(' ') if in_re: if previous != '\\' or next2 in 'gimy': in_re = False write('/') elif next2 == '/': doing_single_comment = True previous_before_comment = previous_non_space elif next2 == '*': doing_multi_comment = True previous = next1 next1 = next2 next2 = read(1) else: in_re = previous_non_space in '(,=:[?!&|' or self.is_return # literal regular expression write('/') else: if do_space: do_space = False write(' ') if do_newline: write('\n') do_newline = False write(next1) if not in_re and next1 in "'\"": in_quote = next1 quote_buf = [] previous = next1 next1 = next2 if previous >= '!': previous_non_space = previous
Python
0
@@ -1509,17 +1509,17 @@ = '2.0. -7 +8 '%0A%0A%0Adef
926bf60c77673571cb8f6d12e3754507f41b9e80
add optional args
ngage/plugins/napalm.py
ngage/plugins/napalm.py
from __future__ import absolute_import import ngage from ngage.exceptions import AuthenticationError, ConfigError import napalm_base from napalm_base.exceptions import ( ConnectionException, ReplaceConfigException, MergeConfigException ) @ngage.plugin.register('napalm') class Driver(ngage.plugins.DriverPlugin): plugin_type = 'napalm' def _do_init(self): config = self.config self.host = config.get('host') self.user = config.get('user') self.password = config.get('password') if ':' not in config['type']: raise ValueError('napalm requires a subtype') (na, driver) = config['type'].split(':', 2) cls = napalm_base.get_network_driver(driver) self.dev = cls(self.host, self.user, self.password) def _do_open(self): try: self.dev.open() except ConnectionException: raise AuthenticationError def _do_close(self): self.dev.close() def _do_pull(self): if not hasattr(self.dev, 'get_config'): raise NotImplementedError('get_config not implemented, please update napalm') return self.dev.get_config(retrieve='candidate')['candidate'] def _do_push(self, fname, **kwargs): try: self.dev.load_merge_candidate(filename=fname) except (MergeConfigException, ReplaceConfigException) as e: raise ConfigError(e.message) def _do_diff(self, index=0): if index != 0: raise NotImplementedError('version index not implemented') return self.dev.compare_config() def _do_lock(self): self.dev.lock() def _do_unlock(self): self.dev.unlock() def _do_commit(self, **kwargs): self.dev.commit_config() # def _do_check(self): # not impl by napalm def _do_rollback(self, index=0): if index == 0: self.dev.discard_config() elif index == 1: self.dev.rollback() else: raise NotImplementedError('version index not implemented')
Python
0.000001
@@ -528,16 +528,75 @@ ssword') +%0A self.optional_args = config.get('driver_args', %7B%7D) %0A%0A @@ -697,21 +697,15 @@ - (na, driver -) = c @@ -731,16 +731,19 @@ (':', 2) +%5B1%5D %0A @@ -788,16 +788,16 @@ driver)%0A - @@ -846,16 +846,50 @@ password +, optional_args=self.optional_args )%0A%0A d
2573d4ba20649d0ed506b34bfe8aa932f17a6bbe
Fix handling of CTRL+C on Windows Windows doesn't support os.killpg
src/rez/cli/_util.py
src/rez/cli/_util.py
import os import sys import signal from rez.vendor.argparse import _SubParsersAction, ArgumentParser, SUPPRESS, \ ArgumentError # Subcommands and their behaviors. # # 'arg_mode' determines how cli args are parsed. Values are: # * 'grouped': Args can be separated by '--'. This causes args to be grouped into # lists which are then passed as 'extra_arg_groups' to each command. # * 'passthrough': Unknown args are passed as first list in 'extra_arg_groups'. # The '--' arg is not treated as a special case. # * missing: Native python argparse behavior. # subcommands = { "bind": {}, "build": { "arg_mode": "grouped" }, "config": {}, "context": {}, "complete": { "hidden": True }, "cp": {}, "depends": {}, "diff": {}, "env": { "arg_mode": "grouped" }, "forward": { "hidden": True, "arg_mode": "passthrough" }, "gui": {}, "help": {}, "interpret": {}, "memcache": {}, "pip": {}, "plugins": {}, "python": { "arg_mode": "passthrough" }, "release": { "arg_mode": "grouped" }, "search": {}, "selftest": {}, "status": {}, "suite": {}, "test": {}, "view": {}, "yaml2py": {}, } class LazySubParsersAction(_SubParsersAction): """Argparse Action which calls the `setup_subparser` function provided to `LazyArgumentParser`. """ def __call__(self, parser, namespace, values, option_string=None): parser_name = values[0] # this bit is taken directly from argparse: try: parser = self._name_parser_map[parser_name] except KeyError: tup = parser_name, ', '.join(self._name_parser_map) msg = 'unknown parser %r (choices: %s)' % tup raise ArgumentError(self, msg) self._setup_subparser(parser_name, parser) caller = super(LazySubParsersAction, self).__call__ return caller(parser, namespace, values, option_string) def _setup_subparser(self, parser_name, parser): if hasattr(parser, 'setup_subparser'): help_ = parser.setup_subparser(parser_name, parser) if help_ is not None: if help_ == SUPPRESS: self._choices_actions = [act for act in self._choices_actions if act.dest != parser_name] else: help_action = self._find_choice_action(parser_name) if help_action is not None: help_action.help = help_ delattr(parser, 'setup_subparser') def _find_choice_action(self, parser_name): for help_action in self._choices_actions: if help_action.dest == parser_name: return help_action class LazyArgumentParser(ArgumentParser): """ ArgumentParser sub-class which accepts an additional `setup_subparser` argument for lazy setup of sub-parsers. `setup_subparser` is passed 'parser_name', 'parser', and can return a help string. 
""" def __init__(self, *args, **kwargs): self.setup_subparser = kwargs.pop('setup_subparser', None) super(LazyArgumentParser, self).__init__(*args, **kwargs) self.register('action', 'parsers', LazySubParsersAction) def format_help(self): """Sets up all sub-parsers when help is requested.""" if self._subparsers: for action in self._subparsers._actions: if isinstance(action, LazySubParsersAction): for parser_name, parser in action._name_parser_map.iteritems(): action._setup_subparser(parser_name, parser) return super(LazyArgumentParser, self).format_help() _handled_int = False _handled_term = False def _env_var_true(name): return (os.getenv(name, "").lower() in ("1", "true", "on", "yes")) def sigbase_handler(signum, frame): # show cursor - progress lib may have hidden it SHOW_CURSOR = '\x1b[?25h' sys.stdout.write(SHOW_CURSOR) sys.stdout.flush() # kill all child procs # FIXME this kills parent procs as well if not _env_var_true("_REZ_NO_KILLPG"): os.killpg(os.getpgid(0), signum) sys.exit(1) def sigint_handler(signum, frame): """Exit gracefully on ctrl-C.""" global _handled_int if not _handled_int: _handled_int = True if not _env_var_true("_REZ_QUIET_ON_SIG"): print >> sys.stderr, "Interrupted by user" sigbase_handler(signum, frame) def sigterm_handler(signum, frame): """Exit gracefully on terminate.""" global _handled_term if not _handled_term: _handled_term = True if not _env_var_true("_REZ_QUIET_ON_SIG"): print >> sys.stderr, "Terminated by user" sigbase_handler(signum, frame) signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGTERM, sigterm_handler) # Copyright 2013-2016 Allan Johns. # # This library is free software: you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation, either # version 3 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>.
Python
0.000001
@@ -4203,24 +4203,124 @@ O_KILLPG%22):%0A + if os.name == %22nt%22:%0A os.kill(os.getpid(), signal.CTRL_C_EVENT)%0A else:%0A os.k
b524b8047467f0282e303aeec090833baf16dfd9
Use comprehension instead of map(add, ...).
dask/dot.py
dask/dot.py
from __future__ import absolute_import, division, print_function import re from subprocess import check_call, CalledProcessError from graphviz import Digraph from toolz.curried.operator import add from .core import istask, get_dependencies, ishashable from .compatibility import BytesIO def task_label(task): """Label for a task on a dot graph. Examples -------- >>> from operator import add >>> task_label((add, 1, 2)) 'add' >>> task_label((add, (add, 1, 2), 3)) 'add(...)' """ func = task[0] if hasattr(func, 'funcs'): if len(func.funcs) > 1: return '{0}(...)'.format(funcname(func.funcs[0])) else: head = funcname(func.funcs[0]) else: head = funcname(task[0]) if any(has_sub_tasks(i) for i in task[1:]): return '{0}(...)'.format(head) else: return head def has_sub_tasks(task): """Returns True if the task has sub tasks""" if istask(task): return True elif isinstance(task, list): return any(has_sub_tasks(i) for i in task) else: return False def funcname(func): """Get the name of a function.""" while hasattr(func, 'func'): func = func.func return func.__name__ def name(x): try: return str(hash(x)) except TypeError: return str(hash(str(x))) _HASHPAT = re.compile('([0-9a-z]{32})') def label(x, cache=None): """ >>> label('x') 'x' >>> label(('x', 1)) "('x', 1)" >>> from hashlib import md5 >>> x = 'x-%s-hello' % md5(b'1234').hexdigest() >>> x 'x-81dc9bdb52d04dc20036dbd8313ed055-hello' >>> label(x) 'x-#-hello' """ s = str(x) m = re.search(_HASHPAT, s) if m is not None: for h in m.groups(): if cache is not None: n = cache.get(h, len(cache)) label = '#{0}'.format(n) # cache will be overwritten destructively cache[h] = n else: label = '#' s = s.replace(h, label) return s def to_graphviz(dsk, data_attributes=None, function_attributes=None): if data_attributes is None: data_attributes = {} if function_attributes is None: function_attributes = {} g = Digraph(graph_attr={'rankdir': 'BT'}) seen = set() cache = {} for k, v in dsk.items(): k_name = name(k) if k_name not in seen: seen.add(k_name) g.node(k_name, label=label(k, cache=cache), shape='box', **data_attributes.get(k, {})) if istask(v): func_name = name((k, 'function')) if func_name not in seen: seen.add(func_name) g.node(func_name, label=task_label(v), shape='circle', **function_attributes.get(k, {})) g.edge(func_name, k_name) for dep in get_dependencies(dsk, k): dep_name = name(dep) if dep_name not in seen: seen.add(dep_name) g.node(dep_name, label=label(dep, cache=cache), shape='box', **data_attributes.get(dep, {})) g.edge(dep_name, func_name) elif ishashable(v) and v in dsk: g.edge(name(v), k_name) return g def dot_graph(dsk, filename='mydask', **kwargs): g = to_graphviz(dsk, **kwargs) if filename is not None: dot, pdf, png = map(add(filename), ('.dot', '.pdf', '.png')) g.save(dot) try: check_call(['dot', '-Tpdf', dot, '-o', pdf]) check_call(['dot', '-Tpng', dot, '-o', png]) except CalledProcessError: msg = ("Please install The `dot` utility from graphviz:\n" " Debian: sudo apt-get install graphviz\n" " Mac OSX: brew install graphviz\n" " Windows: http://www.graphviz.org/Download..php") raise RuntimeError(msg) # pragma: no cover try: from IPython.display import Image return Image(png) except ImportError: pass else: try: from IPython.display import Image s = BytesIO() s.write(g.pipe(format='png')) s.seek(0) return Image(s.read()) except ImportError: pass
Python
0
@@ -156,47 +156,8 @@ raph -%0Afrom toolz.curried.operator import add %0A%0Afr @@ -3457,15 +3457,8 @@ g = -map(add (fil @@ -3462,18 +3462,33 @@ filename -), + + ext for ext in ('.dot'
68cf8281b512ea5941ec0b88ca532409e0e97866
Fix circular import
app/evaluation/emails.py
app/evaluation/emails.py
import json from django.conf import settings from django.core.mail import send_mail from comicsite.core.urlresolvers import reverse from evaluation.models import Result, Job def send_failed_job_email(job: Job): message = ( f'Unfortunately the evaluation for the submission to ' f'{job.challenge.short_name} failed with an error. The error message ' f'is:\n\n' f'{job.output}\n\n' f'You may wish to try and correct this, or contact the challenge ' f'organizers. The following information may help them:\n' f'User: {job.submission.creator.username}\n' f'Job ID: {job.pk}\n' f'Submission ID: {job.submission.pk}' ) recipient_list = [o.email for o in job.challenge.get_admins()] recipient_list.append(job.submission.creator.email) for r in recipient_list: send_mail( subject='Evaluation Failed', message=message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[r.email], ) def send_new_result_email(result: Result): recipient_list = [o.email for o in result.challenge.get_admins()] message = ( f'There is a new result for {result.challenge.short_name} from ' f'{result.job.submission.creator.username}. The following metrics ' f'were calculated:\n\n' f'{json.dumps(result.metrics, indent=2)}\n\n' ) if result.public: leaderboard_url = reverse( 'evaluation:results-list', kwargs={ 'challenge_short_name': result.challenge.short_name, } ) message += ( f'You can view the result on the leaderboard here: ' f'{leaderboard_url}' ) recipient_list.append(result.job.submission.creator.email) else: message += ( f'You can publish the result on the leaderboard here: ' f'{result.get_absolute_url()}' ) for r in recipient_list: send_mail( subject=f'New Result for {result.challenge.short_name}', message=message, from_email=settings.DEFAULT_FROM_EMAIL, recipient_list=[r.email], )
Python
0.000005
@@ -131,50 +131,8 @@ rse%0A -from evaluation.models import Result, Job%0A %0A%0Ade @@ -162,13 +162,8 @@ (job -: Job ):%0A @@ -1023,16 +1023,8 @@ sult -: Result ):%0A